// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#include "zcrx.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	int iou_flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_bind {
	struct file *file;
	int addr_len;
};

struct io_listen {
	struct file *file;
	int backlog;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int len;
	unsigned done_io;
	unsigned msg_flags;
	unsigned nr_multishot_loops;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 buf_group;
	bool retry;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
 */
#define MULTISHOT_MAX_RETRY	32

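/*
 * Parameters for a zero-copy receive (IORING_OP_RECV_ZC): the zcrx
 * interface queue to receive into, a length cap, and the flags taken
 * from the SQE in io_recvzc_prep().
 */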
struct io_recvzc {
	struct file *file;
	unsigned msg_flags;
	u16 flags;
	u32 len;
	struct io_zcrx_ifq *ifq;
};

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{
	if (kmsg->vec.iovec)
		io_vec_free(&kmsg->vec);
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	/* can't recycle, ensure we free the iovec if we have one */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_netmsg_iovec_free(hdr);
		return;
	}

	/* Let normal cleanup path reap it if we fail adding to the cache */
	io_alloc_cache_vec_kasan(&hdr->vec);
	if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&hdr->vec);

	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
		req->async_data = NULL;
		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *hdr;

	hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
	if (!hdr)
		return NULL;

	/* If the async data was cached, we might have an iov cached inside. */
	if (hdr->vec.iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	return hdr;
}

/* assign new iovec to kmsg, if we need to */
static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      struct iovec *iov)
{
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io_vec_reset_iovec(&kmsg->vec, iov, kmsg->msg.msg_iter.nr_segs);
	}
}

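/*
 * Reset the per-iteration state before a multishot or bundle request is
 * reissued: clear the short-transfer accounting and the retry flag, and
 * restore the buffer group so the next iteration picks a fresh provided
 * buffer.
 */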
static inline void io_mshot_prep_retry(struct io_kiocb *req,
				       struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	req->flags &= ~REQ_F_BL_EMPTY;
	sr->done_io = 0;
	sr->retry = false;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			     const struct iovec __user *uiov, unsigned uvec_seg,
			     int ddir)
{
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->vec.iovec) {
		nr_segs = iomsg->vec.nr;
		iov = iomsg->vec.iovec;
	} else {
		nr_segs = 1;
		iov = &iomsg->fast_iov;
	}

	ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
			     &iomsg->msg.msg_iter, io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;
	io_net_vec_assign(req, iomsg, iov);
	return 0;
}

static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir,
				  struct sockaddr __user **save_addr)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			struct compat_iovec tmp_iov;

			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
				return -EFAULT;
			sr->len = tmp_iov.iov_len;
		}
	}
	return 0;
}

static int io_copy_msghdr_from_user(struct user_msghdr *msg,
				    struct user_msghdr __user *umsg)
{
	if (!user_access_begin(umsg, sizeof(*umsg)))
		return -EFAULT;
	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
	user_access_end();
	return 0;
ua_end:
	user_access_end();
	return -EFAULT;
}

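/*
 * Copy the userspace msghdr into the kernel view, handling compat tasks
 * via io_compat_msg_copy_hdr(). With provided buffers, only a single iovec
 * is allowed and its length is stashed in sr->len for buffer selection.
 */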
static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir,
			   struct sockaddr __user **save_addr)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr __user *umsg = sr->umsg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

	if (io_is_compat(req->ctx)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr);
		if (ret)
			return ret;

		memset(msg, 0, sizeof(*msg));
		msg->msg_namelen = cmsg.msg_namelen;
		msg->msg_controllen = cmsg.msg_controllen;
		msg->msg_iov = compat_ptr(cmsg.msg_iov);
		msg->msg_iovlen = cmsg.msg_iovlen;
		return 0;
	}

	ret = io_copy_msghdr_from_user(msg, umsg);
	if (unlikely(ret))
		return ret;

	msg->msg_flags = 0;

	ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			struct iovec __user *uiov = msg->msg_iov;
			struct iovec tmp_iov;

			if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
				return -EFAULT;
			sr->len = tmp_iov.iov_len;
		}
	}
	return 0;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE, NULL);
	if (unlikely(ret))
		return ret;

	if (!(req->flags & REQ_F_BUFFER_SELECT))
		ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
					ITER_SOURCE);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	io_netmsg_iovec_free(io);
}

static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	void __user *addr;
	u16 addr_len;
	int ret;

	sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));

	if (READ_ONCE(sqe->__pad3[0]))
		return -EINVAL;

	kmsg->msg.msg_name = NULL;
	kmsg->msg.msg_namelen = 0;
	kmsg->msg.msg_control = NULL;
	kmsg->msg.msg_controllen = 0;
	kmsg->msg.msg_ubuf = NULL;

	addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	addr_len = READ_ONCE(sqe->addr_len);
	if (addr) {
		ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
		if (unlikely(ret < 0))
			return ret;
		kmsg->msg.msg_name = &kmsg->addr;
		kmsg->msg.msg_namelen = addr_len;
	}
	if (!io_do_buffer_select(req)) {
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret < 0))
			return ret;
	}
	return 0;
}

static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));

	return io_sendmsg_copy_hdr(req, kmsg);
}

static int io_sendmsg_zc_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct user_msghdr msg;
	int ret;

	if (!(sr->flags & IORING_RECVSEND_FIXED_BUF))
		return io_sendmsg_setup(req, sqe);

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));

	ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
	if (unlikely(ret))
		return ret;
	sr->msg_control = kmsg->msg.msg_control_user;
	kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;

	return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, msg.msg_iovlen);
}

#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->retry = false;

	if (req->opcode != IORING_OP_SEND) {
		if (sqe->addr2 || sqe->file_index)
			return -EINVAL;
	}

	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~SENDMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_SENDMSG)
			return -EINVAL;
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		sr->msg_flags |= MSG_WAITALL;
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
		req->flags |= REQ_F_MULTISHOT;
	}

	if (io_is_compat(req->ctx))
		sr->msg_flags |= MSG_CMSG_COMPAT;

	if (unlikely(!io_msg_alloc_async(req)))
		return -ENOMEM;
	if (req->opcode != IORING_OP_SENDMSG)
		return io_send_setup(req, sqe);
	return io_sendmsg_setup(req, sqe);
}

static void io_req_msg_cleanup(struct io_kiocb *req,
			       unsigned int issue_flags)
{
	io_netmsg_recycle(req, issue_flags);
}

/*
 * For bundle completions, we need to figure out how many segments we consumed.
 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
 * the segments, then it's a trivial question to answer. If we have residual
 * data in the iter, then loop the segments to figure out how much we
 * transferred.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{
	struct iovec *iov;
	int nbufs;

	/* no data is always zero segments, and a ubuf is always 1 segment */
	if (ret <= 0)
		return 0;
	if (iter_is_ubuf(&kmsg->msg.msg_iter))
		return 1;

	iov = kmsg->vec.iovec;
	if (!iov)
		iov = &kmsg->fast_iov;

	/* if all data was transferred, it's basic pointer math */
	if (!iov_iter_count(&kmsg->msg.msg_iter))
		return iter_iov(&kmsg->msg.msg_iter) - iov;

	/* short transfer, count segments */
	nbufs = 0;
	do {
		int this_len = min_t(int, iov[nbufs].iov_len, ret);

		nbufs++;
		ret -= this_len;
	} while (ret);

	return nbufs;
}

static inline bool io_send_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	bool bundle_finished = *ret <= 0;
	unsigned int cflags;

	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
		cflags = io_put_kbuf(req, *ret, issue_flags);
		goto finish;
	}

	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);

	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
		goto finish;

	/*
	 * Fill CQE for this send and see if we should keep trying to
	 * send from this socket.
	 */
	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		io_mshot_prep_retry(req, kmsg);
		return false;
	}

	/* Otherwise stop bundle and use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);
	*ret = IOU_OK;
	return true;
}

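/*
 * Issue path for IORING_OP_SENDMSG. The msghdr was copied in at prep time;
 * here we only restore msg_control, issue the send, and either retry with
 * the partial progress recorded in sr->done_io or post the final result.
 */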
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	io_req_msg_cleanup(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
				 struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	int ret;
	struct buf_sel_arg arg = {
		.iovs = &kmsg->fast_iov,
		.max_len = min_not_zero(sr->len, INT_MAX),
		.nr_iovs = 1,
	};

	if (kmsg->vec.iovec) {
		arg.nr_iovs = kmsg->vec.nr;
		arg.iovs = kmsg->vec.iovec;
		arg.mode = KBUF_MODE_FREE;
	}

	if (!(sr->flags & IORING_RECVSEND_BUNDLE))
		arg.nr_iovs = 1;
	else
		arg.mode |= KBUF_MODE_EXPAND;

	ret = io_buffers_select(req, &arg, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
		kmsg->vec.nr = ret;
		kmsg->vec.iovec = arg.iovs;
		req->flags |= REQ_F_NEED_CLEANUP;
	}
	sr->len = arg.out_len;

	if (ret == 1) {
		sr->buf = arg.iovs[0].iov_base;
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	} else {
		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
			      arg.iovs, ret, arg.out_len);
	}

	return 0;
}

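/*
 * Issue path for IORING_OP_SEND, including provided-buffer and bundle
 * variants. Bundles keep selecting buffers and posting CQEs via
 * io_send_finish() until the buffer list is exhausted or a short send
 * ends the sequence.
 */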
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

retry_bundle:
	if (io_do_buffer_select(req)) {
		ret = io_send_select_buffer(req, issue_flags, kmsg);
		if (ret)
			return ret;
	}

	/*
	 * If MSG_WAITALL is set, or this is a bundle send, then we need
	 * the full amount. If just the bundle flag is set and we do a short
	 * send, then we complete the bundle sequence rather than continue on.
	 */
	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	kmsg->msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &kmsg->msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	if (!io_send_finish(req, &ret, kmsg, issue_flags))
		goto retry_bundle;

	io_req_msg_cleanup(req, issue_flags);
	return ret;
}

static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{
	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
	    (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
		int hdr;

		if (unlikely(namelen < 0))
			return -EOVERFLOW;
		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
				       namelen, &hdr))
			return -EOVERFLOW;
		if (check_add_overflow(hdr, controllen, &hdr))
			return -EOVERFLOW;

		iomsg->namelen = namelen;
		iomsg->controllen = controllen;
		return 0;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct user_msghdr msg;
	int ret;

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
	if (unlikely(ret))
		return ret;

	if (!(req->flags & REQ_F_BUFFER_SELECT)) {
		ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
					ITER_DEST);
		if (unlikely(ret))
			return ret;
	}
	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
				     msg.msg_controllen);
}

static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg;
	int ret;

	kmsg = io_msg_alloc_async(req);
	if (unlikely(!kmsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_RECV) {
		kmsg->msg.msg_name = NULL;
		kmsg->msg.msg_namelen = 0;
		kmsg->msg.msg_inq = 0;
		kmsg->msg.msg_control = NULL;
		kmsg->msg.msg_get_inq = 1;
		kmsg->msg.msg_controllen = 0;
		kmsg->msg.msg_iocb = NULL;
		kmsg->msg.msg_ubuf = NULL;

		if (!io_do_buffer_select(req)) {
			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
					  &kmsg->msg.msg_iter);
			if (unlikely(ret))
				return ret;
		}
		return 0;
	}

	return io_recvmsg_copy_hdr(req, kmsg);
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
		       IORING_RECVSEND_BUNDLE)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->retry = false;

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~RECVMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (req->flags & REQ_F_BUFFER_SELECT) {
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
	}
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_RECVMSG)
			return -EINVAL;
	}

	if (io_is_compat(req->ctx))
		sr->msg_flags |= MSG_CMSG_COMPAT;

	sr->nr_multishot_loops = 0;
	return io_recvmsg_prep_setup(req);
}

/* bits to clear in old and inherit in new cflags on bundle retry */
#define CQE_F_MASK	(IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  bool mshot_finished, unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	unsigned int cflags = 0;

	if (kmsg->msg.msg_inq > 0)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
				       issue_flags);
		if (sr->retry)
			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
		/* bundle with no more immediate buffers, we're done */
		if (req->flags & REQ_F_BL_EMPTY)
			goto finish;
		/* if more is available, retry and append to this one */
		if (!sr->retry && kmsg->msg.msg_inq > 0 && *ret > 0) {
			req->cqe.flags = cflags & ~CQE_F_MASK;
			sr->len = kmsg->msg.msg_inq;
			sr->done_io += *ret;
			sr->retry = true;
			return false;
		}
	} else {
		cflags |= io_put_kbuf(req, *ret, issue_flags);
	}

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		*ret = IOU_RETRY;
		io_mshot_prep_retry(req, kmsg);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
				return false;
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_REQUEUE;
		}
		return true;
	}

	/* Finish the request / stop multishot. */
finish:
	io_req_set_res(req, *ret, cflags);
	*ret = IOU_COMPLETE;
	io_req_msg_cleanup(req, issue_flags);
	return true;
}

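/*
 * For multishot recvmsg, carve space for struct io_uring_recvmsg_out, the
 * source address and the control data out of the front of the selected
 * buffer, leaving *buf/*len pointing at the payload area.
 */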
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

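/*
 * Issue path for IORING_OP_RECVMSG, covering both the classic single-shot
 * case and the multishot variant that packs its metadata into the provided
 * buffer via io_recvmsg_multishot().
 */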
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				io_kbuf_recycle(req, issue_flags);

			return IOU_RETRY;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return IOU_RETRY;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}

static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      size_t *len, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	/*
	 * If the ring isn't locked, then don't use the peek interface
	 * to grab multiple buffers as we will lock/unlock between
	 * this selection and posting the buffers.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    sr->flags & IORING_RECVSEND_BUNDLE) {
		struct buf_sel_arg arg = {
			.iovs = &kmsg->fast_iov,
			.nr_iovs = 1,
			.mode = KBUF_MODE_EXPAND,
		};

		if (kmsg->vec.iovec) {
			arg.nr_iovs = kmsg->vec.nr;
			arg.iovs = kmsg->vec.iovec;
			arg.mode |= KBUF_MODE_FREE;
		}

		if (kmsg->msg.msg_inq > 0)
			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);

		ret = io_buffers_peek(req, &arg);
		if (unlikely(ret < 0))
			return ret;

		/* special case 1 vec, can be a fast path */
		if (ret == 1) {
			sr->buf = arg.iovs[0].iov_base;
			sr->len = arg.iovs[0].iov_len;
			goto map_ubuf;
		}
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
			      arg.out_len);
		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
			kmsg->vec.nr = ret;
			kmsg->vec.iovec = arg.iovs;
			req->flags |= REQ_F_NEED_CLEANUP;
		}
	} else {
		void __user *buf;

		*len = sr->len;
		buf = io_buffer_select(req, len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = *len;
map_ubuf:
		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;
	bool mshot_finished;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
		if (unlikely(ret)) {
			kmsg->msg.msg_inq = -1;
			goto out_free;
		}
		sr->buf = NULL;
	}

	kmsg->msg.msg_flags = 0;
	kmsg->msg.msg_inq = -1;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = sock_recvmsg(sock, &kmsg->msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				io_kbuf_recycle(req, issue_flags);

			return IOU_RETRY;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	mshot_finished = ret <= 0;
	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}

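/*
 * Zero-copy receive is only offered in multishot form and requires a zcrx
 * interface queue to have been registered on the ring; prep validates both
 * and rejects any msg_flags.
 */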
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
	unsigned ifq_idx;

	if (unlikely(sqe->file_index || sqe->addr2 || sqe->addr ||
		     sqe->addr3))
		return -EINVAL;

	ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
	if (ifq_idx != 0)
		return -EINVAL;
	zc->ifq = req->ctx->ifq;
	if (!zc->ifq)
		return -EINVAL;
	zc->len = READ_ONCE(sqe->len);
	zc->flags = READ_ONCE(sqe->ioprio);
	zc->msg_flags = READ_ONCE(sqe->msg_flags);
	if (zc->msg_flags)
		return -EINVAL;
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT))
		return -EINVAL;
	/* multishot required */
	if (!(zc->flags & IORING_RECV_MULTISHOT))
		return -EINVAL;
	/* All data completions are posted as aux CQEs. */
	req->flags |= REQ_F_APOLL_MULTISHOT;

	return 0;
}

int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
	struct socket *sock;
	unsigned int len;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	len = zc->len;
	ret = io_zcrx_recv(req, zc->ifq, sock, zc->msg_flags | MSG_DONTWAIT,
			   issue_flags, &zc->len);
	if (len && zc->len == 0) {
		io_req_set_res(req, 0, 0);

		return IOU_COMPLETE;
	}
	if (unlikely(ret <= 0) && ret != -EAGAIN) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret == IOU_REQUEUE)
			return IOU_REQUEUE;

		req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return IOU_COMPLETE;
	}
	return IOU_RETRY;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io = req->async_data;

	if (req_has_async_data(req))
		io_netmsg_iovec_free(io);
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	zc->done_io = 0;
	zc->retry = false;
	req->flags |= REQ_F_POLL_NO_LAZY;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			struct io_notif_data *nd = io_notif_to_data(notif);

			nd->zc_report = true;
			nd->zc_used = false;
			nd->zc_copied = false;
		}
	}

	if (req->opcode != IORING_OP_SEND_ZC) {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
	}

	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
	req->buf_index = READ_ONCE(sqe->buf_index);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	if (io_is_compat(req->ctx))
		zc->msg_flags |= MSG_CMSG_COMPAT;

	if (unlikely(!io_msg_alloc_async(req)))
		return -ENOMEM;
	if (req->opcode == IORING_OP_SEND_ZC) {
		req->flags |= REQ_F_IMPORT_BUFFER;
		return io_send_setup(req, sqe);
	}
	return io_sendmsg_zc_setup(req, sqe);
}

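/*
 * sg_from_iter helpers for zero-copy sends: the iovec variant drops the
 * managed-frag optimisation and uses the generic zerocopy fill, while
 * io_sg_from_iter() maps bvec segments of a registered buffer directly
 * into skb frags.
 */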
static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return zerocopy_fill_skb_from_iter(skb, from, length);
}

static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return zerocopy_fill_skb_from_iter(skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;
	return ret;
}

static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	int ret;

	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		sr->notif->buf_index = req->buf_index;
		ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
					(u64)(uintptr_t)sr->buf, sr->len,
					ITER_SOURCE, issue_flags);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(sr->notif, sr->len);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		req->flags &= ~REQ_F_IMPORT_BUFFER;
		ret = io_send_zc_import(req, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	msg_flags = zc->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	kmsg->msg.msg_flags = msg_flags;
	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &kmsg->msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
		int ret;

		ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter, req,
					&kmsg->vec, uvec_segs, issue_flags);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter;
		req->flags &= ~REQ_F_IMPORT_BUFFER;
	}

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;
	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		sr->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

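/*
 * Failure hook for the send/recv opcodes: report any partial progress as
 * the result, and for zero-copy sends that still own a notification keep
 * IORING_CQE_F_MORE set so userspace continues to expect the notif CQE.
 */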
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (sr->done_io)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

#define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
			 IORING_ACCEPT_POLL_FIRST)

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	accept->iou_flags = READ_ONCE(sqe->ioprio);
	if (accept->iou_flags & ~ACCEPT_FLAGS)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	return 0;
}

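/*
 * Issue path for IORING_OP_ACCEPT. In multishot mode each accepted
 * connection is posted as an aux CQE with IORING_CQE_F_MORE, and we keep
 * looping while the socket may still have pending connections.
 */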
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool fixed = !!accept->file_slot;
	struct proto_accept_arg arg = {
		.flags = force_nonblock ? O_NONBLOCK : 0,
	};
	struct file *file;
	unsigned cflags;
	int ret, fd;

	if (!(req->flags & REQ_F_POLLED) &&
	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
		return -EAGAIN;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	arg.err = 0;
	arg.is_empty = -1;
	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock &&
		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT))
			return IOU_RETRY;

		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	cflags = 0;
	if (!arg.is_empty)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
	    io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
			goto retry;
		return IOU_RETRY;
	}

	io_req_set_res(req, ret, cflags);
	if (ret < 0)
		req_set_fail(req);
	return IOU_COMPLETE;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io = req->async_data;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (unlikely(req->flags & REQ_F_FAIL)) {
		ret = -ECONNRESET;
		goto out;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
				 file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_msg_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct sockaddr __user *uaddr;
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	bind->addr_len = READ_ONCE(sqe->addr2);

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;
	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
}

int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct io_async_msghdr *io = req->async_data;
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);

	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
		return -EINVAL;

	listen->backlog = READ_ONCE(sqe->len);
	return 0;
}

int io_listen(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_listen_socket(sock, listen->backlog);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}

void io_netmsg_cache_free(const void *entry)
{
	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

	io_vec_free(&kmsg->vec);
	kfree(kmsg);
}
#endif