// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb            kiocb;
        u64                     addr;
        u32                     len;
        rwf_t                   flags;
};

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
        /* If FMODE_NOWAIT is set for a file, we're golden */
        if (req->flags & REQ_F_SUPPORT_NOWAIT)
                return true;
        /* No FMODE_NOWAIT, if we can poll, check the status */
        if (io_file_can_poll(req)) {
                struct poll_table_struct pt = { ._key = mask };

                return vfs_poll(req->file, &pt) & mask;
        }
        /* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
        return false;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
        struct compat_iovec __user *uiov;
        compat_ssize_t clen;

        uiov = u64_to_user_ptr(rw->addr);
        if (!access_ok(uiov, sizeof(*uiov)))
                return -EFAULT;
        if (__get_user(clen, &uiov->iov_len))
                return -EFAULT;
        if (clen < 0)
                return -EINVAL;

        rw->len = clen;
        return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
        struct iovec __user *uiov;
        struct iovec iov;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->len != 1)
                return -EINVAL;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return io_iov_compat_buffer_select_prep(rw);
#endif

        uiov = u64_to_user_ptr(rw->addr);
        if (copy_from_user(&iov, uiov, sizeof(*uiov)))
                return -EFAULT;
        rw->len = iov.iov_len;
        return 0;
}

static int __io_import_iovec(int ddir, struct io_kiocb *req,
                             struct io_async_rw *io,
                             unsigned int issue_flags)
{
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct iovec *iov;
        void __user *buf;
        int nr_segs, ret;
        size_t sqe_len;

        buf = u64_to_user_ptr(rw->addr);
        sqe_len = rw->len;

        if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
                if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (!buf)
                                return -ENOBUFS;
                        rw->addr = (unsigned long) buf;
                        rw->len = sqe_len;
                }

                return import_ubuf(ddir, buf, sqe_len, &io->iter);
        }

        if (io->free_iovec) {
                nr_segs = io->free_iov_nr;
                iov = io->free_iovec;
        } else {
                iov = &io->fast_iov;
                nr_segs = 1;
        }
        ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
                             req->ctx->compat);
        if (unlikely(ret < 0))
                return ret;
        if (iov) {
                req->flags |= REQ_F_NEED_CLEANUP;
                io->free_iov_nr = io->iter.nr_segs;
                kfree(io->free_iovec);
                io->free_iovec = iov;
        }
        return 0;
}
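
/*
 * Import the user buffer or iovec for this request and save the iov_iter
 * state, so it can be restored if the request has to be retried or
 * reissued later.
 */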
static inline int io_import_iovec(int rw, struct io_kiocb *req,
                                  struct io_async_rw *io,
                                  unsigned int issue_flags)
{
        int ret;

        ret = __io_import_iovec(rw, req, io, issue_flags);
        if (unlikely(ret < 0))
                return ret;

        iov_iter_save_state(&io->iter, &io->iter_state);
        return 0;
}

static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_async_rw *rw = req->async_data;

        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                return;

        io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr);
        if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
                req->async_data = NULL;
                req->flags &= ~REQ_F_ASYNC_DATA;
        }
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
        /*
         * Disable quick recycling for anything that's gone through io-wq.
         * In theory, this should be fine to cleanup. However, some read or
         * write iter handling touches the iovec AFTER having called into the
         * handler, eg to reexpand or revert. This means we can have:
         *
         * task                 io-wq
         *   issue
         *   punt to io-wq
         *                      issue
         *                        blkdev_write_iter()
         *                          ->ki_complete()
         *                            io_complete_rw()
         *                              queue tw complete
         *   run tw
         *     req_rw_cleanup
         *                            iov_iter_count() <- look at iov_iter again
         *
         * which can lead to a UAF. This is only possible for io-wq offload
         * as the cleanup can run in parallel. As io-wq is not the fast path,
         * just leave cleanup to the end.
         *
         * This is really a bug in the core code that does this, any issue
         * path should assume that a successful (or -EIOCBQUEUED) return can
         * mean that the underlying data can be gone at any time. But that
         * should be fixed separately, and then this check could be killed.
         */
        if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
                req->flags &= ~REQ_F_NEED_CLEANUP;
                io_rw_recycle(req, issue_flags);
        }
}

static int io_rw_alloc_async(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_async_rw *rw;

        rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
        if (!rw)
                return -ENOMEM;
        if (rw->free_iovec)
                req->flags |= REQ_F_NEED_CLEANUP;
        rw->bytes_done = 0;
        return 0;
}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
        struct io_async_rw *rw;

        if (io_rw_alloc_async(req))
                return -ENOMEM;

        if (!do_import || io_do_buffer_select(req))
                return 0;

        rw = req->async_data;
        return io_import_iovec(ddir, req, rw, 0);
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
        io->meta_state.seed = io->meta.seed;
        iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
        if (kiocb->ki_flags & IOCB_HAS_METADATA) {
                io->meta.seed = io->meta_state.seed;
                iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
        }
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
                         u64 attr_ptr, u64 attr_type_mask)
{
        struct io_uring_attr_pi pi_attr;
        struct io_async_rw *io;
        int ret;

        if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
                           sizeof(pi_attr)))
                return -EFAULT;

        if (pi_attr.rsvd)
                return -EINVAL;

        io = req->async_data;
        io->meta.flags = pi_attr.flags;
        io->meta.app_tag = pi_attr.app_tag;
        io->meta.seed = pi_attr.seed;
        ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
                          pi_attr.len, &io->meta.iter);
        if (unlikely(ret < 0))
                return ret;
        req->flags |= REQ_F_HAS_METADATA;
        io_meta_save_state(io);
        return ret;
}
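
/*
 * Common prep for read/write requests: pull the position, priority, flags
 * and buffer index from the SQE, pick the completion handler, set up the
 * async data and optionally import the buffer and PI attributes.
 */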
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                      int ddir, bool do_import)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned ioprio;
        u64 attr_type_mask;
        int ret;

        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);

        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
                        return ret;

                rw->kiocb.ki_ioprio = ioprio;
        } else {
                rw->kiocb.ki_ioprio = get_current_ioprio();
        }
        rw->kiocb.dio_complete = NULL;
        rw->kiocb.ki_flags = 0;

        if (req->ctx->flags & IORING_SETUP_IOPOLL)
                rw->kiocb.ki_complete = io_complete_rw_iopoll;
        else
                rw->kiocb.ki_complete = io_complete_rw;

        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
        ret = io_prep_rw_setup(req, ddir, do_import);

        if (unlikely(ret))
                return ret;

        attr_type_mask = READ_ONCE(sqe->attr_type_mask);
        if (attr_type_mask) {
                u64 attr_ptr;

                /* only PI attribute is supported currently */
                if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
                        return -EINVAL;

                attr_ptr = READ_ONCE(sqe->attr_ptr);
                ret = io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
        }
        return ret;
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw(req, sqe, ITER_SOURCE, true);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                       int ddir)
{
        const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
        int ret;

        ret = io_prep_rw(req, sqe, ddir, do_import);
        if (unlikely(ret))
                return ret;
        if (do_import)
                return 0;

        /*
         * Have to do this validation here, as by the time this runs in
         * io_read() rw->len might have changed due to buffer selection.
         */
        return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rwv(req, sqe, ITER_SOURCE);
}
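
/*
 * Prep for read/write against a registered (fixed) buffer: look up the
 * buffer node by buf_index and import it into the iterator at prep time.
 */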
static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                            int ddir)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_rsrc_node *node;
        struct io_async_rw *io;
        int ret;

        ret = io_prep_rw(req, sqe, ddir, false);
        if (unlikely(ret))
                return ret;

        node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
        if (!node)
                return -EFAULT;
        io_req_assign_buf_node(req, node);

        io = req->async_data;
        ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
        iov_iter_save_state(&io->iter, &io->iter_state);
        return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        int ret;

        /* must be used with provided buffers */
        if (!(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;

        ret = io_prep_rw(req, sqe, ITER_DEST, false);
        if (unlikely(ret))
                return ret;

        if (rw->addr || rw->len)
                return -EINVAL;

        req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
        lockdep_assert_held(&req->ctx->uring_lock);
        io_rw_recycle(req, 0);
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_pos != -1)
                return &rw->kiocb.ki_pos;

        if (!(req->file->f_mode & FMODE_STREAM)) {
                req->flags |= REQ_F_CUR_POS;
                rw->kiocb.ki_pos = req->file->f_pos;
                return &rw->kiocb.ki_pos;
        }

        rw->kiocb.ki_pos = 0;
        return NULL;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        umode_t mode = file_inode(req->file)->i_mode;
        struct io_async_rw *io = req->async_data;
        struct io_ring_ctx *ctx = req->ctx;

        if (!S_ISBLK(mode) && !S_ISREG(mode))
                return false;
        if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
            !(ctx->flags & IORING_SETUP_IOPOLL)))
                return false;
        /*
         * If ref is dying, we might be running poll reap from the exit work.
         * Don't attempt to reissue from that path, just let it fail with
         * -EAGAIN.
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;

        io_meta_restore(io, &rw->kiocb);
        iov_iter_restore(&io->iter, &io->iter_state);
        return true;
#else
        return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
        if (req->flags & REQ_F_ISREG) {
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                kiocb_end_write(&rw->kiocb);
        }
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        if (rw->kiocb.ki_flags & IOCB_WRITE) {
                io_req_end_write(req);
                fsnotify_modify(req->file);
        } else {
                fsnotify_access(req->file);
        }
}
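
/*
 * Common completion handling: if the result differs from what was expected,
 * either mark the request for reissue on -EAGAIN, or record the error and
 * flag the request as failed.
 */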
static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
        if (res == req->cqe.res)
                return;
        if (res == -EAGAIN && io_rw_should_reissue(req)) {
                req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
        } else {
                req_set_fail(req);
                req->cqe.res = res;
        }
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
        struct io_async_rw *io = req->async_data;

        /* add previously done IO, if any */
        if (req_has_async_data(req) && io->bytes_done > 0) {
                if (res < 0)
                        res = io->bytes_done;
                else
                        res += io->bytes_done;
        }
        return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
                long res = kiocb->dio_complete(rw->kiocb.private);

                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }

        io_req_io_end(req);

        if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
                req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

        io_req_rw_cleanup(req, 0);
        io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
                __io_complete_rw_common(req, res);
                io_req_set_res(req, io_fixup_rw_res(req, res), 0);
        }
        req->io_task_work.func = io_req_rw_complete;
        __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
        struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
        struct io_kiocb *req = cmd_to_io_kiocb(rw);

        if (kiocb->ki_flags & IOCB_WRITE)
                io_req_end_write(req);
        if (unlikely(res != req->cqe.res)) {
                if (res == -EAGAIN && io_rw_should_reissue(req))
                        req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
                else
                        req->cqe.res = res;
        }

        /* order with io_iopoll_complete() checking ->iopoll_completed */
        smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

        /* IO was queued async, completion will happen later */
        if (ret == -EIOCBQUEUED)
                return;

        /* transform internal restart error codes */
        if (unlikely(ret < 0)) {
                switch (ret) {
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                case -ERESTARTNOHAND:
                case -ERESTART_RESTARTBLOCK:
                        /*
                         * We can't just restart the syscall, since previously
                         * submitted sqes may already be in progress. Just fail
                         * this IO with EINTR.
                         */
                        ret = -EINTR;
                        break;
                }
        }

        if (req->ctx->flags & IORING_SETUP_IOPOLL)
                io_complete_rw_iopoll(&rw->kiocb, ret);
        else
                io_complete_rw(&rw->kiocb, ret);
}
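
/*
 * Finish a request from the submission path: complete it inline when we're
 * not using IOPOLL and the result is non-negative, otherwise hand the
 * result to io_rw_done() for deferred completion.
 */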
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                      unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned final_ret = io_fixup_rw_res(req, ret);

        if (ret >= 0 && req->flags & REQ_F_CUR_POS)
                req->file->f_pos = rw->kiocb.ki_pos;
        if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
                __io_complete_rw_common(req, ret);
                /*
                 * Safe to call io_end from here as we're inline
                 * from the submission path.
                 */
                io_req_io_end(req);
                io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
                io_req_rw_cleanup(req, issue_flags);
                return IOU_OK;
        } else {
                io_rw_done(req, ret);
        }

        return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
        struct kiocb *kiocb = &rw->kiocb;
        struct file *file = kiocb->ki_filp;
        ssize_t ret = 0;
        loff_t *ppos;

        /*
         * Don't support polled IO through this interface, and we can't
         * support non-blocking either. For the latter, this just causes
         * the kiocb to be handled from an async context.
         */
        if (kiocb->ki_flags & IOCB_HIPRI)
                return -EOPNOTSUPP;
        if ((kiocb->ki_flags & IOCB_NOWAIT) &&
            !(kiocb->ki_filp->f_flags & O_NONBLOCK))
                return -EAGAIN;

        ppos = io_kiocb_ppos(kiocb);

        while (iov_iter_count(iter)) {
                void __user *addr;
                size_t len;
                ssize_t nr;

                if (iter_is_ubuf(iter)) {
                        addr = iter->ubuf + iter->iov_offset;
                        len = iov_iter_count(iter);
                } else if (!iov_iter_is_bvec(iter)) {
                        addr = iter_iov_addr(iter);
                        len = iter_iov_len(iter);
                } else {
                        addr = u64_to_user_ptr(rw->addr);
                        len = rw->len;
                }

                if (ddir == READ)
                        nr = file->f_op->read(file, addr, len, ppos);
                else
                        nr = file->f_op->write(file, addr, len, ppos);

                if (nr < 0) {
                        if (!ret)
                                ret = nr;
                        break;
                }
                ret += nr;
                if (!iov_iter_is_bvec(iter)) {
                        iov_iter_advance(iter, nr);
                } else {
                        rw->addr += nr;
                        rw->len -= nr;
                        if (!rw->len)
                                break;
                }
                if (nr != len)
                        break;
        }

        return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                             int sync, void *arg)
{
        struct wait_page_queue *wpq;
        struct io_kiocb *req = wait->private;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct wait_page_key *key = arg;

        wpq = container_of(wait, struct wait_page_queue, wait);

        if (!wake_page_match(wpq, key))
                return 0;

        rw->kiocb.ki_flags &= ~IOCB_WAITQ;
        list_del_init(&wait->entry);
        io_req_task_queue(req);
        return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
        struct io_async_rw *io = req->async_data;
        struct wait_page_queue *wait = &io->wpq;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;

        /*
         * Never retry for NOWAIT or a request with metadata, we just complete
         * with -EAGAIN.
         */
        if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
                return false;

        /* Only for buffered IO */
        if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
                return false;

        /*
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
        if (io_file_can_poll(req) ||
            !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
                return false;

        wait->wait.func = io_async_buf_func;
        wait->wait.private = req;
        wait->wait.flags = 0;
        INIT_LIST_HEAD(&wait->wait.entry);
        kiocb->ki_flags |= IOCB_WAITQ;
        kiocb->ki_flags &= ~IOCB_NOWAIT;
        kiocb->ki_waitq = wait;
        return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
        struct file *file = rw->kiocb.ki_filp;

        if (likely(file->f_op->read_iter))
                return file->f_op->read_iter(&rw->kiocb, iter);
        else if (file->f_op->read)
                return loop_rw_iter(READ, rw, iter);
        else
                return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
        return req->flags & REQ_F_ISREG ||
                S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct kiocb *kiocb = &rw->kiocb;
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (unlikely(!(file->f_mode & mode)))
                return -EBADF;

        if (!(req->flags & REQ_F_FIXED_FILE))
                req->flags |= io_file_get_flags(file);

        kiocb->ki_flags = file->f_iocb_flags;
        ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
        if (unlikely(ret))
                return ret;
        kiocb->ki_flags |= IOCB_ALLOC_CACHE;

        /*
         * If the file is marked O_NONBLOCK, still allow retry for it if it
         * supports async. Otherwise it's impossible to use O_NONBLOCK files
         * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
         */
        if (kiocb->ki_flags & IOCB_NOWAIT ||
            ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
                req->flags |= REQ_F_NOWAIT;

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
                        return -EOPNOTSUPP;
                kiocb->private = NULL;
                kiocb->ki_flags |= IOCB_HIPRI;
                req->iopoll_completed = 0;
                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
                        /* make sure every req only blocks once */
                        req->flags &= ~REQ_F_IOPOLL_STATE;
                        req->iopoll_start = ktime_get_ns();
                }
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
        }

        if (req->flags & REQ_F_HAS_METADATA) {
                struct io_async_rw *io = req->async_data;

                /*
                 * We have a union of meta fields with wpq used for buffered-io
                 * in io_async_rw, so fail it here.
                 */
                if (!(req->file->f_flags & O_DIRECT))
                        return -EOPNOTSUPP;
                kiocb->ki_flags |= IOCB_HAS_METADATA;
                kiocb->private = &io->meta;
        }

        return 0;
}
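
/*
 * Core read path: import the buffer if needed, issue the read, and handle
 * -EAGAIN and short reads, retrying with the page waitqueue armed where
 * possible.
 */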
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        struct kiocb *kiocb = &rw->kiocb;
        ssize_t ret;
        loff_t *ppos;

        if (io_do_buffer_select(req)) {
                ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
                if (unlikely(ret < 0))
                        return ret;
        }
        ret = io_rw_init_file(req, FMODE_READ, READ);
        if (unlikely(ret))
                return ret;
        req->cqe.res = iov_iter_count(&io->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
                        return -EAGAIN;
                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
        if (unlikely(ret))
                return ret;

        ret = io_iter_do_read(rw, &io->iter);

        /*
         * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
         * issue, even though they should be returning -EAGAIN. To be safe,
         * retry from blocking context for either.
         */
        if (ret == -EOPNOTSUPP && force_nonblock)
                ret = -EAGAIN;

        if (ret == -EAGAIN) {
                /* If we can poll, just do that. */
                if (io_file_can_poll(req))
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                return IOU_ISSUE_SKIP_COMPLETE;
        } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
                   (issue_flags & IO_URING_F_MULTISHOT)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }

        /*
         * Don't depend on the iter state matching what was consumed, or being
         * untouched in case of error. Restore it and we'll advance it
         * manually if we need to.
         */
        iov_iter_restore(&io->iter, &io->iter_state);
        io_meta_restore(io, kiocb);

        do {
                /*
                 * We end up here because of a partial read, either from
                 * above or inside this loop. Advance the iter by the bytes
                 * that were consumed.
                 */
                iov_iter_advance(&io->iter, ret);
                if (!iov_iter_count(&io->iter))
                        break;
                io->bytes_done += ret;
                iov_iter_save_state(&io->iter, &io->iter_state);

                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
                        return -EAGAIN;
                }

                req->cqe.res = iov_iter_count(&io->iter);
                /*
                 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
                 * we get -EIOCBQUEUED, then we'll get a notification when the
                 * desired page gets unlocked. We can also get a partial read
                 * here, and if we do, then just retry at the new offset.
                 */
                ret = io_iter_do_read(rw, &io->iter);
                if (ret == -EIOCBQUEUED)
                        return IOU_ISSUE_SKIP_COMPLETE;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
                iov_iter_restore(&io->iter, &io->iter_state);
        } while (ret > 0);
done:
        /* it's faster to check here than to delegate to kfree */
        return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret;

        ret = __io_read(req, issue_flags);
        if (ret >= 0)
                return kiocb_done(req, ret, issue_flags);

        return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned int cflags = 0;
        int ret;

        /*
         * Multishot MUST be used on a pollable file
         */
        if (!io_file_can_poll(req))
                return -EBADFD;

        /* make it sync, multishot doesn't support async execution */
        rw->kiocb.ki_complete = NULL;
        ret = __io_read(req, issue_flags);

        /*
         * If we get -EAGAIN, recycle our buffer and just let normal poll
         * handling arm it.
         */
        if (ret == -EAGAIN) {
                /*
                 * Reset rw->len to 0 again to avoid clamping future mshot
                 * reads, in case the buffer size varies.
                 */
                if (io_kbuf_recycle(req, issue_flags))
                        rw->len = 0;
                if (issue_flags & IO_URING_F_MULTISHOT)
                        return IOU_ISSUE_SKIP_COMPLETE;
                return -EAGAIN;
        } else if (ret <= 0) {
                io_kbuf_recycle(req, issue_flags);
                if (ret < 0)
                        req_set_fail(req);
        } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                cflags = io_put_kbuf(req, ret, issue_flags);
        } else {
                /*
                 * Any successful return value will keep the multishot read
                 * armed, if it's still set. Put our buffer and post a CQE. If
                 * we fail to post a CQE, or multishot is no longer set, then
                 * jump to the termination path. This request is then done.
                 */
                cflags = io_put_kbuf(req, ret, issue_flags);
                rw->len = 0; /* similarly to above, reset len to 0 */

                if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
                        if (issue_flags & IO_URING_F_MULTISHOT) {
                                /*
                                 * Force retry, as we might have more data to
                                 * be read and otherwise it won't get retried
                                 * until (if ever) another poll is triggered.
                                 */
                                io_poll_multishot_retry(req);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }
                        return -EAGAIN;
                }
        }

        /*
         * Either an error, or we've hit overflow posting the CQE. For any
         * multishot request, hitting overflow will terminate it.
         */
        io_req_set_res(req, ret, cflags);
        io_req_rw_cleanup(req, issue_flags);
        if (issue_flags & IO_URING_F_MULTISHOT)
                return IOU_STOP_MULTISHOT;
        return IOU_OK;
}
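
/*
 * Grab write freeze protection for writes to regular files. For IOCB_NOWAIT
 * only a trylock is attempted, so we never block on a frozen filesystem.
 */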
static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
        struct inode *inode;
        bool ret;

        if (!(req->flags & REQ_F_ISREG))
                return true;
        if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
                kiocb_start_write(kiocb);
                return true;
        }

        inode = file_inode(kiocb->ki_filp);
        ret = sb_start_write_trylock(inode->i_sb);
        if (ret)
                __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
        return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        struct kiocb *kiocb = &rw->kiocb;
        ssize_t ret, ret2;
        loff_t *ppos;

        ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
        if (unlikely(ret))
                return ret;
        req->cqe.res = iov_iter_count(&io->iter);

        if (force_nonblock) {
                /* If the file doesn't support async, just async punt */
                if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
                        goto ret_eagain;

                /* Check if we can support NOWAIT. */
                if (!(kiocb->ki_flags & IOCB_DIRECT) &&
                    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
                    (req->flags & REQ_F_ISREG))
                        goto ret_eagain;

                kiocb->ki_flags |= IOCB_NOWAIT;
        } else {
                /* Ensure we clear previously set non-block flag */
                kiocb->ki_flags &= ~IOCB_NOWAIT;
        }

        ppos = io_kiocb_update_pos(req);

        ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
        if (unlikely(ret))
                return ret;

        if (unlikely(!io_kiocb_start_write(req, kiocb)))
                return -EAGAIN;
        kiocb->ki_flags |= IOCB_WRITE;

        if (likely(req->file->f_op->write_iter))
                ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
        else if (req->file->f_op->write)
                ret2 = loop_rw_iter(WRITE, rw, &io->iter);
        else
                ret2 = -EINVAL;

        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
         */
        if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
                ret2 = -EAGAIN;
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto ret_eagain;

                if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
                        trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
                                                   req->cqe.res, ret2);

                        /* This is a partial write. The file pos has already been
                         * updated, set up the async struct to complete the request
                         * in the worker. Also update bytes_done to account for
                         * the bytes already written.
                         */
                        iov_iter_save_state(&io->iter, &io->iter_state);
                        io->bytes_done += ret2;

                        if (kiocb->ki_flags & IOCB_WRITE)
                                io_req_end_write(req);
                        return -EAGAIN;
                }
done:
                return kiocb_done(req, ret2, issue_flags);
        } else {
ret_eagain:
                iov_iter_restore(&io->iter, &io->iter_state);
                io_meta_restore(io, kiocb);
                if (kiocb->ki_flags & IOCB_WRITE)
                        io_req_end_write(req);
                return -EAGAIN;
        }
}
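
/*
 * Called when the request is failed; fold in any bytes already transferred
 * before posting the final result.
 */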
void io_rw_fail(struct io_kiocb *req)
{
        int res;

        res = io_fixup_rw_res(req, req->cqe.res);
        io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
                                 unsigned int poll_flags)
{
        struct file *file = req->file;

        if (req->opcode == IORING_OP_URING_CMD) {
                struct io_uring_cmd *ioucmd;

                ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
                return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
        } else {
                struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

                return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
        }
}

static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
        struct hrtimer_sleeper timer;
        enum hrtimer_mode mode;
        ktime_t kt;
        u64 sleep_time;

        if (req->flags & REQ_F_IOPOLL_STATE)
                return 0;

        if (ctx->hybrid_poll_time == LLONG_MAX)
                return 0;

        /* Sleep for half of the typical completion time before polling */
        sleep_time = ctx->hybrid_poll_time / 2;

        kt = ktime_set(0, sleep_time);
        req->flags |= REQ_F_IOPOLL_STATE;

        mode = HRTIMER_MODE_REL;
        hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
        hrtimer_set_expires(&timer.timer, kt);
        set_current_state(TASK_INTERRUPTIBLE);
        hrtimer_sleeper_start_expires(&timer, mode);

        if (timer.task)
                io_schedule();

        hrtimer_cancel(&timer.timer);
        __set_current_state(TASK_RUNNING);
        destroy_hrtimer_on_stack(&timer.timer);
        return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
                                struct io_comp_batch *iob, unsigned int poll_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        u64 runtime, sleep_time;
        int ret;

        sleep_time = io_hybrid_iopoll_delay(ctx, req);
        ret = io_uring_classic_poll(req, iob, poll_flags);
        runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

        /*
         * Use the minimum sleep time if we're polling devices with different
         * latencies. We could get more completions from the faster ones.
         */
        if (ctx->hybrid_poll_time > runtime)
                ctx->hybrid_poll_time = runtime;

        return ret;
}
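
/*
 * Reap completions for IOPOLL requests: poll each pending request, then
 * flush completions for everything that has finished. Returns the number
 * of requests completed.
 */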
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
        struct io_wq_work_node *pos, *start, *prev;
        unsigned int poll_flags = 0;
        DEFINE_IO_COMP_BATCH(iob);
        int nr_events = 0;

        /*
         * Only spin for completions if we don't have multiple devices hanging
         * off our complete list.
         */
        if (ctx->poll_multi_queue || force_nonspin)
                poll_flags |= BLK_POLL_ONESHOT;

        wq_list_for_each(pos, start, &ctx->iopoll_list) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
                int ret;

                /*
                 * Move completed and retryable entries to our local lists.
                 * If we find a request that requires polling, break out
                 * and complete those lists first, if we have entries there.
                 */
                if (READ_ONCE(req->iopoll_completed))
                        break;

                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
                        ret = io_uring_hybrid_poll(req, &iob, poll_flags);
                else
                        ret = io_uring_classic_poll(req, &iob, poll_flags);

                if (unlikely(ret < 0))
                        return ret;
                else if (ret)
                        poll_flags |= BLK_POLL_ONESHOT;

                /* iopoll may have completed current req */
                if (!rq_list_empty(&iob.req_list) ||
                    READ_ONCE(req->iopoll_completed))
                        break;
        }

        if (!rq_list_empty(&iob.req_list))
                iob.complete(&iob);
        else if (!pos)
                return 0;

        prev = start;
        wq_list_for_each_resume(pos, prev) {
                struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
                nr_events++;
                req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
                if (req->opcode != IORING_OP_URING_CMD)
                        io_req_rw_cleanup(req, 0);
        }
        if (unlikely(!nr_events))
                return 0;

        pos = start ? start->next : ctx->iopoll_list.first;
        wq_list_cut(&ctx->iopoll_list, prev, start);

        if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
                return 0;
        ctx->submit_state.compl_reqs.first = pos;
        __io_submit_flush_completions(ctx);
        return nr_events;
}

void io_rw_cache_free(const void *entry)
{
        struct io_async_rw *rw = (struct io_async_rw *) entry;

        if (rw->free_iovec)
                kfree(rw->free_iovec);
        kfree(rw);
}