// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return 0;
}

int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Have to do this validation here: by the time we're in io_read(),
	 * rw->len might have changed due to buffer selection.
	 */
	if (req->flags & REQ_F_BUFFER_SELECT)
		return io_iov_buffer_select_prep(req);

	return 0;
}

int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);
	return 0;
}

/*
 * Multishot read is prepared just like a normal read/write request, only
 * difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}
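
/*
 * Check a completion result against what was originally submitted. On a
 * short or failed completion, either flag the request for reissue (if that
 * is safe) and return true, or record the failure in the CQE result.
 */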
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

		req->cqe.flags |= io_put_kbuf(req, issue_flags);
	}
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}
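
/*
 * Finish a read/write attempt issued from the submission path. If the
 * result can be completed inline, post it and return IOU_OK; otherwise
 * hand the result to the kiocb completion handler. Requests flagged for
 * reissue are re-queued (or failed) via task_work.
 */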
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_req_io_end() from here as we're
			 * inline from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, final_ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}
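
/*
 * Build the iov_iter for this request. Fixed buffers map the registered
 * buffer, non-vectored (and provided-buffer) requests import a single user
 * buffer, and vectored requests import the iovec array. Returns an
 * allocated iovec that the caller must eventually free, NULL if no
 * allocation was needed, or an ERR_PTR() on failure.
 */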
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_ubuf(ddir, buf, sqe_len, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (IS_ERR(*iovec))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.__iov = io->s.fast_iov;
		if (iter->__iov != fast_iov) {
			iov_off = iter_iov(iter) - fast_iov;
			io->s.iter.__iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}
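
/*
 * Stash the current iovec/iterator state in the request's async data so
 * the I/O can be retried later, after the submission-time on-stack state
 * has gone away.
 */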
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_cold_defs[req->opcode].prep_async)
		return 0;
	/* opcode type doesn't need async data */
	if (!io_cold_defs[req->opcode].async_size)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = NULL;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	if (iov) {
		iorw->free_iovec = iov;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb armed with our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}
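
/*
 * Per-issue kiocb setup: validate the file mode, apply the RWF_* flags from
 * the SQE, work out whether the request may be issued non-blocking, and pick
 * the completion handler (iopoll vs. regular).
 */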
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}

static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/*
		 * If we can poll, just do that. For a vectored read, we'll
		 * need to copy state first.
		 */
		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	iovec = NULL;
	if (ret2) {
		ret = ret > 0 ? ret : ret2;
		goto done;
	}

	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!file_can_poll(req->file))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_fill_cqe_req_aux(req,
					issue_flags & IO_URING_F_COMPLETE_DEFER,
					ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}
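
/*
 * Issue a write for the request. If the file can't do a non-blocking write,
 * or the attempt comes up short on a regular file or block device, the
 * iterator state is stashed in async data and the request is finished from
 * io-wq context.
 */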
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* File path supports NOWAIT for non-direct_IO only for block devices. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *io;

			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/* This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);

			io = req->async_data;
			if (io)
				io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return ret ? ret : -EAGAIN;
		}
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}
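
/*
 * Reap completions for IOPOLL requests: poll each pending kiocb on the
 * iopoll list, then flush CQEs for the requests that have completed.
 * Returns the number of completion events found.
 */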
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}