// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return 0;
}

int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Have to do this validation here, as by the time we're in io_read()
	 * rw->len might have changed due to buffer selection
	 */
	if (req->flags & REQ_F_BUFFER_SELECT)
		return io_iov_buffer_select_prep(req);

	return 0;
}

int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);
	return 0;
}

/*
 * Multishot read is prepared just like a normal read/write request, the only
 * difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
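
/*
 * Userspace setup of a multishot read, for illustration only (field names
 * follow the uapi struct io_uring_sqe; liburing also provides an
 * io_uring_prep_read_multishot() helper). The request must select from a
 * provided buffer group and must not carry an address or length of its own:
 *
 *	sqe->opcode = IORING_OP_READ_MULTISHOT;
 *	sqe->fd = fd;
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *	sqe->addr = 0;
 *	sqe->len = 0;
 */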

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}
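
/*
 * task_work completion callback for read/write requests: run a deferred
 * ->dio_complete() if the lower layer asked for caller completion
 * (IOCB_DIO_CALLER_COMP), do the end-of-IO accounting and notifications,
 * put any selected buffer, and post the completion.
 */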
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

		req->cqe.flags |= io_put_kbuf(req, issue_flags);
	}
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
			io_complete_rw, kiocb, ret);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_end from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, final_ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}
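
/*
 * Import the user memory described by the SQE into the iov_iter in @s.
 * Returns an allocated iovec array that the caller must free (vectored IO
 * with more segments than fit in the inline fast_iov), NULL when no separate
 * allocation was needed, or an ERR_PTR() on failure.
 */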
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_ubuf(ddir, buf, sqe_len, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (IS_ERR(*iovec))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}
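
/*
 * Stash the current iterator (and, for vectored IO that still uses the
 * inline iovecs, the fast_iov contents) in the request's async data so the
 * IO can be retried later from task or io-wq context.
 */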
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* bvec (fixed) and ubuf iters carry no iovec array, nothing to copy */
	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.__iov = io->s.fast_iov;
		if (iter->__iov != fast_iov) {
			iov_off = iter_iov(iter) - fast_iov;
			io->s.iter.__iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_cold_defs[req->opcode].prep_async)
		return 0;
	/* opcode type doesn't need async data */
	if (!io_cold_defs[req->opcode].async_size)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = NULL;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	if (iov) {
		iorw->free_iovec = iov;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when we initially tried to do the IO with the iocb
 * that had our waitqueue armed. This gets called when the page is unlocked,
 * and we generally expect that to happen when the page IO is completed and
 * the page is now uptodate. This will queue a task_work based retry of the
 * operation, attempting to copy the data again. If the latter fails because
 * the page was NOT uptodate, then we will do a thread based blocking retry
 * of the operation. That's the unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}
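
/*
 * Core read path: imports the user buffer(s), issues the read (with
 * IOCB_NOWAIT set when called from the submission path), and on -EAGAIN or
 * a short read either retries here with the page-unlock waitqueue armed or
 * saves the iterator state in async data so the remainder can be punted.
 */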
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/*
		 * If we can poll, just do that. For a vectored read, we'll
		 * need to copy state first.
		 */
		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	iovec = NULL;
	if (ret2) {
		ret = ret > 0 ? ret : ret2;
		goto done;
	}

	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!file_can_poll(req->file))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_fill_cqe_req_aux(req,
					issue_flags & IO_URING_F_COMPLETE_DEFER,
					ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}
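
/*
 * Write-side counterpart of __io_read(): import the buffer(s), start write
 * accounting for regular files, issue the write, and on -EAGAIN either punt
 * with the iterator state copied into async data or, for a partial write,
 * record the bytes already done so the punted retry completes the rest.
 */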
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/*
		 * Buffered (non-direct) IO only supports NOWAIT for block
		 * devices and files that set FMODE_BUF_WASYNC; punt buffered
		 * writes to other regular files.
		 */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *io;

			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);

			io = req->async_data;
			if (io)
				io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return ret ? ret : -EAGAIN;
		}
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}
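
/*
 * Reap completions for IOPOLL requests: walk ->iopoll_list, poll the
 * underlying files (batched where the driver supports it), then post CQEs
 * for every request that has completed. Returns the number of events found,
 * or a negative error from the driver's poll routine.
 */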
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}