// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "filetable.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov = u64_to_user_ptr(rw->addr);
	struct compat_iovec iov;

	if (copy_from_user(&iov, uiov, sizeof(iov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

	if (io_is_compat(req->ctx))
		return io_iov_compat_buffer_select_prep(rw);

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_import_vec(int ddir, struct io_kiocb *req,
			 struct io_async_rw *io,
			 const struct iovec __user *uvec,
			 size_t uvec_segs)
{
	int ret, nr_segs;
	struct iovec *iov;

	if (io->vec.iovec) {
		nr_segs = io->vec.nr;
		iov = io->vec.iovec;
	} else {
		nr_segs = 1;
		iov = &io->fast_iov;
	}

	ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
			     io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
	}
	return 0;
}

static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
				 struct io_async_rw *io, struct io_br_sel *sel,
				 unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	size_t sqe_len = rw->len;

	sel->addr = u64_to_user_ptr(rw->addr);
	if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
		return io_import_vec(ddir, req, io, sel->addr, sqe_len);

	if (io_do_buffer_select(req)) {
		*sel = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
		if (!sel->addr)
			return -ENOBUFS;
		rw->addr = (unsigned long) sel->addr;
		rw->len = sqe_len;
	}
	return import_ubuf(ddir, sel->addr, sqe_len, &io->iter);
}

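/*
 * Import the buffer for a read/write request, then snapshot the iov_iter
 * state so that a later blocking retry can restore it after a partial or
 * failed nonblocking attempt.
 */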
static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
				      struct io_async_rw *io,
				      struct io_br_sel *sel,
				      unsigned int issue_flags)
{
	int ret;

	ret = __io_import_rw_buffer(rw, req, io, sel, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

static bool io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		return false;

	io_alloc_cache_vec_kasan(&rw->vec);
	if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&rw->vec);

	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		io_req_async_data_clear(req, 0);
		return true;
	}
	return false;
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, eg to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *   punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			      iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this, any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		if (!io_rw_recycle(req, issue_flags)) {
			struct io_async_rw *rw = req->async_data;

			io_vec_free(&rw->vec);
		}
	}
}

static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
	if (!rw)
		return -ENOMEM;
	if (rw->vec.iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	rw->bytes_done = 0;
	return 0;
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
	io->meta_state.seed = io->meta.seed;
	iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_HAS_METADATA) {
		io->meta.seed = io->meta_state.seed;
		iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
	}
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
			 u64 attr_ptr, u64 attr_type_mask)
{
	struct io_uring_attr_pi pi_attr;
	struct io_async_rw *io;
	int ret;

	if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
			   sizeof(pi_attr)))
		return -EFAULT;

	if (pi_attr.rsvd)
		return -EINVAL;

	io = req->async_data;
	io->meta.flags = pi_attr.flags;
	io->meta.app_tag = pi_attr.app_tag;
	io->meta.seed = pi_attr.seed;
	ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
			  pi_attr.len, &io->meta.iter);
	if (unlikely(ret < 0))
		return ret;
	req->flags |= REQ_F_HAS_METADATA;
	io_meta_save_state(io);
	return ret;
}

static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io;
	unsigned ioprio;
	u64 attr_type_mask;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;
	io = req->async_data;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);
	io->buf_group = req->buf_index;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.ki_flags = 0;
	rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		rw->kiocb.ki_complete = io_complete_rw_iopoll;
	else
		rw->kiocb.ki_complete = io_complete_rw;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = (__force rwf_t) READ_ONCE(sqe->rw_flags);

	attr_type_mask = READ_ONCE(sqe->attr_type_mask);
	if (attr_type_mask) {
		u64 attr_ptr;

		/* only PI attribute is supported currently */
		if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
			return -EINVAL;

		attr_ptr = READ_ONCE(sqe->attr_ptr);
		return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
	}
	return 0;
}

static int io_rw_do_import(struct io_kiocb *req, int ddir)
{
	struct io_br_sel sel = { };

	if (io_do_buffer_select(req))
		return 0;

	return io_import_rw_buffer(ddir, req, req->async_data, &sel, 0);
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;

	return io_rw_do_import(req, ddir);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			int ddir)
{
	int ret;

	ret = io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return 0;

	/*
	 * Have to do this validation here: by the time we're in io_read(),
	 * rw->len might have changed due to buffer selection.
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	int ret;

	if (io->bytes_done)
		return 0;

	ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
				issue_flags);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_rw_import_reg_vec(struct io_kiocb *req,
				struct io_async_rw *io,
				int ddir, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned uvec_segs = rw->len;
	int ret;

	ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
				uvec_segs, issue_flags);
	if (unlikely(ret))
		return ret;
	iov_iter_save_state(&io->iter, &io->iter_state);
	req->flags &= ~REQ_F_IMPORT_BUFFER;
	return 0;
}

static int io_rw_prep_reg_vec(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	const struct iovec __user *uvec;

	uvec = u64_to_user_ptr(rw->addr);
	return io_prep_reg_iovec(req, &io->vec, uvec, rw->len);
}

int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;
	return io_rw_prep_reg_vec(req);
}

int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ITER_SOURCE);
	if (unlikely(ret))
		return ret;
	return io_rw_prep_reg_vec(req);
}

/*
 * Multishot read is prepared just like a normal read/write request, the only
 * difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;

	lockdep_assert_held(&req->ctx->uring_lock);
	io_vec_free(&rw->vec);
	io_rw_recycle(req, 0);
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_async_rw *io = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;

	io_meta_restore(io, &rw->kiocb);
	iov_iter_restore(&io->iter, &io->iter_state);
	return true;
#else
	return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (res == req->cqe.res)
		return;
	if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
	} else {
		req_set_fail(req);
		req->cqe.res = res;
	}
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(tw_req, tw);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	__io_complete_rw_common(req, res);
	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req))
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
		else
			req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		io_complete_rw_iopoll(&rw->kiocb, ret);
	else
		io_complete_rw(&rw->kiocb, ret);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      struct io_br_sel *sel, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		u32 cflags = 0;

		__io_complete_rw_common(req, ret);
		/*
		 * Safe to call io_end from here as we're inline
		 * from the submission path.
		 */
		io_req_io_end(req);
		if (sel)
			cflags = io_put_kbuf(req, ret, sel->buf_list);
		io_req_set_res(req, final_ret, cflags);
		io_req_rw_cleanup(req, issue_flags);
		return IOU_COMPLETE;
	} else {
		io_rw_done(req, ret);
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct io_kiocb *req = cmd_to_io_kiocb(rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;
	if ((req->flags & REQ_F_BUF_NODE) &&
	    (req->buf_node->buf->flags & IO_REGBUF_F_KBUF))
		return -EFAULT;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb armed with our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/*
	 * Never retry for NOWAIT or a request with metadata, we just complete
	 * with -EAGAIN.
	 */
	if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;
		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
	}

	if (req->flags & REQ_F_HAS_METADATA) {
		struct io_async_rw *io = req->async_data;

		if (!(file->f_mode & FMODE_HAS_METADATA))
			return -EINVAL;

		/*
		 * We have a union of meta fields with wpq used for buffered-io
		 * in io_async_rw, so fail it here.
		 */
		if (!(req->file->f_flags & O_DIRECT))
			return -EOPNOTSUPP;
		kiocb->ki_flags |= IOCB_HAS_METADATA;
		kiocb->private = &io->meta;
	}

	return 0;
}

static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
		     unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags);
		if (unlikely(ret))
			return ret;
	} else if (io_do_buffer_select(req)) {
		ret = io_import_rw_buffer(ITER_DEST, req, io, sel, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN) {
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);
	io_meta_restore(io, kiocb);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_br_sel sel = { };
	int ret;

	ret = __io_read(req, &sel, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, &sel, issue_flags);

	if (req->flags & REQ_F_BUFFERS_COMMIT)
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_br_sel sel = { };
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	/* make it sync, multishot doesn't support async execution */
	rw->kiocb.ki_complete = NULL;
	ret = __io_read(req, &sel, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, sel.buf_list, issue_flags))
			rw->len = 0;
		return IOU_RETRY;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		cflags = io_put_kbuf(req, ret, sel.buf_list);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, sel.buf_list);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);

			return IOU_RETRY;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	return IOU_COMPLETE;
}

static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/* This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, NULL, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		io_meta_restore(io, kiocb);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
	if (unlikely(ret))
		return ret;

	return io_read(req, issue_flags);
}

int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	return io_write(req, issue_flags);
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* Sleep for half the known completion time before polling */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time, iopoll_start;
	int ret;

	iopoll_start = READ_ONCE(req->iopoll_start);
	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - iopoll_start - sleep_time;

	/*
	 * Use minimum sleep time if we're polling devices with different
	 * latencies. We could get more completions from the faster ones.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	struct io_kiocb *req, *tmp;
	int nr_events = 0;

	/*
	 * Store the polling io_ring_ctx so drivers can detect if they're
	 * completing a request in the same ring context that's polling.
	 */
	iob.poll_ctx = ctx;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	list_for_each_entry(req, &ctx->iopoll_list, iopoll_node) {
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(&iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(&iob.req_list))
		iob.complete(&iob);

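	/*
	 * Second pass: move everything that has completed onto the
	 * completion list, and flush the CQEs below.
	 */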
	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_node) {
		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			continue;
		list_del(&req->iopoll_node);
		wq_list_add_tail(&req->comp_list, &ctx->submit_state.compl_reqs);
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (nr_events)
		__io_submit_flush_completions(ctx);
	return nr_events;
}

/* Free callback for entries cached in the ctx's rw_cache */
void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	io_vec_free(&rw->vec);
	kfree(rw);
}