// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "filetable.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov = u64_to_user_ptr(rw->addr);
	struct compat_iovec iov;

	if (copy_from_user(&iov, uiov, sizeof(iov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

	if (io_is_compat(req->ctx))
		return io_iov_compat_buffer_select_prep(rw);

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_import_vec(int ddir, struct io_kiocb *req,
			 struct io_async_rw *io,
			 const struct iovec __user *uvec,
			 size_t uvec_segs)
{
	int ret, nr_segs;
	struct iovec *iov;

	if (io->vec.iovec) {
		nr_segs = io->vec.nr;
		iov = io->vec.iovec;
	} else {
		nr_segs = 1;
		iov = &io->fast_iov;
	}

	ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
			     io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
	}
	return 0;
}

static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
				 struct io_async_rw *io, struct io_br_sel *sel,
				 unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	size_t sqe_len = rw->len;

	sel->addr = u64_to_user_ptr(rw->addr);
	if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
		return io_import_vec(ddir, req, io, sel->addr, sqe_len);

	if (io_do_buffer_select(req)) {
		*sel = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
		if (!sel->addr)
			return -ENOBUFS;
		rw->addr = (unsigned long) sel->addr;
		rw->len = sqe_len;
	}
	return import_ubuf(ddir, sel->addr, sqe_len, &io->iter);
}

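/*
 * Import the buffer described by the request and snapshot the iov_iter
 * state. A retry after a partial or failed attempt restores from this
 * saved state rather than trusting whatever the lower layers left in
 * the iterator.
 */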
static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
				      struct io_async_rw *io,
				      struct io_br_sel *sel,
				      unsigned int issue_flags)
{
	int ret;

	ret = __io_import_rw_buffer(rw, req, io, sel, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		return;

	io_alloc_cache_vec_kasan(&rw->vec);
	if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&rw->vec);

	if (io_alloc_cache_put(&req->ctx->rw_cache, rw))
		io_req_async_data_clear(req, 0);
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, eg to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *   punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *   run tw
	 *     req_rw_cleanup
	 *			iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this, any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}

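/*
 * Allocate the async data backing a read/write request, preferably from
 * the per-ring ctx->rw_cache. Doing this at prep time means the issue
 * and retry paths can rely on req->async_data being present.
 */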
static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
	if (!rw)
		return -ENOMEM;
	if (rw->vec.iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	rw->bytes_done = 0;
	return 0;
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
	io->meta_state.seed = io->meta.seed;
	iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_HAS_METADATA) {
		io->meta.seed = io->meta_state.seed;
		iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
	}
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
			 u64 attr_ptr, u64 attr_type_mask)
{
	struct io_uring_attr_pi pi_attr;
	struct io_async_rw *io;
	int ret;

	if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
			   sizeof(pi_attr)))
		return -EFAULT;

	if (pi_attr.rsvd)
		return -EINVAL;

	io = req->async_data;
	io->meta.flags = pi_attr.flags;
	io->meta.app_tag = pi_attr.app_tag;
	io->meta.seed = pi_attr.seed;
	ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
			  pi_attr.len, &io->meta.iter);
	if (unlikely(ret < 0))
		return ret;
	req->flags |= REQ_F_HAS_METADATA;
	io_meta_save_state(io);
	return ret;
}

static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io;
	unsigned ioprio;
	u64 attr_type_mask;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;
	io = req->async_data;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);
	io->buf_group = req->buf_index;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.ki_flags = 0;
	rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		rw->kiocb.ki_complete = io_complete_rw_iopoll;
	else
		rw->kiocb.ki_complete = io_complete_rw;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = (__force rwf_t) READ_ONCE(sqe->rw_flags);

	attr_type_mask = READ_ONCE(sqe->attr_type_mask);
	if (attr_type_mask) {
		u64 attr_ptr;

		/* only PI attribute is supported currently */
		if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
			return -EINVAL;

		attr_ptr = READ_ONCE(sqe->attr_ptr);
		return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
	}
	return 0;
}

static int io_rw_do_import(struct io_kiocb *req, int ddir)
{
	struct io_br_sel sel = { };

	if (io_do_buffer_select(req))
		return 0;

	return io_import_rw_buffer(ddir, req, req->async_data, &sel, 0);
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;

	return io_rw_do_import(req, ddir);
}

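/*
 * Userspace reaches the prep handlers below via IORING_OP_READ{,V} and
 * IORING_OP_WRITE{,V}. As a rough, non-normative sketch of the
 * submission side, using liburing's stock helpers:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_read(sqe, fd, buf, len, offset);
 *	io_uring_submit(&ring);
 *
 * sqe->addr, sqe->len and sqe->off then land in struct io_rw via
 * __io_prep_rw() above.
 */
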
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	int ret;

	ret = io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return 0;

	/*
	 * Have to do this validation here, since by the time this runs in
	 * io_read(), rw->len might have changed due to buffer selection
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	int ret;

	if (io->bytes_done)
		return 0;

	ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
				issue_flags);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_rw_import_reg_vec(struct io_kiocb *req,
				struct io_async_rw *io,
				int ddir, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned uvec_segs = rw->len;
	int ret;

	ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
				uvec_segs, issue_flags);
	if (unlikely(ret))
		return ret;
	iov_iter_save_state(&io->iter, &io->iter_state);
	req->flags &= ~REQ_F_IMPORT_BUFFER;
	return 0;
}

static int io_rw_prep_reg_vec(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	const struct iovec __user *uvec;

	uvec = u64_to_user_ptr(rw->addr);
	return io_prep_reg_iovec(req, &io->vec, uvec, rw->len);
}

int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;
	return io_rw_prep_reg_vec(req);
}

int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ITER_SOURCE);
	if (unlikely(ret))
		return ret;
	return io_rw_prep_reg_vec(req);
}

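/*
 * A hedged liburing-side sketch of a multishot read, as handled by
 * io_read_mshot_prep()/io_read_mshot() below; buffers must come from a
 * provided buffer group, and sqe->addr/len must be zero:
 *
 *	io_uring_prep_read_multishot(sqe, fd, 0, 0, buf_group);
 *
 * Each CQE carries IORING_CQE_F_MORE for as long as the request stays
 * armed.
 */
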
/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;

	lockdep_assert_held(&req->ctx->uring_lock);
	io_vec_free(&rw->vec);
	io_rw_recycle(req, 0);
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_async_rw *io = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;

	io_meta_restore(io, &rw->kiocb);
	iov_iter_restore(&io->iter, &io->iter_state);
	return true;
#else
	return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (res == req->cqe.res)
		return;
	if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
	} else {
		req_set_fail(req);
		req->cqe.res = res;
	}
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(tw_req, tw);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	__io_complete_rw_common(req, res);
	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req))
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
		else
			req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		io_complete_rw_iopoll(&rw->kiocb, ret);
	else
		io_complete_rw(&rw->kiocb, ret);
}

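/*
 * Finish a request from the submission path. Requests that completed
 * synchronously on a non-IOPOLL ring are finalized inline; everything
 * else funnels through io_rw_done() and the ->ki_complete() handlers
 * above.
 */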
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      struct io_br_sel *sel, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		u32 cflags = 0;

		__io_complete_rw_common(req, ret);
		/*
		 * Safe to call io_end from here as we're inline
		 * from the submission path.
		 */
		io_req_io_end(req);
		if (sel)
			cflags = io_put_kbuf(req, ret, sel->buf_list);
		io_req_set_res(req, final_ret, cflags);
		io_req_rw_cleanup(req, issue_flags);
		return IOU_COMPLETE;
	} else {
		io_rw_done(req, ret);
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct io_kiocb *req = cmd_to_io_kiocb(rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;
	if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf)
		return -EFAULT;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb having our waitqueue armed.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/*
	 * Never retry for NOWAIT or a request with metadata, we just complete
	 * with -EAGAIN.
	 */
	if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

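/*
 * Per-issue kiocb setup: validate the file mode, apply the RWF_* flags
 * from the SQE, enforce IOPOLL and metadata constraints, and decide
 * whether the request must be marked REQ_F_NOWAIT.
 */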
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;
		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
	}

	if (req->flags & REQ_F_HAS_METADATA) {
		struct io_async_rw *io = req->async_data;

		if (!(file->f_mode & FMODE_HAS_METADATA))
			return -EINVAL;

		/*
		 * We have a union of meta fields with wpq used for buffered-io
		 * in io_async_rw, so fail it here.
		 */
		if (!(req->file->f_flags & O_DIRECT))
			return -EOPNOTSUPP;
		kiocb->ki_flags |= IOCB_HAS_METADATA;
		kiocb->private = &io->meta;
	}

	return 0;
}

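/*
 * Core read path: import the buffer if needed, attempt the read, and on
 * a short read or -EAGAIN decide between completing inline, arming a
 * page-unlock driven retry via io_rw_should_retry(), or punting back
 * for blocking execution.
 */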
static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
		     unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags);
		if (unlikely(ret))
			return ret;
	} else if (io_do_buffer_select(req)) {
		ret = io_import_rw_buffer(ITER_DEST, req, io, sel, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN) {
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);
	io_meta_restore(io, kiocb);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegate to kfree */
	return ret;
}

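/*
 * io_read() wraps __io_read(): a non-negative result is completed
 * inline via kiocb_done(), while on failure a committed provided buffer
 * is recycled so it can be handed out again on retry.
 */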
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_br_sel sel = { };
	int ret;

	ret = __io_read(req, &sel, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, &sel, issue_flags);

	if (req->flags & REQ_F_BUFFERS_COMMIT)
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_br_sel sel = { };
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	/* make it sync, multishot doesn't support async execution */
	rw->kiocb.ki_complete = NULL;
	ret = __io_read(req, &sel, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, sel.buf_list, issue_flags))
			rw->len = 0;
		return IOU_RETRY;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		cflags = io_put_kbuf(req, ret, sel.buf_list);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, sel.buf_list);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);

			return IOU_RETRY;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	return IOU_COMPLETE;
}

static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}

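/*
 * Write path. Nonblocking buffered writes to regular files are only
 * attempted if the file opts in via FOP_BUFFER_WASYNC; a short write is
 * accounted in ->bytes_done and the remainder is finished from io-wq.
 */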
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already
			 * been updated, setup the async struct to complete
			 * the request in the worker. Also update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, NULL, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		io_meta_restore(io, kiocb);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
	if (unlikely(ret))
		return ret;

	return io_read(req, issue_flags);
}

int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	return io_write(req, issue_flags);
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

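/*
 * Hybrid iopoll: rather than spinning immediately, sleep for roughly
 * half of the shortest completion time seen so far (ctx->hybrid_poll_time)
 * and only then fall through to classic polling, trading a little
 * latency for less CPU time spent polling.
 */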
static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* Use half the running time to schedule/sleep */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time;
	int ret;

	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

	/*
	 * Use minimum sleep time if we're polling devices with different
	 * latencies. We could get more completions from the faster ones.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(&iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(&iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

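/*
 * Free callback for entries in ctx->rw_cache: drop any cached iovec
 * before freeing the io_async_rw itself.
 */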
void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	io_vec_free(&rw->vec);
	kfree(rw);
}