Lines matching "supports", "-", "cqe" in io_uring/rw.c

1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/blk-mq.h>
40 if (req->flags & REQ_F_SUPPORT_NOWAIT) in io_file_supports_nowait()
46 return vfs_poll(req->file, &pt) & mask; in io_file_supports_nowait()
58 uiov = u64_to_user_ptr(rw->addr); in io_iov_compat_buffer_select_prep()
60 return -EFAULT; in io_iov_compat_buffer_select_prep()
61 if (__get_user(clen, &uiov->iov_len)) in io_iov_compat_buffer_select_prep()
62 return -EFAULT; in io_iov_compat_buffer_select_prep()
64 return -EINVAL; in io_iov_compat_buffer_select_prep()
66 rw->len = clen; in io_iov_compat_buffer_select_prep()
77 if (rw->len != 1) in io_iov_buffer_select_prep()
78 return -EINVAL; in io_iov_buffer_select_prep()
81 if (req->ctx->compat) in io_iov_buffer_select_prep()
85 uiov = u64_to_user_ptr(rw->addr); in io_iov_buffer_select_prep()
87 return -EFAULT; in io_iov_buffer_select_prep()
88 rw->len = iov.iov_len; in io_iov_buffer_select_prep()
96 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in __io_import_iovec()
103 buf = u64_to_user_ptr(rw->addr); in __io_import_iovec()
104 sqe_len = rw->len; in __io_import_iovec()
106 if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
110 return -ENOBUFS; in __io_import_iovec()
111 rw->addr = (unsigned long) buf; in __io_import_iovec()
112 rw->len = sqe_len; in __io_import_iovec()
115 return import_ubuf(ddir, buf, sqe_len, &io->iter); in __io_import_iovec()
118 if (io->free_iovec) { in __io_import_iovec()
119 nr_segs = io->free_iov_nr; in __io_import_iovec()
120 iov = io->free_iovec; in __io_import_iovec()
122 iov = &io->fast_iov; in __io_import_iovec()
125 ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter, in __io_import_iovec()
126 req->ctx->compat); in __io_import_iovec()
130 req->flags |= REQ_F_NEED_CLEANUP; in __io_import_iovec()
131 io->free_iov_nr = io->iter.nr_segs; in __io_import_iovec()
132 kfree(io->free_iovec); in __io_import_iovec()
133 io->free_iovec = iov; in __io_import_iovec()
148 iov_iter_save_state(&io->iter, &io->iter_state); in io_import_iovec()
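The __io_import_iovec() lines above show how a vectored request either imports a full iovec array (caching the allocation in free_iovec) or, for non-vectored and buffer-select requests, collapses to a single import_ubuf(). For context on the user-facing side, here is a minimal hedged sketch of a vectored read submitted through liburing that ends up on exactly this import path; the file name and buffer sizes are arbitrary placeholders.

        /* Minimal sketch: a vectored read submitted through io_uring, which the
         * kernel imports via __io_import_iovec(). Assumes liburing is installed;
         * "data.bin" and the buffer sizes are placeholders. */
        #include <liburing.h>
        #include <sys/uio.h>
        #include <fcntl.h>
        #include <stdio.h>

        int main(void)
        {
                struct io_uring ring;
                struct io_uring_sqe *sqe;
                struct io_uring_cqe *cqe;
                char a[4096], b[4096];
                struct iovec iov[2] = {
                        { .iov_base = a, .iov_len = sizeof(a) },
                        { .iov_base = b, .iov_len = sizeof(b) },
                };
                int fd;

                fd = open("data.bin", O_RDONLY);
                if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                        return 1;

                sqe = io_uring_get_sqe(&ring);
                /* sqe->addr points at the iovec array, sqe->len is the vector count */
                io_uring_prep_readv(sqe, fd, iov, 2, 0);
                io_uring_submit(&ring);

                if (!io_uring_wait_cqe(&ring, &cqe)) {
                        printf("readv returned %d\n", cqe->res);
                        io_uring_cqe_seen(&ring, cqe);
                }
                io_uring_queue_exit(&ring);
                return 0;
        }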
154 struct io_async_rw *rw = req->async_data; in io_rw_recycle()
159 io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr); in io_rw_recycle()
160 if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) { in io_rw_recycle()
161 req->async_data = NULL; in io_rw_recycle()
162 req->flags &= ~REQ_F_ASYNC_DATA; in io_rw_recycle()
169 * Disable quick recycling for anything that's gone through io-wq. in io_req_rw_cleanup()
174 * task io-wq in io_req_rw_cleanup()
176 * punt to io-wq in io_req_rw_cleanup()
179 * ->ki_complete() in io_req_rw_cleanup()
184 * iov_iter_count() <- look at iov_iter again in io_req_rw_cleanup()
186 * which can lead to a UAF. This is only possible for io-wq offload in io_req_rw_cleanup()
187 * as the cleanup can run in parallel. As io-wq is not the fast path, in io_req_rw_cleanup()
191 * path should assume that a successful (or -EIOCBQUEUED) return can in io_req_rw_cleanup()
195 if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) { in io_req_rw_cleanup()
196 req->flags &= ~REQ_F_NEED_CLEANUP; in io_req_rw_cleanup()
203 struct io_ring_ctx *ctx = req->ctx; in io_rw_alloc_async()
206 rw = io_uring_alloc_async_data(&ctx->rw_cache, req); in io_rw_alloc_async()
208 return -ENOMEM; in io_rw_alloc_async()
209 if (rw->free_iovec) in io_rw_alloc_async()
210 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_alloc_async()
211 rw->bytes_done = 0; in io_rw_alloc_async()
220 return -ENOMEM; in io_prep_rw_setup()
225 rw = req->async_data; in io_prep_rw_setup()
231 io->meta_state.seed = io->meta.seed; in io_meta_save_state()
232 iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta); in io_meta_save_state()
237 if (kiocb->ki_flags & IOCB_HAS_METADATA) { in io_meta_restore()
238 io->meta.seed = io->meta_state.seed; in io_meta_restore()
239 iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta); in io_meta_restore()
252 return -EFAULT; in io_prep_rw_pi()
255 return -EINVAL; in io_prep_rw_pi()
257 io = req->async_data; in io_prep_rw_pi()
258 io->meta.flags = pi_attr.flags; in io_prep_rw_pi()
259 io->meta.app_tag = pi_attr.app_tag; in io_prep_rw_pi()
260 io->meta.seed = pi_attr.seed; in io_prep_rw_pi()
262 pi_attr.len, &io->meta.iter); in io_prep_rw_pi()
265 req->flags |= REQ_F_HAS_METADATA; in io_prep_rw_pi()
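io_prep_rw_pi() above copies a user-supplied protection-information descriptor when the PI attribute bit is set in sqe->attr_type_mask (read by io_prep_rw(), listed below) and imports the PI buffer into io->meta.iter. Below is a hedged sketch of the user-side descriptor under the assumption that the uapi names are struct io_uring_attr_pi and IORING_RW_ATTR_FLAG_PI and that the liburing headers in use expose the attr_ptr/attr_type_mask SQE fields; the zeroed flags/seed/app_tag are placeholders.

        /* Hedged sketch: attaching protection information to a read. The SQE
         * fields attr_ptr/attr_type_mask match io_prep_rw(); the struct and flag
         * names (struct io_uring_attr_pi, IORING_RW_ATTR_FLAG_PI) are assumed
         * and should be checked against your linux/io_uring.h. */
        #include <liburing.h>
        #include <stdint.h>
        #include <string.h>

        static struct io_uring_attr_pi pi;      /* static: must still be readable at submit time */

        static void prep_read_with_pi(struct io_uring_sqe *sqe, int fd,
                                      void *buf, unsigned len,
                                      void *pi_buf, unsigned pi_len)
        {
                memset(&pi, 0, sizeof(pi));
                /* pi_buf is imported via import_ubuf() in io_prep_rw_pi() and must
                 * stay valid until the CQE arrives */
                pi.addr = (uint64_t)(uintptr_t)pi_buf;
                pi.len = pi_len;
                pi.flags = 0;           /* placeholder: no guard/app/ref tag checks */
                pi.app_tag = 0;
                pi.seed = 0;

                /* fd must be opened O_DIRECT: io_rw_init_file() rejects metadata on buffered I/O */
                io_uring_prep_read(sqe, fd, buf, len, 0);
                sqe->attr_ptr = (uint64_t)(uintptr_t)&pi;
                sqe->attr_type_mask = IORING_RW_ATTR_FLAG_PI;
        }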
278 rw->kiocb.ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
279 /* used for fixed read/write too - just read unconditionally */ in io_prep_rw()
280 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
282 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
288 rw->kiocb.ki_ioprio = ioprio; in io_prep_rw()
290 rw->kiocb.ki_ioprio = get_current_ioprio(); in io_prep_rw()
292 rw->kiocb.dio_complete = NULL; in io_prep_rw()
293 rw->kiocb.ki_flags = 0; in io_prep_rw()
295 if (req->ctx->flags & IORING_SETUP_IOPOLL) in io_prep_rw()
296 rw->kiocb.ki_complete = io_complete_rw_iopoll; in io_prep_rw()
298 rw->kiocb.ki_complete = io_complete_rw; in io_prep_rw()
300 rw->addr = READ_ONCE(sqe->addr); in io_prep_rw()
301 rw->len = READ_ONCE(sqe->len); in io_prep_rw()
302 rw->flags = READ_ONCE(sqe->rw_flags); in io_prep_rw()
308 attr_type_mask = READ_ONCE(sqe->attr_type_mask); in io_prep_rw()
314 return -EINVAL; in io_prep_rw()
316 attr_ptr = READ_ONCE(sqe->attr_ptr); in io_prep_rw()
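io_prep_rw() above is where the SQE lands in the kiocb: sqe->off becomes ki_pos, a nonzero sqe->ioprio becomes ki_ioprio, and addr/len/rw_flags are stashed on the io_rw command. A short hedged sketch of that mapping from the submitter's side; the IOPRIO_* macros are from the uapi <linux/ioprio.h> and the priority value is just an example.

        /* Sketch of the SQE fields io_prep_rw() consumes: the prep helper fills
         * addr/len/off, while ioprio is set directly on the SQE. */
        #include <liburing.h>
        #include <linux/ioprio.h>

        static void prep_prio_read(struct io_uring_sqe *sqe, int fd,
                                   void *buf, unsigned nbytes, __u64 off)
        {
                io_uring_prep_read(sqe, fd, buf, nbytes, off);  /* sqe->addr, sqe->len, sqe->off */
                /* a nonzero ioprio ends up in kiocb->ki_ioprio; zero falls back to
                 * get_current_ioprio() as in io_prep_rw() above */
                sqe->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
        }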
335 const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT); in io_prep_rwv()
345 * Have to do this validation here, as this is in io_read() rw->len in io_prep_rwv()
365 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw_fixed()
374 node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index); in io_prep_rw_fixed()
376 return -EFAULT; in io_prep_rw_fixed()
379 io = req->async_data; in io_prep_rw_fixed()
380 ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len); in io_prep_rw_fixed()
381 iov_iter_save_state(&io->iter, &io->iter_state); in io_prep_rw_fixed()
405 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_read_mshot_prep()
406 return -EINVAL; in io_read_mshot_prep()
412 if (rw->addr || rw->len) in io_read_mshot_prep()
413 return -EINVAL; in io_read_mshot_prep()
415 req->flags |= REQ_F_APOLL_MULTISHOT; in io_read_mshot_prep()
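io_read_mshot_prep() above requires buffer selection and rejects any fixed addr/len, so multishot reads always pull buffers from a provided buffer group. A hedged sketch of the user-side setup follows; the helper names (io_uring_setup_buf_ring, io_uring_prep_read_multishot) are from recent liburing releases, and BGID, buffer count, and sizes are arbitrary.

        /* Sketch: multishot read using a provided buffer ring, matching
         * io_read_mshot_prep()'s requirement of buffer selection with no addr/len. */
        #include <liburing.h>
        #include <errno.h>
        #include <stdlib.h>

        #define BGID     1
        #define NR_BUFS  8
        #define BUF_SIZE 4096

        static int start_multishot_read(struct io_uring *ring, int fd)
        {
                struct io_uring_buf_ring *br;
                struct io_uring_sqe *sqe;
                char *base;
                int i, err;

                br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &err);
                if (!br)
                        return err;
                base = malloc((size_t)NR_BUFS * BUF_SIZE);
                if (!base)
                        return -ENOMEM;
                for (i = 0; i < NR_BUFS; i++)
                        io_uring_buf_ring_add(br, base + i * BUF_SIZE, BUF_SIZE, i,
                                              io_uring_buf_ring_mask(NR_BUFS), i);
                io_uring_buf_ring_advance(br, NR_BUFS);

                sqe = io_uring_get_sqe(ring);
                /* nbytes == 0: each read is sized by the selected buffer, in line
                 * with the rw->len = 0 handling in io_read_mshot() below */
                io_uring_prep_read_multishot(sqe, fd, 0, 0, BGID);
                return io_uring_submit(ring);
        }

While the request stays armed, each completion carries IORING_CQE_F_MORE, and IORING_CQE_F_BUFFER plus the upper CQE flag bits identify which buffer was consumed.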
421 lockdep_assert_held(&req->ctx->uring_lock); in io_readv_writev_cleanup()
429 if (rw->kiocb.ki_pos != -1) in io_kiocb_update_pos()
430 return &rw->kiocb.ki_pos; in io_kiocb_update_pos()
432 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
433 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
434 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
435 return &rw->kiocb.ki_pos; in io_kiocb_update_pos()
438 rw->kiocb.ki_pos = 0; in io_kiocb_update_pos()
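io_kiocb_update_pos() above gives read(2)-like semantics: an SQE offset of -1 on a non-stream file uses the current file position and, via REQ_F_CUR_POS, writes the updated position back in kiocb_done(). A short sketch of relying on that from user space; fd and buffers are placeholders.

        /* Sketch: two sequential reads that use and advance the file position by
         * passing -1 as the offset, kept in order with a link. */
        #include <liburing.h>

        static void queue_sequential_reads(struct io_uring *ring, int fd,
                                           char *buf1, char *buf2, unsigned len)
        {
                struct io_uring_sqe *sqe;

                sqe = io_uring_get_sqe(ring);
                io_uring_prep_read(sqe, fd, buf1, len, -1);     /* offset -1: use f_pos */
                sqe->flags |= IOSQE_IO_LINK;                    /* order the two reads */

                sqe = io_uring_get_sqe(ring);
                io_uring_prep_read(sqe, fd, buf2, len, -1);     /* continues where the first left off */
        }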
446 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
447 struct io_async_rw *io = req->async_data; in io_rw_should_reissue()
448 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
452 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
453 !(ctx->flags & IORING_SETUP_IOPOLL))) in io_rw_should_reissue()
458 * -EAGAIN. in io_rw_should_reissue()
460 if (percpu_ref_is_dying(&ctx->refs)) in io_rw_should_reissue()
463 io_meta_restore(io, &rw->kiocb); in io_rw_should_reissue()
464 iov_iter_restore(&io->iter, &io->iter_state); in io_rw_should_reissue()
473 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
476 kiocb_end_write(&rw->kiocb); in io_req_end_write()
488 if (rw->kiocb.ki_flags & IOCB_WRITE) { in io_req_io_end()
490 fsnotify_modify(req->file); in io_req_io_end()
492 fsnotify_access(req->file); in io_req_io_end()
498 if (res == req->cqe.res) in __io_complete_rw_common()
500 if (res == -EAGAIN && io_rw_should_reissue(req)) { in __io_complete_rw_common()
501 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in __io_complete_rw_common()
504 req->cqe.res = res; in __io_complete_rw_common()
510 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
513 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
515 res = io->bytes_done; in io_fixup_rw_res()
517 res += io->bytes_done; in io_fixup_rw_res()
525 struct kiocb *kiocb = &rw->kiocb; in io_req_rw_complete()
527 if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) { in io_req_rw_complete()
528 long res = kiocb->dio_complete(rw->kiocb.private); in io_req_rw_complete()
535 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) in io_req_rw_complete()
536 req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0); in io_req_rw_complete()
547 if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) { in io_complete_rw()
551 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
560 if (kiocb->ki_flags & IOCB_WRITE) in io_complete_rw_iopoll()
562 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
563 if (res == -EAGAIN && io_rw_should_reissue(req)) in io_complete_rw_iopoll()
564 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in io_complete_rw_iopoll()
566 req->cqe.res = res; in io_complete_rw_iopoll()
569 /* order with io_iopoll_complete() checking ->iopoll_completed */ in io_complete_rw_iopoll()
570 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
578 if (ret == -EIOCBQUEUED) in io_rw_done()
584 case -ERESTARTSYS: in io_rw_done()
585 case -ERESTARTNOINTR: in io_rw_done()
586 case -ERESTARTNOHAND: in io_rw_done()
587 case -ERESTART_RESTARTBLOCK: in io_rw_done()
593 ret = -EINTR; in io_rw_done()
598 if (req->ctx->flags & IORING_SETUP_IOPOLL) in io_rw_done()
599 io_complete_rw_iopoll(&rw->kiocb, ret); in io_rw_done()
601 io_complete_rw(&rw->kiocb, ret); in io_rw_done()
610 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
611 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
612 if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) { in kiocb_done()
631 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos; in io_kiocb_ppos()
635 * For files that don't have ->read_iter() and ->write_iter(), handle them
636 * by looping over ->read() or ->write() manually.
640 struct kiocb *kiocb = &rw->kiocb; in loop_rw_iter()
641 struct file *file = kiocb->ki_filp; in loop_rw_iter()
647 * support non-blocking either. For the latter, this just causes in loop_rw_iter()
650 if (kiocb->ki_flags & IOCB_HIPRI) in loop_rw_iter()
651 return -EOPNOTSUPP; in loop_rw_iter()
652 if ((kiocb->ki_flags & IOCB_NOWAIT) && in loop_rw_iter()
653 !(kiocb->ki_filp->f_flags & O_NONBLOCK)) in loop_rw_iter()
654 return -EAGAIN; in loop_rw_iter()
664 addr = iter->ubuf + iter->iov_offset; in loop_rw_iter()
670 addr = u64_to_user_ptr(rw->addr); in loop_rw_iter()
671 len = rw->len; in loop_rw_iter()
675 nr = file->f_op->read(file, addr, len, ppos); in loop_rw_iter()
677 nr = file->f_op->write(file, addr, len, ppos); in loop_rw_iter()
688 rw->addr += nr; in loop_rw_iter()
689 rw->len -= nr; in loop_rw_iter()
690 if (!rw->len) in loop_rw_iter()
714 struct io_kiocb *req = wait->private; in io_async_buf_func()
723 rw->kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
724 list_del_init(&wait->entry); in io_async_buf_func()
743 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
744 struct wait_page_queue *wait = &io->wpq; in io_rw_should_retry()
746 struct kiocb *kiocb = &rw->kiocb; in io_rw_should_retry()
750 * with -EAGAIN. in io_rw_should_retry()
752 if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA)) in io_rw_should_retry()
756 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) in io_rw_should_retry()
764 !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC)) in io_rw_should_retry()
767 wait->wait.func = io_async_buf_func; in io_rw_should_retry()
768 wait->wait.private = req; in io_rw_should_retry()
769 wait->wait.flags = 0; in io_rw_should_retry()
770 INIT_LIST_HEAD(&wait->wait.entry); in io_rw_should_retry()
771 kiocb->ki_flags |= IOCB_WAITQ; in io_rw_should_retry()
772 kiocb->ki_flags &= ~IOCB_NOWAIT; in io_rw_should_retry()
773 kiocb->ki_waitq = wait; in io_rw_should_retry()
779 struct file *file = rw->kiocb.ki_filp; in io_iter_do_read()
781 if (likely(file->f_op->read_iter)) in io_iter_do_read()
782 return file->f_op->read_iter(&rw->kiocb, iter); in io_iter_do_read()
783 else if (file->f_op->read) in io_iter_do_read()
786 return -EINVAL; in io_iter_do_read()
791 return req->flags & REQ_F_ISREG || in need_complete_io()
792 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
798 struct kiocb *kiocb = &rw->kiocb; in io_rw_init_file()
799 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
800 struct file *file = req->file; in io_rw_init_file()
803 if (unlikely(!(file->f_mode & mode))) in io_rw_init_file()
804 return -EBADF; in io_rw_init_file()
806 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
807 req->flags |= io_file_get_flags(file); in io_rw_init_file()
809 kiocb->ki_flags = file->f_iocb_flags; in io_rw_init_file()
810 ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type); in io_rw_init_file()
813 kiocb->ki_flags |= IOCB_ALLOC_CACHE; in io_rw_init_file()
817 * supports async. Otherwise it's impossible to use O_NONBLOCK files in io_rw_init_file()
820 if (kiocb->ki_flags & IOCB_NOWAIT || in io_rw_init_file()
821 ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT)))) in io_rw_init_file()
822 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
824 if (ctx->flags & IORING_SETUP_IOPOLL) { in io_rw_init_file()
825 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) in io_rw_init_file()
826 return -EOPNOTSUPP; in io_rw_init_file()
827 kiocb->private = NULL; in io_rw_init_file()
828 kiocb->ki_flags |= IOCB_HIPRI; in io_rw_init_file()
829 req->iopoll_completed = 0; in io_rw_init_file()
830 if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) { in io_rw_init_file()
832 req->flags &= ~REQ_F_IOPOLL_STATE; in io_rw_init_file()
833 req->iopoll_start = ktime_get_ns(); in io_rw_init_file()
836 if (kiocb->ki_flags & IOCB_HIPRI) in io_rw_init_file()
837 return -EINVAL; in io_rw_init_file()
840 if (req->flags & REQ_F_HAS_METADATA) { in io_rw_init_file()
841 struct io_async_rw *io = req->async_data; in io_rw_init_file()
844 * We have a union of meta fields with wpq used for buffered-io in io_rw_init_file()
847 if (!(req->file->f_flags & O_DIRECT)) in io_rw_init_file()
848 return -EOPNOTSUPP; in io_rw_init_file()
849 kiocb->ki_flags |= IOCB_HAS_METADATA; in io_rw_init_file()
850 kiocb->private = &io->meta; in io_rw_init_file()
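io_rw_init_file() above is where IOPOLL requests are vetted: the ring must be created with IORING_SETUP_IOPOLL, the file must be opened O_DIRECT (so IOCB_DIRECT is set) and provide ->iopoll, and metadata requests likewise require O_DIRECT. A hedged user-side sketch of a polled read under those constraints; the device path and 4 KiB alignment are placeholders.

        /* Sketch: polled O_DIRECT read, matching io_rw_init_file()'s IOPOLL checks.
         * "/dev/nvme0n1" is a placeholder; completions are reaped by polling. */
        #define _GNU_SOURCE
        #include <liburing.h>
        #include <fcntl.h>
        #include <stdlib.h>
        #include <stdio.h>

        int main(void)
        {
                struct io_uring ring;
                struct io_uring_sqe *sqe;
                struct io_uring_cqe *cqe;
                void *buf;
                int fd;

                if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
                        return 1;
                fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
                if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                        return 1;

                sqe = io_uring_get_sqe(&ring);
                io_uring_prep_read(sqe, fd, buf, 4096, 0);
                io_uring_submit(&ring);

                /* with IOPOLL, waiting for a CQE actively polls ->iopoll() under the hood */
                if (!io_uring_wait_cqe(&ring, &cqe)) {
                        printf("polled read: %d\n", cqe->res);
                        io_uring_cqe_seen(&ring, cqe);
                }
                io_uring_queue_exit(&ring);
                return 0;
        }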
860 struct io_async_rw *io = req->async_data; in __io_read()
861 struct kiocb *kiocb = &rw->kiocb; in __io_read()
873 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
878 return -EAGAIN; in __io_read()
879 kiocb->ki_flags |= IOCB_NOWAIT; in __io_read()
881 /* Ensure we clear previously set non-block flag */ in __io_read()
882 kiocb->ki_flags &= ~IOCB_NOWAIT; in __io_read()
887 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in __io_read()
891 ret = io_iter_do_read(rw, &io->iter); in __io_read()
894 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT in __io_read()
895 * issue, even though they should be returning -EAGAIN. To be safe, in __io_read()
898 if (ret == -EOPNOTSUPP && force_nonblock) in __io_read()
899 ret = -EAGAIN; in __io_read()
901 if (ret == -EAGAIN) { in __io_read()
904 return -EAGAIN; in __io_read()
905 /* IOPOLL retry should happen for io-wq threads */ in __io_read()
906 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_read()
909 if (req->flags & REQ_F_NOWAIT) in __io_read()
912 } else if (ret == -EIOCBQUEUED) { in __io_read()
914 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in __io_read()
915 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) || in __io_read()
926 iov_iter_restore(&io->iter, &io->iter_state); in __io_read()
935 iov_iter_advance(&io->iter, ret); in __io_read()
936 if (!iov_iter_count(&io->iter)) in __io_read()
938 io->bytes_done += ret; in __io_read()
939 iov_iter_save_state(&io->iter, &io->iter_state); in __io_read()
943 kiocb->ki_flags &= ~IOCB_WAITQ; in __io_read()
944 return -EAGAIN; in __io_read()
947 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
950 * we get -EIOCBQUEUED, then we'll get a notification when the in __io_read()
954 ret = io_iter_do_read(rw, &io->iter); in __io_read()
955 if (ret == -EIOCBQUEUED) in __io_read()
958 kiocb->ki_flags &= ~IOCB_WAITQ; in __io_read()
959 iov_iter_restore(&io->iter, &io->iter_state); in __io_read()
987 return -EBADFD; in io_read_mshot()
990 rw->kiocb.ki_complete = NULL; in io_read_mshot()
994 * If we get -EAGAIN, recycle our buffer and just let normal poll in io_read_mshot()
997 if (ret == -EAGAIN) { in io_read_mshot()
999 * Reset rw->len to 0 again to avoid clamping future mshot in io_read_mshot()
1003 rw->len = 0; in io_read_mshot()
1006 return -EAGAIN; in io_read_mshot()
1011 } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_read_mshot()
1016 * armed, if it's still set. Put our buffer and post a CQE. If in io_read_mshot()
1017 * we fail to post a CQE, or multishot is no longer set, then in io_read_mshot()
1021 rw->len = 0; /* similarly to above, reset len to 0 */ in io_read_mshot()
1033 return -EAGAIN; in io_read_mshot()
1038 * Either an error, or we've hit overflow posting the CQE. For any in io_read_mshot()
1053 if (!(req->flags & REQ_F_ISREG)) in io_kiocb_start_write()
1055 if (!(kiocb->ki_flags & IOCB_NOWAIT)) { in io_kiocb_start_write()
1060 inode = file_inode(kiocb->ki_filp); in io_kiocb_start_write()
1061 ret = sb_start_write_trylock(inode->i_sb); in io_kiocb_start_write()
1063 __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE); in io_kiocb_start_write()
1071 struct io_async_rw *io = req->async_data; in io_write()
1072 struct kiocb *kiocb = &rw->kiocb; in io_write()
1079 req->cqe.res = iov_iter_count(&io->iter); in io_write()
1087 if (!(kiocb->ki_flags & IOCB_DIRECT) && in io_write()
1088 !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) && in io_write()
1089 (req->flags & REQ_F_ISREG)) in io_write()
1092 kiocb->ki_flags |= IOCB_NOWAIT; in io_write()
1094 /* Ensure we clear previously set non-block flag */ in io_write()
1095 kiocb->ki_flags &= ~IOCB_NOWAIT; in io_write()
1100 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
1105 return -EAGAIN; in io_write()
1106 kiocb->ki_flags |= IOCB_WRITE; in io_write()
1108 if (likely(req->file->f_op->write_iter)) in io_write()
1109 ret2 = req->file->f_op->write_iter(kiocb, &io->iter); in io_write()
1110 else if (req->file->f_op->write) in io_write()
1111 ret2 = loop_rw_iter(WRITE, rw, &io->iter); in io_write()
1113 ret2 = -EINVAL; in io_write()
1116 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just in io_write()
1119 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) in io_write()
1120 ret2 = -EAGAIN; in io_write()
1122 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
1124 if (!force_nonblock || ret2 != -EAGAIN) { in io_write()
1125 /* IOPOLL retry should happen for io-wq threads */ in io_write()
1126 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
1129 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
1130 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
1131 req->cqe.res, ret2); in io_write()
1138 iov_iter_save_state(&io->iter, &io->iter_state); in io_write()
1139 io->bytes_done += ret2; in io_write()
1141 if (kiocb->ki_flags & IOCB_WRITE) in io_write()
1143 return -EAGAIN; in io_write()
1149 iov_iter_restore(&io->iter, &io->iter_state); in io_write()
1151 if (kiocb->ki_flags & IOCB_WRITE) in io_write()
1153 return -EAGAIN; in io_write()
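The io_write() lines above show the nonblocking dance: a write that cannot proceed without blocking returns -EAGAIN and is normally punted to io-wq, unless REQ_F_NOWAIT was requested, in which case the -EAGAIN is completed back to the submitter. A hedged sketch of opting into that per request through rw_flags; the writev2 prep helper and RWF_NOWAIT are standard liburing/glibc names, the fd and iovec are placeholders.

        /* Sketch: a write that asks the kernel not to block (RWF_NOWAIT ends up in
         * sqe->rw_flags via the *v2 prep helper). If the write cannot proceed
         * immediately, the CQE carries -EAGAIN instead of the request being
         * retried from the io-wq worker pool. */
        #define _GNU_SOURCE
        #include <liburing.h>
        #include <sys/uio.h>

        static void prep_nowait_write(struct io_uring_sqe *sqe, int fd,
                                      const struct iovec *iov, unsigned nr, __u64 off)
        {
                io_uring_prep_writev2(sqe, fd, iov, nr, off, RWF_NOWAIT);
        }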
1161 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
1162 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1168 struct file *file = req->file; in io_uring_classic_poll()
1170 if (req->opcode == IORING_OP_URING_CMD) { in io_uring_classic_poll()
1174 return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags); in io_uring_classic_poll()
1178 return file->f_op->iopoll(&rw->kiocb, iob, poll_flags); in io_uring_classic_poll()
1189 if (req->flags & REQ_F_IOPOLL_STATE) in io_hybrid_iopoll_delay()
1192 if (ctx->hybrid_poll_time == LLONG_MAX) in io_hybrid_iopoll_delay()
1196 sleep_time = ctx->hybrid_poll_time / 2; in io_hybrid_iopoll_delay()
1199 req->flags |= REQ_F_IOPOLL_STATE; in io_hybrid_iopoll_delay()
1219 struct io_ring_ctx *ctx = req->ctx; in io_uring_hybrid_poll()
1225 runtime = ktime_get_ns() - req->iopoll_start - sleep_time; in io_uring_hybrid_poll()
1231 if (ctx->hybrid_poll_time > runtime) in io_uring_hybrid_poll()
1232 ctx->hybrid_poll_time = runtime; in io_uring_hybrid_poll()
1248 if (ctx->poll_multi_queue || force_nonspin) in io_do_iopoll()
1251 wq_list_for_each(pos, start, &ctx->iopoll_list) { in io_do_iopoll()
1260 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1263 if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) in io_do_iopoll()
1275 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1288 /* order with io_complete_rw_iopoll(), e.g. ->result updates */ in io_do_iopoll()
1289 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1292 req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0); in io_do_iopoll()
1293 if (req->opcode != IORING_OP_URING_CMD) in io_do_iopoll()
1299 pos = start ? start->next : ctx->iopoll_list.first; in io_do_iopoll()
1300 wq_list_cut(&ctx->iopoll_list, prev, start); in io_do_iopoll()
1302 if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs))) in io_do_iopoll()
1304 ctx->submit_state.compl_reqs.first = pos; in io_do_iopoll()
1313 if (rw->free_iovec) in io_rw_cache_free()
1314 kfree(rw->free_iovec); in io_rw_cache_free()