// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

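/*
 * Check whether this request can be issued without blocking: either the file
 * has FMODE_NOWAIT (cached in REQ_F_SUPPORT_NOWAIT), or it is pollable and
 * currently reports the requested readiness mask.
 */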
static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

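/*
 * Map the user memory described by the SQE into io->iter. Non-vectored
 * requests (and buffer-selected ones) import a single buffer, picking a
 * provided buffer first if needed. Vectored requests import the iovec array,
 * reusing an iovec allocation previously cached in 'io' where possible.
 */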
static int __io_import_iovec(int ddir, struct io_kiocb *req,
			     struct io_async_rw *io,
			     unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iovec *iov;
	void __user *buf;
	int nr_segs, ret;
	size_t sqe_len;

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return -ENOBUFS;
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		return import_ubuf(ddir, buf, sqe_len, &io->iter);
	}

	if (io->free_iovec) {
		nr_segs = io->free_iov_nr;
		iov = io->free_iovec;
	} else {
		iov = &io->fast_iov;
		nr_segs = 1;
	}
	ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io->free_iov_nr = io->iter.nr_segs;
		kfree(io->free_iovec);
		io->free_iovec = iov;
	}
	return 0;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct io_async_rw *io,
				  unsigned int issue_flags)
{
	int ret;

	ret = __io_import_iovec(rw, req, io, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

static void io_rw_iovec_free(struct io_async_rw *rw)
{
	if (rw->free_iovec) {
		kfree(rw->free_iovec);
		rw->free_iov_nr = 0;
		rw->free_iovec = NULL;
	}
}

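/*
 * Try to stash the async data back into the per-ring rw cache. If the ring
 * lock isn't held (IO_URING_F_UNLOCKED), the cache can't be used, so just
 * free the cached iovec. On a successful cache put, the iovec is poisoned
 * for KASAN and the async data is detached from the request.
 */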
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;
	struct iovec *iov;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_rw_iovec_free(rw);
		return;
	}
	iov = rw->free_iovec;
	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		if (iov)
			kasan_mempool_poison_object(iov);
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, e.g. to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *     punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this; any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}

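/*
 * Set up ->async_data for a read/write request, preferably by reusing an
 * entry from the per-ring rw cache (unpoisoning any cached iovec), falling
 * back to a fresh allocation otherwise.
 */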
static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_alloc_cache_get(&ctx->rw_cache);
	if (rw) {
		if (rw->free_iovec) {
			kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = rw;
		goto done;
	}

	if (!io_alloc_async_data(req)) {
		rw = req->async_data;
		rw->free_iovec = NULL;
		rw->free_iov_nr = 0;
done:
		rw->bytes_done = 0;
		return 0;
	}

	return -ENOMEM;
}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
	struct io_async_rw *rw;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;

	if (!do_import || io_do_buffer_select(req))
		return 0;

	rw = req->async_data;
	ret = io_import_iovec(ddir, req, rw, 0);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&rw->iter, &rw->iter_state);
	return 0;
}

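/*
 * Common prep for all read/write variants: pull the offset, buffer index,
 * ioprio, address, length and rw flags out of the SQE, then allocate the
 * async data and optionally import the user buffer(s).
 */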
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir, bool do_import)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return io_prep_rw_setup(req, ddir, do_import);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE, true);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
	int ret;

	ret = io_prep_rw(req, sqe, ddir, do_import);
	if (unlikely(ret))
		return ret;
	if (do_import)
		return 0;

	/*
	 * Have to do this validation here: if it was left to io_read(),
	 * rw->len might already have changed due to buffer selection.
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *io;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe, ddir, false);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);

	io = req->async_data;
	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}

/*
 * Multishot read is prepared just like a normal read/write request; the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe, ITER_DEST, false);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	io_rw_iovec_free(req->async_data);
}

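/*
 * Resolve the file position for this request: an explicit offset if one was
 * given (anything other than -1), the file's current position for non-stream
 * files (remembered via REQ_F_CUR_POS so it can be written back on
 * completion), or NULL for stream-like files that have no position.
 */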
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

#ifdef CONFIG_BLOCK
static void io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	iov_iter_restore(&io->iter, &io->iter_state);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume it's not safe to re-import and reissue if
	 * we're not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static void io_resubmit_prep(struct io_kiocb *req)
{
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

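/*
 * Common handling for a completion that doesn't match the expected result.
 * Returns true if the request was flagged for reissue (a reissuable -EAGAIN),
 * in which case the caller must not post a completion; otherwise a short or
 * errored result marks the request as failed.
 */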
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
			io_complete_rw, kiocb, ret);
}

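/*
 * Finish a request from the submission path. If the result is non-negative
 * and this isn't an IOPOLL request (->ki_complete is io_complete_rw), the
 * request can be completed inline; otherwise the result is handed to
 * io_rw_done() and completion happens through the callback. A pending
 * reissue is translated into -EAGAIN for the caller.
 */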
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_end from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, ret, issue_flags));
			io_req_rw_cleanup(req, issue_flags);
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		io_resubmit_prep(req);
		return -EAGAIN;
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

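/*
 * Regular files and block devices are expected to complete the full requested
 * amount, so callers use this to decide whether a partial result should be
 * retried rather than posted as-is.
 */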
static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

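/*
 * Per-issue kiocb setup: verify the file is open for the right mode, seed
 * the kiocb flags from the file and the request's rw flags, decide whether
 * the request may block (REQ_F_NOWAIT), and pick the completion handler
 * depending on whether the ring uses IOPOLL.
 */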
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}

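/*
 * Core of the read path: import the buffer if needed, issue the read, and
 * handle -EAGAIN and short reads. Short reads on files that must be completed
 * in full are retried here, either with the page waitqueue armed (IOCB_WAITQ)
 * or by returning -EAGAIN so the request gets punted to io-wq.
 */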
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (io_do_buffer_select(req)) {
		ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, issue_flags);
		rw->len = 0;	/* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

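/*
 * Take the write freeze protection for regular files. For a nonblocking
 * request (IOCB_NOWAIT) only trylock the superblock write protection, so the
 * caller can return -EAGAIN instead of sleeping on a frozen filesystem.
 */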
static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already
			 * been updated, set up the async struct to complete
			 * the request in the worker. Also update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

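/*
 * Reap completions for an IOPOLL ring: poll each request on ->iopoll_list
 * (via ->iopoll() or ->uring_cmd_iopoll()), then flush completions for every
 * request that has been marked ->iopoll_completed, in submission order.
 * Returns the number of completion events found, or < 0 on error.
 */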
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	if (rw->free_iovec) {
		kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
		io_rw_iovec_free(rw);
	}
	kfree(rw);
}