xref: /linux/io_uring/rw.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/blk-mq.h>
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/fsnotify.h>
10 #include <linux/poll.h>
11 #include <linux/nospec.h>
12 #include <linux/compat.h>
13 #include <linux/io_uring/cmd.h>
14 #include <linux/indirect_call_wrapper.h>
15 
16 #include <uapi/linux/io_uring.h>
17 
18 #include "io_uring.h"
19 #include "opdef.h"
20 #include "kbuf.h"
21 #include "alloc_cache.h"
22 #include "rsrc.h"
23 #include "poll.h"
24 #include "rw.h"
25 
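/*
 * Per-request read/write state, stored in the io_kiocb command area.
 * addr, len and flags mirror sqe->addr, sqe->len and sqe->rw_flags as
 * copied in io_prep_rw() below; kiocb carries the file, position and
 * IOCB_* flags handed to ->read_iter()/->write_iter().
 */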
26 struct io_rw {
27 	/* NOTE: kiocb has the file as its first member, so don't add a file pointer here */
28 	struct kiocb			kiocb;
29 	u64				addr;
30 	u32				len;
31 	rwf_t				flags;
32 };
33 
34 static inline bool io_file_supports_nowait(struct io_kiocb *req)
35 {
36 	return req->flags & REQ_F_SUPPORT_NOWAIT;
37 }
38 
39 #ifdef CONFIG_COMPAT
40 static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
41 {
42 	struct compat_iovec __user *uiov;
43 	compat_ssize_t clen;
44 
45 	uiov = u64_to_user_ptr(rw->addr);
46 	if (!access_ok(uiov, sizeof(*uiov)))
47 		return -EFAULT;
48 	if (__get_user(clen, &uiov->iov_len))
49 		return -EFAULT;
50 	if (clen < 0)
51 		return -EINVAL;
52 
53 	rw->len = clen;
54 	return 0;
55 }
56 #endif
57 
58 static int io_iov_buffer_select_prep(struct io_kiocb *req)
59 {
60 	struct iovec __user *uiov;
61 	struct iovec iov;
62 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
63 
64 	if (rw->len != 1)
65 		return -EINVAL;
66 
67 #ifdef CONFIG_COMPAT
68 	if (req->ctx->compat)
69 		return io_iov_compat_buffer_select_prep(rw);
70 #endif
71 
72 	uiov = u64_to_user_ptr(rw->addr);
73 	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
74 		return -EFAULT;
75 	rw->len = iov.iov_len;
76 	return 0;
77 }
78 
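/*
 * Set up io->iter for this request. Non-vectored requests (and vectored
 * ones using provided buffers) import a single user buffer, resolving a
 * provided buffer via io_buffer_select() first if needed. Vectored
 * requests import the iovec array, reusing a previously allocated
 * free_iovec if one is cached and otherwise starting from the inline
 * fast_iov; any array allocated by __import_iovec() is stashed in
 * free_iovec for later reuse or cleanup.
 */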
79 static int __io_import_iovec(int ddir, struct io_kiocb *req,
80 			     struct io_async_rw *io,
81 			     unsigned int issue_flags)
82 {
83 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
84 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
85 	struct iovec *iov;
86 	void __user *buf;
87 	int nr_segs, ret;
88 	size_t sqe_len;
89 
90 	buf = u64_to_user_ptr(rw->addr);
91 	sqe_len = rw->len;
92 
93 	if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
94 		if (io_do_buffer_select(req)) {
95 			buf = io_buffer_select(req, &sqe_len, issue_flags);
96 			if (!buf)
97 				return -ENOBUFS;
98 			rw->addr = (unsigned long) buf;
99 			rw->len = sqe_len;
100 		}
101 
102 		return import_ubuf(ddir, buf, sqe_len, &io->iter);
103 	}
104 
105 	if (io->free_iovec) {
106 		nr_segs = io->free_iov_nr;
107 		iov = io->free_iovec;
108 	} else {
109 		iov = &io->fast_iov;
110 		nr_segs = 1;
111 	}
112 	ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
113 				req->ctx->compat);
114 	if (unlikely(ret < 0))
115 		return ret;
116 	if (iov) {
117 		req->flags |= REQ_F_NEED_CLEANUP;
118 		io->free_iov_nr = io->iter.nr_segs;
119 		kfree(io->free_iovec);
120 		io->free_iovec = iov;
121 	}
122 	return 0;
123 }
124 
125 static inline int io_import_iovec(int rw, struct io_kiocb *req,
126 				  struct io_async_rw *io,
127 				  unsigned int issue_flags)
128 {
129 	int ret;
130 
131 	ret = __io_import_iovec(rw, req, io, issue_flags);
132 	if (unlikely(ret < 0))
133 		return ret;
134 
135 	iov_iter_save_state(&io->iter, &io->iter_state);
136 	return 0;
137 }
138 
139 static void io_rw_iovec_free(struct io_async_rw *rw)
140 {
141 	if (rw->free_iovec) {
142 		kfree(rw->free_iovec);
143 		rw->free_iov_nr = 0;
144 		rw->free_iovec = NULL;
145 	}
146 }
147 
148 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
149 {
150 	struct io_async_rw *rw = req->async_data;
151 	struct iovec *iov;
152 
153 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
154 		io_rw_iovec_free(rw);
155 		return;
156 	}
157 	iov = rw->free_iovec;
158 	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
159 		if (iov)
160 			kasan_mempool_poison_object(iov);
161 		req->async_data = NULL;
162 		req->flags &= ~REQ_F_ASYNC_DATA;
163 	}
164 }
165 
166 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
167 {
168 	/*
169 	 * Disable quick recycling for anything that's gone through io-wq.
170 	 * In theory, this should be fine to clean up. However, some read or
171 	 * write iter handling touches the iovec AFTER having called into the
172 	 * handler, e.g. to reexpand or revert. This means we can have:
173 	 *
174 	 * task			io-wq
175 	 *   issue
176 	 *     punt to io-wq
177 	 *			issue
178 	 *			  blkdev_write_iter()
179 	 *			    ->ki_complete()
180 	 *			      io_complete_rw()
181 	 *			        queue tw complete
182 	 *  run tw
183 	 *    req_rw_cleanup
184 	 *			iov_iter_count() <- look at iov_iter again
185 	 *
186 	 * which can lead to a UAF. This is only possible for io-wq offload
187 	 * as the cleanup can run in parallel. As io-wq is not the fast path,
188 	 * just leave cleanup to the end.
189 	 *
190 	 * This is really a bug in the core code that does this, any issue
191 	 * path should assume that a successful (or -EIOCBQUEUED) return can
192 	 * mean that the underlying data can be gone at any time. But that
193 	 * should be fixed separately, and then this check could be killed.
194 	 */
195 	if (!(req->flags & REQ_F_REFCOUNT)) {
196 		req->flags &= ~REQ_F_NEED_CLEANUP;
197 		io_rw_recycle(req, issue_flags);
198 	}
199 }
200 
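/*
 * Allocate (or recycle) the io_async_rw state for a request. A cached
 * entry from ctx->rw_cache is preferred; if it still holds an iovec, that
 * iovec is unpoisoned for reuse and REQ_F_NEED_CLEANUP is set so it gets
 * freed or recycled later. On a cache miss, fall back to a fresh
 * allocation via io_alloc_async_data().
 */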
201 static int io_rw_alloc_async(struct io_kiocb *req)
202 {
203 	struct io_ring_ctx *ctx = req->ctx;
204 	struct io_async_rw *rw;
205 
206 	rw = io_alloc_cache_get(&ctx->rw_cache);
207 	if (rw) {
208 		if (rw->free_iovec) {
209 			kasan_mempool_unpoison_object(rw->free_iovec,
210 				rw->free_iov_nr * sizeof(struct iovec));
211 			req->flags |= REQ_F_NEED_CLEANUP;
212 		}
213 		req->flags |= REQ_F_ASYNC_DATA;
214 		req->async_data = rw;
215 		goto done;
216 	}
217 
218 	if (!io_alloc_async_data(req)) {
219 		rw = req->async_data;
220 		rw->free_iovec = NULL;
221 		rw->free_iov_nr = 0;
222 done:
223 		rw->bytes_done = 0;
224 		return 0;
225 	}
226 
227 	return -ENOMEM;
228 }
229 
230 static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
231 {
232 	struct io_async_rw *rw;
233 	int ret;
234 
235 	if (io_rw_alloc_async(req))
236 		return -ENOMEM;
237 
238 	if (!do_import || io_do_buffer_select(req))
239 		return 0;
240 
241 	rw = req->async_data;
242 	ret = io_import_iovec(ddir, req, rw, 0);
243 	if (unlikely(ret < 0))
244 		return ret;
245 
246 	iov_iter_save_state(&rw->iter, &rw->iter_state);
247 	return 0;
248 }
249 
250 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
251 		      int ddir, bool do_import)
252 {
253 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
254 	unsigned ioprio;
255 	int ret;
256 
257 	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
258 	/* used for fixed read/write too - just read unconditionally */
259 	req->buf_index = READ_ONCE(sqe->buf_index);
260 
261 	ioprio = READ_ONCE(sqe->ioprio);
262 	if (ioprio) {
263 		ret = ioprio_check_cap(ioprio);
264 		if (ret)
265 			return ret;
266 
267 		rw->kiocb.ki_ioprio = ioprio;
268 	} else {
269 		rw->kiocb.ki_ioprio = get_current_ioprio();
270 	}
271 	rw->kiocb.dio_complete = NULL;
272 
273 	rw->addr = READ_ONCE(sqe->addr);
274 	rw->len = READ_ONCE(sqe->len);
275 	rw->flags = READ_ONCE(sqe->rw_flags);
276 	return io_prep_rw_setup(req, ddir, do_import);
277 }
278 
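/*
 * Prep handlers for IORING_OP_READ/IORING_OP_WRITE. As a rough sketch of
 * the userspace side (assuming liburing), a plain read would typically be
 * queued with something like:
 *
 *	io_uring_prep_read(sqe, fd, buf, len, offset);
 *
 * which fills sqe->addr, sqe->len and sqe->off and arrives here with a
 * single user buffer to import.
 */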
279 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
280 {
281 	return io_prep_rw(req, sqe, ITER_DEST, true);
282 }
283 
284 int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
285 {
286 	return io_prep_rw(req, sqe, ITER_SOURCE, true);
287 }
288 
289 static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
290 		       int ddir)
291 {
292 	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
293 	int ret;
294 
295 	ret = io_prep_rw(req, sqe, ddir, do_import);
296 	if (unlikely(ret))
297 		return ret;
298 	if (do_import)
299 		return 0;
300 
301 	/*
302 	 * Have to do this validation here, as by the time io_read() runs,
303 	 * rw->len might have changed due to buffer selection
304 	 */
305 	return io_iov_buffer_select_prep(req);
306 }
307 
308 int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
309 {
310 	return io_prep_rwv(req, sqe, ITER_DEST);
311 }
312 
313 int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
314 {
315 	return io_prep_rwv(req, sqe, ITER_SOURCE);
316 }
317 
318 static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
319 			    int ddir)
320 {
321 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
322 	struct io_ring_ctx *ctx = req->ctx;
323 	struct io_async_rw *io;
324 	u16 index;
325 	int ret;
326 
327 	ret = io_prep_rw(req, sqe, ddir, false);
328 	if (unlikely(ret))
329 		return ret;
330 
331 	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
332 		return -EFAULT;
333 	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
334 	req->imu = ctx->user_bufs[index];
335 	io_req_set_rsrc_node(req, ctx, 0);
336 
337 	io = req->async_data;
338 	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
339 	iov_iter_save_state(&io->iter, &io->iter_state);
340 	return ret;
341 }
342 
343 int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
344 {
345 	return io_prep_rw_fixed(req, sqe, ITER_DEST);
346 }
347 
348 int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
349 {
350 	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
351 }
352 
353 /*
354  * Multishot read is prepared just like a normal read/write request, the
355  * only difference is that we set the MULTISHOT flag.
356  */
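/*
 * A minimal userspace sketch, assuming liburing 2.6+ which provides
 * io_uring_prep_read_multishot(): register a provided buffer ring for
 * group buf_group, then queue
 *
 *	io_uring_prep_read_multishot(sqe, fd, 0, offset, buf_group);
 *
 * Completions carry IORING_CQE_F_MORE for as long as the request stays
 * armed.
 */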
357 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
358 {
359 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
360 	int ret;
361 
362 	/* must be used with provided buffers */
363 	if (!(req->flags & REQ_F_BUFFER_SELECT))
364 		return -EINVAL;
365 
366 	ret = io_prep_rw(req, sqe, ITER_DEST, false);
367 	if (unlikely(ret))
368 		return ret;
369 
370 	if (rw->addr || rw->len)
371 		return -EINVAL;
372 
373 	req->flags |= REQ_F_APOLL_MULTISHOT;
374 	return 0;
375 }
376 
377 void io_readv_writev_cleanup(struct io_kiocb *req)
378 {
379 	io_rw_iovec_free(req->async_data);
380 }
381 
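/*
 * Resolve the position to use for this request. A ki_pos of -1 (sqe->off
 * was -1) means "use the file's current position": position-bearing files
 * get REQ_F_CUR_POS so kiocb_done() can write the updated offset back to
 * f_pos, while stream files get a NULL ppos and a ki_pos of 0.
 */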
382 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
383 {
384 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
385 
386 	if (rw->kiocb.ki_pos != -1)
387 		return &rw->kiocb.ki_pos;
388 
389 	if (!(req->file->f_mode & FMODE_STREAM)) {
390 		req->flags |= REQ_F_CUR_POS;
391 		rw->kiocb.ki_pos = req->file->f_pos;
392 		return &rw->kiocb.ki_pos;
393 	}
394 
395 	rw->kiocb.ki_pos = 0;
396 	return NULL;
397 }
398 
399 #ifdef CONFIG_BLOCK
400 static void io_resubmit_prep(struct io_kiocb *req)
401 {
402 	struct io_async_rw *io = req->async_data;
403 
404 	iov_iter_restore(&io->iter, &io->iter_state);
405 }
406 
407 static bool io_rw_should_reissue(struct io_kiocb *req)
408 {
409 	umode_t mode = file_inode(req->file)->i_mode;
410 	struct io_ring_ctx *ctx = req->ctx;
411 
412 	if (!S_ISBLK(mode) && !S_ISREG(mode))
413 		return false;
414 	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
415 	    !(ctx->flags & IORING_SETUP_IOPOLL)))
416 		return false;
417 	/*
418 	 * If ref is dying, we might be running poll reap from the exit work.
419 	 * Don't attempt to reissue from that path, just let it fail with
420 	 * -EAGAIN.
421 	 */
422 	if (percpu_ref_is_dying(&ctx->refs))
423 		return false;
424 	/*
425 	 * Play it safe and assume not safe to re-import and reissue if we're
426 	 * not in the original thread group (or in task context).
427 	 */
428 	if (!same_thread_group(req->task, current) || !in_task())
429 		return false;
430 	return true;
431 }
432 #else
433 static void io_resubmit_prep(struct io_kiocb *req)
434 {
435 }
436 static bool io_rw_should_reissue(struct io_kiocb *req)
437 {
438 	return false;
439 }
440 #endif
441 
442 static void io_req_end_write(struct io_kiocb *req)
443 {
444 	if (req->flags & REQ_F_ISREG) {
445 		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
446 
447 		kiocb_end_write(&rw->kiocb);
448 	}
449 }
450 
451 /*
452  * Trigger the notifications after having done some IO, and finish the write
453  * accounting, if any.
454  */
455 static void io_req_io_end(struct io_kiocb *req)
456 {
457 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
458 
459 	if (rw->kiocb.ki_flags & IOCB_WRITE) {
460 		io_req_end_write(req);
461 		fsnotify_modify(req->file);
462 	} else {
463 		fsnotify_access(req->file);
464 	}
465 }
466 
467 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
468 {
469 	if (unlikely(res != req->cqe.res)) {
470 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
471 		    io_rw_should_reissue(req)) {
472 			/*
473 			 * Reissue will start accounting again, finish the
474 			 * current cycle.
475 			 */
476 			io_req_io_end(req);
477 			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
478 			return true;
479 		}
480 		req_set_fail(req);
481 		req->cqe.res = res;
482 	}
483 	return false;
484 }
485 
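/*
 * Fold in progress made by earlier partial attempts: bytes_done
 * accumulates data transferred before a retry, so the final CQE res
 * reports the running total, and an error hit after partial progress
 * still reports the bytes already transferred rather than the error.
 */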
486 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
487 {
488 	struct io_async_rw *io = req->async_data;
489 
490 	/* add previously done IO, if any */
491 	if (req_has_async_data(req) && io->bytes_done > 0) {
492 		if (res < 0)
493 			res = io->bytes_done;
494 		else
495 			res += io->bytes_done;
496 	}
497 	return res;
498 }
499 
500 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
501 {
502 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
503 	struct kiocb *kiocb = &rw->kiocb;
504 
505 	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
506 		long res = kiocb->dio_complete(rw->kiocb.private);
507 
508 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
509 	}
510 
511 	io_req_io_end(req);
512 
513 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
514 		req->cqe.flags |= io_put_kbuf(req, 0);
515 
516 	io_req_rw_cleanup(req, 0);
517 	io_req_task_complete(req, ts);
518 }
519 
520 static void io_complete_rw(struct kiocb *kiocb, long res)
521 {
522 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
523 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
524 
525 	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
526 		if (__io_complete_rw_common(req, res))
527 			return;
528 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
529 	}
530 	req->io_task_work.func = io_req_rw_complete;
531 	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
532 }
533 
534 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
535 {
536 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
537 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
538 
539 	if (kiocb->ki_flags & IOCB_WRITE)
540 		io_req_end_write(req);
541 	if (unlikely(res != req->cqe.res)) {
542 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
543 			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
544 			return;
545 		}
546 		req->cqe.res = res;
547 	}
548 
549 	/* order with io_iopoll_complete() checking ->iopoll_completed */
550 	smp_store_release(&req->iopoll_completed, 1);
551 }
552 
553 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
554 {
555 	/* IO was queued async, completion will happen later */
556 	if (ret == -EIOCBQUEUED)
557 		return;
558 
559 	/* transform internal restart error codes */
560 	if (unlikely(ret < 0)) {
561 		switch (ret) {
562 		case -ERESTARTSYS:
563 		case -ERESTARTNOINTR:
564 		case -ERESTARTNOHAND:
565 		case -ERESTART_RESTARTBLOCK:
566 			/*
567 			 * We can't just restart the syscall, since previously
568 			 * submitted sqes may already be in progress. Just fail
569 			 * this IO with EINTR.
570 			 */
571 			ret = -EINTR;
572 			break;
573 		}
574 	}
575 
576 	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
577 			io_complete_rw, kiocb, ret);
578 }
579 
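/*
 * Common completion for an inline issue attempt. Successful non-IOPOLL
 * requests are completed directly here (after updating f_pos for
 * REQ_F_CUR_POS requests); everything else is routed through
 * io_rw_done(). A request flagged REQ_F_REISSUE has its iterator
 * restored and is retried with -EAGAIN.
 */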
580 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
581 		       unsigned int issue_flags)
582 {
583 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
584 	unsigned final_ret = io_fixup_rw_res(req, ret);
585 
586 	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
587 		req->file->f_pos = rw->kiocb.ki_pos;
588 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
589 		if (!__io_complete_rw_common(req, ret)) {
590 			/*
591 			 * Safe to call io_req_io_end() from here as we're inline
592 			 * from the submission path.
593 			 */
594 			io_req_io_end(req);
595 			io_req_set_res(req, final_ret,
596 				       io_put_kbuf(req, issue_flags));
597 			io_req_rw_cleanup(req, issue_flags);
598 			return IOU_OK;
599 		}
600 	} else {
601 		io_rw_done(&rw->kiocb, ret);
602 	}
603 
604 	if (req->flags & REQ_F_REISSUE) {
605 		req->flags &= ~REQ_F_REISSUE;
606 		io_resubmit_prep(req);
607 		return -EAGAIN;
608 	}
609 	return IOU_ISSUE_SKIP_COMPLETE;
610 }
611 
612 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
613 {
614 	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
615 }
616 
617 /*
618  * For files that don't have ->read_iter() and ->write_iter(), handle them
619  * by looping over ->read() or ->write() manually.
620  */
621 static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
622 {
623 	struct kiocb *kiocb = &rw->kiocb;
624 	struct file *file = kiocb->ki_filp;
625 	ssize_t ret = 0;
626 	loff_t *ppos;
627 
628 	/*
629 	 * Don't support polled IO through this interface, and we can't
630 	 * support non-blocking either. For the latter, this just causes
631 	 * the kiocb to be handled from an async context.
632 	 */
633 	if (kiocb->ki_flags & IOCB_HIPRI)
634 		return -EOPNOTSUPP;
635 	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
636 	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
637 		return -EAGAIN;
638 
639 	ppos = io_kiocb_ppos(kiocb);
640 
641 	while (iov_iter_count(iter)) {
642 		void __user *addr;
643 		size_t len;
644 		ssize_t nr;
645 
646 		if (iter_is_ubuf(iter)) {
647 			addr = iter->ubuf + iter->iov_offset;
648 			len = iov_iter_count(iter);
649 		} else if (!iov_iter_is_bvec(iter)) {
650 			addr = iter_iov_addr(iter);
651 			len = iter_iov_len(iter);
652 		} else {
653 			addr = u64_to_user_ptr(rw->addr);
654 			len = rw->len;
655 		}
656 
657 		if (ddir == READ)
658 			nr = file->f_op->read(file, addr, len, ppos);
659 		else
660 			nr = file->f_op->write(file, addr, len, ppos);
661 
662 		if (nr < 0) {
663 			if (!ret)
664 				ret = nr;
665 			break;
666 		}
667 		ret += nr;
668 		if (!iov_iter_is_bvec(iter)) {
669 			iov_iter_advance(iter, nr);
670 		} else {
671 			rw->addr += nr;
672 			rw->len -= nr;
673 			if (!rw->len)
674 				break;
675 		}
676 		if (nr != len)
677 			break;
678 	}
679 
680 	return ret;
681 }
682 
683 /*
684  * This is our waitqueue callback handler, registered through __folio_lock_async()
685  * when we initially tried to do the IO with the iocb and armed our waitqueue.
686  * This gets called when the page is unlocked, and we generally expect that to
687  * happen when the page IO is completed and the page is now uptodate. This will
688  * queue a task_work based retry of the operation, attempting to copy the data
689  * again. If the latter fails because the page was NOT uptodate, then we will
690  * do a thread based blocking retry of the operation. That's the unexpected
691  * slow path.
692  */
693 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
694 			     int sync, void *arg)
695 {
696 	struct wait_page_queue *wpq;
697 	struct io_kiocb *req = wait->private;
698 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
699 	struct wait_page_key *key = arg;
700 
701 	wpq = container_of(wait, struct wait_page_queue, wait);
702 
703 	if (!wake_page_match(wpq, key))
704 		return 0;
705 
706 	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
707 	list_del_init(&wait->entry);
708 	io_req_task_queue(req);
709 	return 1;
710 }
711 
712 /*
713  * This controls whether a given IO request should be armed for async page
714  * based retry. If we return false here, the request is handed to the async
715  * worker threads for retry. If we're doing buffered reads on a regular file,
716  * we prepare a private wait_page_queue entry and retry the operation. This
717  * will either succeed because the page is now uptodate and unlocked, or it
718  * will register a callback when the page is unlocked at IO completion. Through
719  * that callback, io_uring uses task_work to setup a retry of the operation.
720  * That retry will attempt the buffered read again. The retry will generally
721  * succeed, or in rare cases where it fails, we then fall back to using the
722  * async worker threads for a blocking retry.
723  */
724 static bool io_rw_should_retry(struct io_kiocb *req)
725 {
726 	struct io_async_rw *io = req->async_data;
727 	struct wait_page_queue *wait = &io->wpq;
728 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
729 	struct kiocb *kiocb = &rw->kiocb;
730 
731 	/* never retry for NOWAIT, we just complete with -EAGAIN */
732 	if (req->flags & REQ_F_NOWAIT)
733 		return false;
734 
735 	/* Only for buffered IO */
736 	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
737 		return false;
738 
739 	/*
740 	 * just use poll if we can, and don't attempt if the fs doesn't
741 	 * support callback based unlocks
742 	 */
743 	if (io_file_can_poll(req) ||
744 	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
745 		return false;
746 
747 	wait->wait.func = io_async_buf_func;
748 	wait->wait.private = req;
749 	wait->wait.flags = 0;
750 	INIT_LIST_HEAD(&wait->wait.entry);
751 	kiocb->ki_flags |= IOCB_WAITQ;
752 	kiocb->ki_flags &= ~IOCB_NOWAIT;
753 	kiocb->ki_waitq = wait;
754 	return true;
755 }
756 
757 static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
758 {
759 	struct file *file = rw->kiocb.ki_filp;
760 
761 	if (likely(file->f_op->read_iter))
762 		return file->f_op->read_iter(&rw->kiocb, iter);
763 	else if (file->f_op->read)
764 		return loop_rw_iter(READ, rw, iter);
765 	else
766 		return -EINVAL;
767 }
768 
769 static bool need_complete_io(struct io_kiocb *req)
770 {
771 	return req->flags & REQ_F_ISREG ||
772 		S_ISBLK(file_inode(req->file)->i_mode);
773 }
774 
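/*
 * Per-issue kiocb setup: verify the file mode, apply RWF_* flags from the
 * SQE, flag requests that should fail rather than be retried (REQ_F_NOWAIT),
 * and pick the completion callback - io_complete_rw_iopoll() for
 * IORING_SETUP_IOPOLL rings (which require IOCB_DIRECT and ->iopoll
 * support), io_complete_rw() otherwise.
 */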
775 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
776 {
777 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
778 	struct kiocb *kiocb = &rw->kiocb;
779 	struct io_ring_ctx *ctx = req->ctx;
780 	struct file *file = req->file;
781 	int ret;
782 
783 	if (unlikely(!(file->f_mode & mode)))
784 		return -EBADF;
785 
786 	if (!(req->flags & REQ_F_FIXED_FILE))
787 		req->flags |= io_file_get_flags(file);
788 
789 	kiocb->ki_flags = file->f_iocb_flags;
790 	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
791 	if (unlikely(ret))
792 		return ret;
793 	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
794 
795 	/*
796 	 * If the file is marked O_NONBLOCK, still allow retry for it if it
797 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
798 	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
799 	 */
800 	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
801 	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
802 		req->flags |= REQ_F_NOWAIT;
803 
804 	if (ctx->flags & IORING_SETUP_IOPOLL) {
805 		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
806 			return -EOPNOTSUPP;
807 
808 		kiocb->private = NULL;
809 		kiocb->ki_flags |= IOCB_HIPRI;
810 		kiocb->ki_complete = io_complete_rw_iopoll;
811 		req->iopoll_completed = 0;
812 	} else {
813 		if (kiocb->ki_flags & IOCB_HIPRI)
814 			return -EINVAL;
815 		kiocb->ki_complete = io_complete_rw;
816 	}
817 
818 	return 0;
819 }
820 
821 static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
822 {
823 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
824 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
825 	struct io_async_rw *io = req->async_data;
826 	struct kiocb *kiocb = &rw->kiocb;
827 	ssize_t ret;
828 	loff_t *ppos;
829 
830 	if (io_do_buffer_select(req)) {
831 		ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
832 		if (unlikely(ret < 0))
833 			return ret;
834 	}
835 	ret = io_rw_init_file(req, FMODE_READ, READ);
836 	if (unlikely(ret))
837 		return ret;
838 	req->cqe.res = iov_iter_count(&io->iter);
839 
840 	if (force_nonblock) {
841 		/* If the file doesn't support async, just async punt */
842 		if (unlikely(!io_file_supports_nowait(req)))
843 			return -EAGAIN;
844 		kiocb->ki_flags |= IOCB_NOWAIT;
845 	} else {
846 		/* Ensure we clear previously set non-block flag */
847 		kiocb->ki_flags &= ~IOCB_NOWAIT;
848 	}
849 
850 	ppos = io_kiocb_update_pos(req);
851 
852 	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
853 	if (unlikely(ret))
854 		return ret;
855 
856 	ret = io_iter_do_read(rw, &io->iter);
857 
858 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
859 		req->flags &= ~REQ_F_REISSUE;
860 		/* If we can poll, just do that. */
861 		if (io_file_can_poll(req))
862 			return -EAGAIN;
863 		/* IOPOLL retry should happen for io-wq threads */
864 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
865 			goto done;
866 		/* no retry on NONBLOCK nor RWF_NOWAIT */
867 		if (req->flags & REQ_F_NOWAIT)
868 			goto done;
869 		ret = 0;
870 	} else if (ret == -EIOCBQUEUED) {
871 		return IOU_ISSUE_SKIP_COMPLETE;
872 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
873 		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
874 		/* read all, failed, already did sync or don't want to retry */
875 		goto done;
876 	}
877 
878 	/*
879 	 * Don't depend on the iter state matching what was consumed, or being
880 	 * untouched in case of error. Restore it and we'll advance it
881 	 * manually if we need to.
882 	 */
883 	iov_iter_restore(&io->iter, &io->iter_state);
884 
885 	do {
886 		/*
887 		 * We end up here because of a partial read, either from
888 		 * above or inside this loop. Advance the iter by the bytes
889 		 * that were consumed.
890 		 */
891 		iov_iter_advance(&io->iter, ret);
892 		if (!iov_iter_count(&io->iter))
893 			break;
894 		io->bytes_done += ret;
895 		iov_iter_save_state(&io->iter, &io->iter_state);
896 
897 		/* if we can retry, do so with the callbacks armed */
898 		if (!io_rw_should_retry(req)) {
899 			kiocb->ki_flags &= ~IOCB_WAITQ;
900 			return -EAGAIN;
901 		}
902 
903 		req->cqe.res = iov_iter_count(&io->iter);
904 		/*
905 		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
906 		 * we get -EIOCBQUEUED, then we'll get a notification when the
907 		 * desired page gets unlocked. We can also get a partial read
908 		 * here, and if we do, then just retry at the new offset.
909 		 */
910 		ret = io_iter_do_read(rw, &io->iter);
911 		if (ret == -EIOCBQUEUED)
912 			return IOU_ISSUE_SKIP_COMPLETE;
913 		/* we got some bytes, but not all. retry. */
914 		kiocb->ki_flags &= ~IOCB_WAITQ;
915 		iov_iter_restore(&io->iter, &io->iter_state);
916 	} while (ret > 0);
917 done:
918 	/* it's faster to check here than to delegate to kfree */
919 	return ret;
920 }
921 
922 int io_read(struct io_kiocb *req, unsigned int issue_flags)
923 {
924 	int ret;
925 
926 	ret = __io_read(req, issue_flags);
927 	if (ret >= 0)
928 		return kiocb_done(req, ret, issue_flags);
929 
930 	return ret;
931 }
932 
933 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
934 {
935 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
936 	unsigned int cflags = 0;
937 	int ret;
938 
939 	/*
940 	 * Multishot MUST be used on a pollable file
941 	 */
942 	if (!io_file_can_poll(req))
943 		return -EBADFD;
944 
945 	ret = __io_read(req, issue_flags);
946 
947 	/*
948 	 * If the file doesn't support proper NOWAIT, then disable multishot
949 	 * and stay in single shot mode.
950 	 */
951 	if (!io_file_supports_nowait(req))
952 		req->flags &= ~REQ_F_APOLL_MULTISHOT;
953 
954 	/*
955 	 * If we get -EAGAIN, recycle our buffer and just let normal poll
956 	 * handling arm it.
957 	 */
958 	if (ret == -EAGAIN) {
959 		/*
960 		 * Reset rw->len to 0 again to avoid clamping future mshot
961 		 * reads, in case the buffer size varies.
962 		 */
963 		if (io_kbuf_recycle(req, issue_flags))
964 			rw->len = 0;
965 		if (issue_flags & IO_URING_F_MULTISHOT)
966 			return IOU_ISSUE_SKIP_COMPLETE;
967 		return -EAGAIN;
968 	}
969 
970 	/*
971 	 * Any successful return value will keep the multishot read armed.
972 	 */
973 	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
974 		/*
975 		 * Put our buffer and post a CQE. If we fail to post a CQE, then
976 		 * jump to the termination path. This request is then done.
977 		 */
978 		cflags = io_put_kbuf(req, issue_flags);
979 		rw->len = 0; /* similarly to above, reset len to 0 */
980 
981 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
982 			if (issue_flags & IO_URING_F_MULTISHOT) {
983 				/*
984 				 * Force retry, as we might have more data to
985 				 * be read and otherwise it won't get retried
986 				 * until (if ever) another poll is triggered.
987 				 */
988 				io_poll_multishot_retry(req);
989 				return IOU_ISSUE_SKIP_COMPLETE;
990 			}
991 			return -EAGAIN;
992 		}
993 	}
994 
995 	/*
996 	 * Either an error, or we've hit overflow posting the CQE. For any
997 	 * multishot request, hitting overflow will terminate it.
998 	 */
999 	io_req_set_res(req, ret, cflags);
1000 	io_req_rw_cleanup(req, issue_flags);
1001 	if (issue_flags & IO_URING_F_MULTISHOT)
1002 		return IOU_STOP_MULTISHOT;
1003 	return IOU_OK;
1004 }
1005 
1006 int io_write(struct io_kiocb *req, unsigned int issue_flags)
1007 {
1008 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1009 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1010 	struct io_async_rw *io = req->async_data;
1011 	struct kiocb *kiocb = &rw->kiocb;
1012 	ssize_t ret, ret2;
1013 	loff_t *ppos;
1014 
1015 	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
1016 	if (unlikely(ret))
1017 		return ret;
1018 	req->cqe.res = iov_iter_count(&io->iter);
1019 
1020 	if (force_nonblock) {
1021 		/* If the file doesn't support async, just async punt */
1022 		if (unlikely(!io_file_supports_nowait(req)))
1023 			goto ret_eagain;
1024 
1025 		/* Check if we can support NOWAIT. */
1026 		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
1027 		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
1028 		    (req->flags & REQ_F_ISREG))
1029 			goto ret_eagain;
1030 
1031 		kiocb->ki_flags |= IOCB_NOWAIT;
1032 	} else {
1033 		/* Ensure we clear previously set non-block flag */
1034 		kiocb->ki_flags &= ~IOCB_NOWAIT;
1035 	}
1036 
1037 	ppos = io_kiocb_update_pos(req);
1038 
1039 	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
1040 	if (unlikely(ret))
1041 		return ret;
1042 
1043 	if (req->flags & REQ_F_ISREG)
1044 		kiocb_start_write(kiocb);
1045 	kiocb->ki_flags |= IOCB_WRITE;
1046 
1047 	if (likely(req->file->f_op->write_iter))
1048 		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
1049 	else if (req->file->f_op->write)
1050 		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
1051 	else
1052 		ret2 = -EINVAL;
1053 
1054 	if (req->flags & REQ_F_REISSUE) {
1055 		req->flags &= ~REQ_F_REISSUE;
1056 		ret2 = -EAGAIN;
1057 	}
1058 
1059 	/*
1060 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
1061 	 * retry them without IOCB_NOWAIT.
1062 	 */
1063 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
1064 		ret2 = -EAGAIN;
1065 	/* no retry on NONBLOCK nor RWF_NOWAIT */
1066 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
1067 		goto done;
1068 	if (!force_nonblock || ret2 != -EAGAIN) {
1069 		/* IOPOLL retry should happen for io-wq threads */
1070 		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
1071 			goto ret_eagain;
1072 
1073 		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
1074 			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
1075 						req->cqe.res, ret2);
1076 
1077 			/* This is a partial write. The file pos has already been
1078 			 * updated, set up the async struct to complete the request
1079 			 * in the worker. Also update bytes_done to account for
1080 			 * the bytes already written.
1081 			 */
1082 			iov_iter_save_state(&io->iter, &io->iter_state);
1083 			io->bytes_done += ret2;
1084 
1085 			if (kiocb->ki_flags & IOCB_WRITE)
1086 				io_req_end_write(req);
1087 			return -EAGAIN;
1088 		}
1089 done:
1090 		return kiocb_done(req, ret2, issue_flags);
1091 	} else {
1092 ret_eagain:
1093 		iov_iter_restore(&io->iter, &io->iter_state);
1094 		if (kiocb->ki_flags & IOCB_WRITE)
1095 			io_req_end_write(req);
1096 		return -EAGAIN;
1097 	}
1098 }
1099 
1100 void io_rw_fail(struct io_kiocb *req)
1101 {
1102 	int res;
1103 
1104 	res = io_fixup_rw_res(req, req->cqe.res);
1105 	io_req_set_res(req, res, req->cqe.flags);
1106 }
1107 
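/*
 * Reap completions for IOPOLL requests: walk ctx->iopoll_list, polling each
 * file (via ->uring_cmd_iopoll() for IORING_OP_URING_CMD, ->iopoll()
 * otherwise), then splice every request whose ->iopoll_completed has been
 * published onto the completion list and flush it. Returns the number of
 * completed events, 0 if nothing finished, or a negative error from the
 * poll callback.
 */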
1108 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1109 {
1110 	struct io_wq_work_node *pos, *start, *prev;
1111 	unsigned int poll_flags = 0;
1112 	DEFINE_IO_COMP_BATCH(iob);
1113 	int nr_events = 0;
1114 
1115 	/*
1116 	 * Only spin for completions if we don't have multiple devices hanging
1117 	 * off our complete list.
1118 	 */
1119 	if (ctx->poll_multi_queue || force_nonspin)
1120 		poll_flags |= BLK_POLL_ONESHOT;
1121 
1122 	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1123 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1124 		struct file *file = req->file;
1125 		int ret;
1126 
1127 		/*
1128 		 * Move completed and retryable entries to our local lists.
1129 		 * If we find a request that requires polling, break out
1130 		 * and complete those lists first, if we have entries there.
1131 		 */
1132 		if (READ_ONCE(req->iopoll_completed))
1133 			break;
1134 
1135 		if (req->opcode == IORING_OP_URING_CMD) {
1136 			struct io_uring_cmd *ioucmd;
1137 
1138 			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1139 			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1140 								poll_flags);
1141 		} else {
1142 			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1143 
1144 			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1145 		}
1146 		if (unlikely(ret < 0))
1147 			return ret;
1148 		else if (ret)
1149 			poll_flags |= BLK_POLL_ONESHOT;
1150 
1151 		/* iopoll may have completed current req */
1152 		if (!rq_list_empty(iob.req_list) ||
1153 		    READ_ONCE(req->iopoll_completed))
1154 			break;
1155 	}
1156 
1157 	if (!rq_list_empty(iob.req_list))
1158 		iob.complete(&iob);
1159 	else if (!pos)
1160 		return 0;
1161 
1162 	prev = start;
1163 	wq_list_for_each_resume(pos, prev) {
1164 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1165 
1166 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1167 		if (!smp_load_acquire(&req->iopoll_completed))
1168 			break;
1169 		nr_events++;
1170 		req->cqe.flags = io_put_kbuf(req, 0);
1171 		if (req->opcode != IORING_OP_URING_CMD)
1172 			io_req_rw_cleanup(req, 0);
1173 	}
1174 	if (unlikely(!nr_events))
1175 		return 0;
1176 
1177 	pos = start ? start->next : ctx->iopoll_list.first;
1178 	wq_list_cut(&ctx->iopoll_list, prev, start);
1179 
1180 	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
1181 		return 0;
1182 	ctx->submit_state.compl_reqs.first = pos;
1183 	__io_submit_flush_completions(ctx);
1184 	return nr_events;
1185 }
1186 
1187 void io_rw_cache_free(const void *entry)
1188 {
1189 	struct io_async_rw *rw = (struct io_async_rw *) entry;
1190 
1191 	if (rw->free_iovec) {
1192 		kasan_mempool_unpoison_object(rw->free_iovec,
1193 				rw->free_iov_nr * sizeof(struct iovec));
1194 		io_rw_iovec_free(rw);
1195 	}
1196 	kfree(rw);
1197 }
1198