xref: /linux/io_uring/rw.c (revision 5e2cb28dd7e182dfa641550dfa225913509ad45d)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

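/*
 * For a compat (32-bit) task, the single iovec used with buffer-selected
 * IORING_OP_READV is a compat_iovec, not a native struct iovec. Pull the
 * length out of it directly so rw->len reflects what userspace passed in.
 */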
#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

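/*
 * A buffer-selected readv must pass exactly one iovec. Its iov_len is
 * folded into rw->len so the provided buffer can be bounded by it; the
 * iovec base address is irrelevant, as buffer selection supplies one.
 */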
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

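/*
 * Common prep for all read/write style opcodes: pull the offset, buffer
 * address/length, rw_flags and buf_index out of the SQE, resolve the
 * registered buffer for the fixed opcodes, and set up the IO priority.
 *
 * For reference, userspace would typically build a fixed-read SQE along
 * these lines (a liburing sketch, not part of this file; it assumes a
 * buffer was registered at index 0):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
 */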
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	if (req->opcode == IORING_OP_READ_FIXED ||
	    req->opcode == IORING_OP_WRITE_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);

	/*
	 * Have to do this validation here; by the time io_read() runs,
	 * rw->len might have changed due to buffer selection.
	 */
	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select_prep(req);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_prep_rw(req, sqe);
	if (unlikely(ret))
		return ret;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

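/*
 * Hand the result to ->ki_complete(). The -ERESTART* family is mapped to
 * -EINTR here, since already-submitted SQEs rule out a syscall restart.
 */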
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

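/*
 * Pick the file position for this IO: an explicit offset from the SQE,
 * the file's current position if the SQE offset was -1 (updating f_pos
 * on completion), or no position at all for stream-like files.
 */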
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

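/*
 * Reissue is only supported with CONFIG_BLOCK. Before requeueing, the
 * iterator must be re-imported or restored so the retry sees the
 * original user memory layout.
 */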
#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume it's not safe to re-import and reissue if
	 * we're not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

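/* Pairs with kiocb_start_write() done ahead of a regular file write. */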
static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

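/*
 * Common completion checks: a short result either flags the request for
 * reissue (when that is safe) or marks it failed. Returns true if the
 * request will be reissued and must not be completed yet.
 */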
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

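/*
 * Task-work completion: collect a deferred dio result if the lower layer
 * handed us one, do the fsnotify and write accounting, then drop any
 * selected buffer and post the CQE.
 */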
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;

		req->cqe.flags |= io_put_kbuf(req, issue_flags);
	}
	io_req_task_complete(req, ts);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

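/*
 * Finish an inline (submission path) issue: update f_pos if needed, fold
 * in previously completed bytes, and either post the CQE here or defer
 * to io_rw_done() and the reissue handling.
 */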
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_end from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, final_ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

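/*
 * Set up the iov_iter for this request. Three cases: fixed opcodes map
 * the registered buffer via io_import_fixed(); non-vectored and
 * buffer-selected requests import a single user buffer; everything else
 * goes through __import_iovec(), which may allocate an iovec array that
 * the caller must free.
 */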
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_ubuf(ddir, buf, sqe_len, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			      req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (IS_ERR(*iovec))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

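/*
 * Copy the iterator (and, for userspace iovecs, the vector itself) into
 * the request's async data, so the IO can be retried later from another
 * context.
 */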
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.__iov = io->s.fast_iov;
		if (iter->__iov != fast_iov) {
			iov_off = iter_iov(iter) - fast_iov;
			io->s.iter.__iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_cold_defs[req->opcode].prep_async)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_DEST);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, ITER_SOURCE);
}

/*
 * This is our waitqueue callback handler, registered through
 * __folio_lock_async() when we initially tried to do the IO with the iocb
 * and armed our waitqueue. This gets called when the page is unlocked,
 * and we generally expect that to happen when the page IO is completed
 * and the page is now uptodate. This will queue a task_work based retry
 * of the operation, attempting to copy the data again. If the latter
 * fails because the page was NOT uptodate, then we will do a thread based
 * blocking retry of the operation. That's the unexpected slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

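/*
 * Per-issue kiocb setup: check the file mode, apply the RWF_* flags,
 * work out whether this request may block, and wire up the completion
 * handler for IOPOLL vs normal completions.
 */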
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}

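/*
 * Core read path, shared by the single shot and multishot variants.
 * Handles both first-time issue and retries arriving with async data
 * attached, including re-importing a provided buffer and driving the
 * IOCB_WAITQ based retry loop for partial buffered reads.
 */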
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/*
		 * If we can poll, just do that. For a vectored read, we'll
		 * need to copy state first.
		 */
		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	iovec = NULL;
	if (ret2) {
		ret = ret > 0 ? ret : ret2;
		goto done;
	}

	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

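/*
 * Multishot read: every successful chunk posts a CQE with
 * IORING_CQE_F_MORE set and leaves the request armed. Userspace would
 * normally pair this with provided buffers, roughly like so (a liburing
 * sketch, assuming a liburing version that has the multishot helper and
 * a registered buffer group bgid):
 *
 *	io_uring_prep_read_multishot(sqe, fd, 0, 0, bgid);
 */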
int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!file_can_poll(req->file))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		io_kbuf_recycle(req, issue_flags);
		return -EAGAIN;
	}

	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);

		if (io_fill_cqe_req_aux(req,
					issue_flags & IO_URING_F_COMPLETE_DEFER,
					ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

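/*
 * Write path. Mirrors __io_read() in its nonblocking and punt decisions,
 * but additionally handles write accounting via kiocb_start_write() and
 * converts a short write on a regular file or bdev into an io-wq based
 * completion of the remainder.
 */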
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* Non-direct IO via the file path only supports NOWAIT on block devices. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
			(req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *io;

			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already
			 * been updated, set up the async struct to complete
			 * the request in the worker. Also update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);

			io = req->async_data;
			if (io)
				io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return ret ? ret : -EAGAIN;
		}
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

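/* Fail a rw request, still accounting any bytes already transferred. */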
void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

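/*
 * Reap IOPOLL completions: walk the iopoll list, poll each pending kiocb
 * (batching block requests where possible), then flush the contiguous
 * run of completed requests at the head of the list as CQEs.
 */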
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
								poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}