xref: /linux/io_uring/rw.c (revision 60684c2bd35064043360e6f716d1b7c20e967b7d)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/blk-mq.h>
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/fsnotify.h>
10 #include <linux/poll.h>
11 #include <linux/nospec.h>
12 #include <linux/compat.h>
13 #include <linux/io_uring.h>
14 
15 #include <uapi/linux/io_uring.h>
16 
17 #include "io_uring.h"
18 #include "opdef.h"
19 #include "kbuf.h"
20 #include "rsrc.h"
21 #include "rw.h"
22 
23 struct io_rw {
24 	/* NOTE: kiocb has the file as the first member, so don't do it here */
25 	struct kiocb			kiocb;
26 	u64				addr;
27 	u32				len;
28 	rwf_t				flags;
29 };
30 
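/*
 * Whether the target file can honor a non-blocking issue attempt. The flag
 * is derived from the file itself (see the io_file_get_flags() usage in
 * io_rw_init_file()).
 */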
31 static inline bool io_file_supports_nowait(struct io_kiocb *req)
32 {
33 	return req->flags & REQ_F_SUPPORT_NOWAIT;
34 }
35 
36 #ifdef CONFIG_COMPAT
37 static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
38 {
39 	struct compat_iovec __user *uiov;
40 	compat_ssize_t clen;
41 
42 	uiov = u64_to_user_ptr(rw->addr);
43 	if (!access_ok(uiov, sizeof(*uiov)))
44 		return -EFAULT;
45 	if (__get_user(clen, &uiov->iov_len))
46 		return -EFAULT;
47 	if (clen < 0)
48 		return -EINVAL;
49 
50 	rw->len = clen;
51 	return 0;
52 }
53 #endif
54 
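/*
 * READV with buffer selection must pass exactly one iovec. Only the length
 * is taken from it here; the base address is replaced by the selected
 * buffer when the iter is imported at issue time.
 */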
55 static int io_iov_buffer_select_prep(struct io_kiocb *req)
56 {
57 	struct iovec __user *uiov;
58 	struct iovec iov;
59 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
60 
61 	if (rw->len != 1)
62 		return -EINVAL;
63 
64 #ifdef CONFIG_COMPAT
65 	if (req->ctx->compat)
66 		return io_iov_compat_buffer_select_prep(rw);
67 #endif
68 
69 	uiov = u64_to_user_ptr(rw->addr);
70 	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
71 		return -EFAULT;
72 	rw->len = iov.iov_len;
73 	return 0;
74 }
75 
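/*
 * Common prep for the read/write opcodes: pull the SQE fields, resolve the
 * registered buffer for READ_FIXED/WRITE_FIXED, and validate any explicit
 * ioprio.
 */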
76 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
77 {
78 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
79 	unsigned ioprio;
80 	int ret;
81 
82 	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
83 	/* used for fixed read/write too - just read unconditionally */
84 	req->buf_index = READ_ONCE(sqe->buf_index);
85 
86 	if (req->opcode == IORING_OP_READ_FIXED ||
87 	    req->opcode == IORING_OP_WRITE_FIXED) {
88 		struct io_ring_ctx *ctx = req->ctx;
89 		u16 index;
90 
91 		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
92 			return -EFAULT;
93 		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
94 		req->imu = ctx->user_bufs[index];
95 		io_req_set_rsrc_node(req, ctx, 0);
96 	}
97 
98 	ioprio = READ_ONCE(sqe->ioprio);
99 	if (ioprio) {
100 		ret = ioprio_check_cap(ioprio);
101 		if (ret)
102 			return ret;
103 
104 		rw->kiocb.ki_ioprio = ioprio;
105 	} else {
106 		rw->kiocb.ki_ioprio = get_current_ioprio();
107 	}
108 
109 	rw->addr = READ_ONCE(sqe->addr);
110 	rw->len = READ_ONCE(sqe->len);
111 	rw->flags = READ_ONCE(sqe->rw_flags);
112 
113 	/* Have to do this validation here, since by the time we're in io_read()
114 	 * rw->len might have changed due to buffer selection
115 	 */
116 	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
117 		ret = io_iov_buffer_select_prep(req);
118 		if (ret)
119 			return ret;
120 	}
121 
122 	return 0;
123 }
124 
125 void io_readv_writev_cleanup(struct io_kiocb *req)
126 {
127 	struct io_async_rw *io = req->async_data;
128 
129 	kfree(io->free_iovec);
130 }
131 
132 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
133 {
134 	switch (ret) {
135 	case -EIOCBQUEUED:
136 		break;
137 	case -ERESTARTSYS:
138 	case -ERESTARTNOINTR:
139 	case -ERESTARTNOHAND:
140 	case -ERESTART_RESTARTBLOCK:
141 		/*
142 		 * We can't just restart the syscall, since previously
143 		 * submitted sqes may already be in progress. Just fail this
144 		 * IO with EINTR.
145 		 */
146 		ret = -EINTR;
147 		fallthrough;
148 	default:
149 		kiocb->ki_complete(kiocb, ret);
150 	}
151 }
152 
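/*
 * An offset of -1 in the SQE means "use the file position". For non-stream
 * files we latch f_pos into the kiocb and mark the request so the position
 * gets written back on completion; stream files get a NULL position.
 */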
153 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
154 {
155 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
156 
157 	if (rw->kiocb.ki_pos != -1)
158 		return &rw->kiocb.ki_pos;
159 
160 	if (!(req->file->f_mode & FMODE_STREAM)) {
161 		req->flags |= REQ_F_CUR_POS;
162 		rw->kiocb.ki_pos = req->file->f_pos;
163 		return &rw->kiocb.ki_pos;
164 	}
165 
166 	rw->kiocb.ki_pos = 0;
167 	return NULL;
168 }
169 
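/* Queue the request for reissue via io-wq, from task_work context. */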
170 static void io_req_task_queue_reissue(struct io_kiocb *req)
171 {
172 	req->io_task_work.func = io_queue_iowq;
173 	io_req_task_work_add(req);
174 }
175 
176 #ifdef CONFIG_BLOCK
177 static bool io_resubmit_prep(struct io_kiocb *req)
178 {
179 	struct io_async_rw *io = req->async_data;
180 
181 	if (!req_has_async_data(req))
182 		return !io_req_prep_async(req);
183 	iov_iter_restore(&io->s.iter, &io->s.iter_state);
184 	return true;
185 }
186 
187 static bool io_rw_should_reissue(struct io_kiocb *req)
188 {
189 	umode_t mode = file_inode(req->file)->i_mode;
190 	struct io_ring_ctx *ctx = req->ctx;
191 
192 	if (!S_ISBLK(mode) && !S_ISREG(mode))
193 		return false;
194 	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
195 	    !(ctx->flags & IORING_SETUP_IOPOLL)))
196 		return false;
197 	/*
198 	 * If ref is dying, we might be running poll reap from the exit work.
199 	 * Don't attempt to reissue from that path, just let it fail with
200 	 * -EAGAIN.
201 	 */
202 	if (percpu_ref_is_dying(&ctx->refs))
203 		return false;
204 	/*
205 	 * Play it safe and assume it's not safe to re-import and reissue if
206 	 * we're not in the original thread group (or not in task context).
207 	 */
208 	if (!same_thread_group(req->task, current) || !in_task())
209 		return false;
210 	return true;
211 }
212 #else
213 static bool io_resubmit_prep(struct io_kiocb *req)
214 {
215 	return false;
216 }
217 static bool io_rw_should_reissue(struct io_kiocb *req)
218 {
219 	return false;
220 }
221 #endif
222 
223 static void kiocb_end_write(struct io_kiocb *req)
224 {
225 	/*
226 	 * Tell lockdep we inherited freeze protection from submission
227 	 * thread.
228 	 */
229 	if (req->flags & REQ_F_ISREG) {
230 		struct super_block *sb = file_inode(req->file)->i_sb;
231 
232 		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
233 		sb_end_write(sb);
234 	}
235 }
236 
237 /*
238  * Trigger the notifications after having done some IO, and finish the write
239  * accounting, if any.
240  */
241 static void io_req_io_end(struct io_kiocb *req)
242 {
243 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
244 
245 	if (rw->kiocb.ki_flags & IOCB_WRITE) {
246 		kiocb_end_write(req);
247 		fsnotify_modify(req->file);
248 	} else {
249 		fsnotify_access(req->file);
250 	}
251 }
252 
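/*
 * Shared by io_complete_rw() and the inline completion in kiocb_done(): an
 * unexpected -EAGAIN/-EOPNOTSUPP result may be turned into an in-kernel
 * reissue rather than failing the request.
 */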
253 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
254 {
255 	if (unlikely(res != req->cqe.res)) {
256 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
257 		    io_rw_should_reissue(req)) {
258 			/*
259 			 * Reissue will start accounting again, finish the
260 			 * current cycle.
261 			 */
262 			io_req_io_end(req);
263 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
264 			return true;
265 		}
266 		req_set_fail(req);
267 		req->cqe.res = res;
268 	}
269 	return false;
270 }
271 
272 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
273 {
274 	struct io_async_rw *io = req->async_data;
275 
276 	/* add previously done IO, if any */
277 	if (req_has_async_data(req) && io->bytes_done > 0) {
278 		if (res < 0)
279 			res = io->bytes_done;
280 		else
281 			res += io->bytes_done;
282 	}
283 	return res;
284 }
285 
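/*
 * task_work completion handler: finish write accounting / fsnotify, account
 * any selected or ring-provided buffer in the CQE flags, then post the CQE.
 */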
286 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
287 {
288 	io_req_io_end(req);
289 
290 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
291 		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
292 
293 		req->cqe.flags |= io_put_kbuf(req, issue_flags);
294 	}
295 	io_req_task_complete(req, locked);
296 }
297 
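/*
 * ->ki_complete for regular (non-IOPOLL) requests: defer the final
 * accounting and CQE posting to task_work via io_req_rw_complete().
 */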
298 static void io_complete_rw(struct kiocb *kiocb, long res)
299 {
300 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
301 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
302 
303 	if (__io_complete_rw_common(req, res))
304 		return;
305 	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
306 	req->io_task_work.func = io_req_rw_complete;
307 	io_req_task_work_add(req);
308 }
309 
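/*
 * ->ki_complete for IOPOLL requests: no task_work here, just publish the
 * result and let io_do_iopoll() reap it once ->iopoll_completed is visible.
 */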
310 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
311 {
312 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
313 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
314 
315 	if (kiocb->ki_flags & IOCB_WRITE)
316 		kiocb_end_write(req);
317 	if (unlikely(res != req->cqe.res)) {
318 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
319 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
320 			return;
321 		}
322 		req->cqe.res = res;
323 	}
324 
325 	/* order with io_iopoll_complete() checking ->iopoll_completed */
326 	smp_store_release(&req->iopoll_completed, 1);
327 }
328 
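/*
 * Finish an inline issue attempt: a successful non-IOPOLL result is
 * completed right here, everything else goes through io_rw_done() and, if
 * flagged, the reissue path.
 */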
329 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
330 		       unsigned int issue_flags)
331 {
332 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
333 	unsigned final_ret = io_fixup_rw_res(req, ret);
334 
335 	if (req->flags & REQ_F_CUR_POS)
336 		req->file->f_pos = rw->kiocb.ki_pos;
337 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
338 		if (!__io_complete_rw_common(req, ret)) {
339 			/*
340 			 * Safe to call io_req_io_end() from here as we're inline
341 			 * from the submission path.
342 			 */
343 			io_req_io_end(req);
344 			io_req_set_res(req, final_ret,
345 				       io_put_kbuf(req, issue_flags));
346 			return IOU_OK;
347 		}
348 	} else {
349 		io_rw_done(&rw->kiocb, ret);
350 	}
351 
352 	if (req->flags & REQ_F_REISSUE) {
353 		req->flags &= ~REQ_F_REISSUE;
354 		if (io_resubmit_prep(req))
355 			io_req_task_queue_reissue(req);
356 		else
357 			io_req_task_queue_fail(req, final_ret);
358 	}
359 	return IOU_ISSUE_SKIP_COMPLETE;
360 }
361 
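/*
 * Build the iov_iter for this request: registered buffers map through
 * io_import_fixed(), plain READ/WRITE and provided buffers become a single
 * ubuf iter, and readv/writev import a user iovec array (inline fast_iov
 * for small counts, otherwise an allocation the caller must free).
 */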
362 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
363 				       struct io_rw_state *s,
364 				       unsigned int issue_flags)
365 {
366 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
367 	struct iov_iter *iter = &s->iter;
368 	u8 opcode = req->opcode;
369 	struct iovec *iovec;
370 	void __user *buf;
371 	size_t sqe_len;
372 	ssize_t ret;
373 
374 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
375 		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
376 		if (ret)
377 			return ERR_PTR(ret);
378 		return NULL;
379 	}
380 
381 	buf = u64_to_user_ptr(rw->addr);
382 	sqe_len = rw->len;
383 
384 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
385 	    (req->flags & REQ_F_BUFFER_SELECT)) {
386 		if (io_do_buffer_select(req)) {
387 			buf = io_buffer_select(req, &sqe_len, issue_flags);
388 			if (!buf)
389 				return ERR_PTR(-ENOBUFS);
390 			rw->addr = (unsigned long) buf;
391 			rw->len = sqe_len;
392 		}
393 
394 		ret = import_ubuf(ddir, buf, sqe_len, iter);
395 		if (ret)
396 			return ERR_PTR(ret);
397 		return NULL;
398 	}
399 
400 	iovec = s->fast_iov;
401 	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
402 			      req->ctx->compat);
403 	if (unlikely(ret < 0))
404 		return ERR_PTR(ret);
405 	return iovec;
406 }
407 
408 static inline int io_import_iovec(int rw, struct io_kiocb *req,
409 				  struct iovec **iovec, struct io_rw_state *s,
410 				  unsigned int issue_flags)
411 {
412 	*iovec = __io_import_iovec(rw, req, s, issue_flags);
413 	if (IS_ERR(*iovec))
414 		return PTR_ERR(*iovec);
415 
416 	iov_iter_save_state(&s->iter, &s->iter_state);
417 	return 0;
418 }
419 
420 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
421 {
422 	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
423 }
424 
425 /*
426  * For files that don't have ->read_iter() and ->write_iter(), handle them
427  * by looping over ->read() or ->write() manually.
428  */
429 static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
430 {
431 	struct kiocb *kiocb = &rw->kiocb;
432 	struct file *file = kiocb->ki_filp;
433 	ssize_t ret = 0;
434 	loff_t *ppos;
435 
436 	/*
437 	 * We don't support polled IO through this interface, and we can't
438 	 * support non-blocking either. For the latter, this just causes
439 	 * the kiocb to be handled from an async context.
440 	 */
441 	if (kiocb->ki_flags & IOCB_HIPRI)
442 		return -EOPNOTSUPP;
443 	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
444 	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
445 		return -EAGAIN;
446 
447 	ppos = io_kiocb_ppos(kiocb);
448 
449 	while (iov_iter_count(iter)) {
450 		struct iovec iovec;
451 		ssize_t nr;
452 
453 		if (iter_is_ubuf(iter)) {
454 			iovec.iov_base = iter->ubuf + iter->iov_offset;
455 			iovec.iov_len = iov_iter_count(iter);
456 		} else if (!iov_iter_is_bvec(iter)) {
457 			iovec = iov_iter_iovec(iter);
458 		} else {
459 			iovec.iov_base = u64_to_user_ptr(rw->addr);
460 			iovec.iov_len = rw->len;
461 		}
462 
463 		if (ddir == READ) {
464 			nr = file->f_op->read(file, iovec.iov_base,
465 					      iovec.iov_len, ppos);
466 		} else {
467 			nr = file->f_op->write(file, iovec.iov_base,
468 					       iovec.iov_len, ppos);
469 		}
470 
471 		if (nr < 0) {
472 			if (!ret)
473 				ret = nr;
474 			break;
475 		}
476 		ret += nr;
477 		if (!iov_iter_is_bvec(iter)) {
478 			iov_iter_advance(iter, nr);
479 		} else {
480 			rw->addr += nr;
481 			rw->len -= nr;
482 			if (!rw->len)
483 				break;
484 		}
485 		if (nr != iovec.iov_len)
486 			break;
487 	}
488 
489 	return ret;
490 }
491 
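/*
 * Copy the current iter into the request's async data so the IO can be
 * retried later. iovec-backed iters also need their segments preserved,
 * either by copying into the inline fast_iov or by keeping the allocated
 * iovec around until cleanup.
 */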
492 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
493 			  const struct iovec *fast_iov, struct iov_iter *iter)
494 {
495 	struct io_async_rw *io = req->async_data;
496 
497 	memcpy(&io->s.iter, iter, sizeof(*iter));
498 	io->free_iovec = iovec;
499 	io->bytes_done = 0;
500 	/* bvec (registered bufs) and ubuf iters have no iovec array to preserve */
501 	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
502 		return;
503 	if (!iovec) {
504 		unsigned iov_off = 0;
505 
506 		io->s.iter.iov = io->s.fast_iov;
507 		if (iter->iov != fast_iov) {
508 			iov_off = iter->iov - fast_iov;
509 			io->s.iter.iov += iov_off;
510 		}
511 		if (io->s.fast_iov != fast_iov)
512 			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
513 			       sizeof(struct iovec) * iter->nr_segs);
514 	} else {
515 		req->flags |= REQ_F_NEED_CLEANUP;
516 	}
517 }
518 
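/*
 * Allocate async data (if not already present) and snapshot the iter state
 * so a punted or partially completed request can restart from a known point.
 */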
519 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
520 			     struct io_rw_state *s, bool force)
521 {
522 	if (!force && !io_cold_defs[req->opcode].prep_async)
523 		return 0;
524 	if (!req_has_async_data(req)) {
525 		struct io_async_rw *iorw;
526 
527 		if (io_alloc_async_data(req)) {
528 			kfree(iovec);
529 			return -ENOMEM;
530 		}
531 
532 		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
533 		iorw = req->async_data;
534 		/* we've copied and mapped the iter, ensure state is saved */
535 		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
536 	}
537 	return 0;
538 }
539 
540 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
541 {
542 	struct io_async_rw *iorw = req->async_data;
543 	struct iovec *iov;
544 	int ret;
545 
546 	/* submission path, ->uring_lock should already be taken */
547 	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
548 	if (unlikely(ret < 0))
549 		return ret;
550 
551 	iorw->bytes_done = 0;
552 	iorw->free_iovec = iov;
553 	if (iov)
554 		req->flags |= REQ_F_NEED_CLEANUP;
555 	return 0;
556 }
557 
558 int io_readv_prep_async(struct io_kiocb *req)
559 {
560 	return io_rw_prep_async(req, ITER_DEST);
561 }
562 
563 int io_writev_prep_async(struct io_kiocb *req)
564 {
565 	return io_rw_prep_async(req, ITER_SOURCE);
566 }
567 
568 /*
569  * This is our waitqueue callback handler, registered through __folio_lock_async()
570  * when we initially tried to do the IO and armed our waitqueue in the iocb.
571  * This gets called when the page is unlocked, and we generally expect that to
572  * happen when the page IO is completed and the page is now uptodate. This will
573  * queue a task_work based retry of the operation, attempting to copy the data
574  * again. If the latter fails because the page was NOT uptodate, then we will
575  * do a thread based blocking retry of the operation. That's the unexpected
576  * slow path.
577  */
578 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
579 			     int sync, void *arg)
580 {
581 	struct wait_page_queue *wpq;
582 	struct io_kiocb *req = wait->private;
583 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
584 	struct wait_page_key *key = arg;
585 
586 	wpq = container_of(wait, struct wait_page_queue, wait);
587 
588 	if (!wake_page_match(wpq, key))
589 		return 0;
590 
591 	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
592 	list_del_init(&wait->entry);
593 	io_req_task_queue(req);
594 	return 1;
595 }
596 
597 /*
598  * This controls whether a given IO request should be armed for async page
599  * based retry. If we return false here, the request is handed to the async
600  * worker threads for retry. If we're doing buffered reads on a regular file,
601  * we prepare a private wait_page_queue entry and retry the operation. This
602  * will either succeed because the page is now uptodate and unlocked, or it
603  * will register a callback when the page is unlocked at IO completion. Through
604  * that callback, io_uring uses task_work to set up a retry of the operation.
605  * That retry will attempt the buffered read again. The retry will generally
606  * succeed, or in rare cases where it fails, we then fall back to using the
607  * async worker threads for a blocking retry.
608  */
609 static bool io_rw_should_retry(struct io_kiocb *req)
610 {
611 	struct io_async_rw *io = req->async_data;
612 	struct wait_page_queue *wait = &io->wpq;
613 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
614 	struct kiocb *kiocb = &rw->kiocb;
615 
616 	/* never retry for NOWAIT, we just complete with -EAGAIN */
617 	if (req->flags & REQ_F_NOWAIT)
618 		return false;
619 
620 	/* Only for buffered IO */
621 	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
622 		return false;
623 
624 	/*
625 	 * just use poll if we can, and don't attempt if the fs doesn't
626 	 * support callback based unlocks
627 	 */
628 	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
629 		return false;
630 
631 	wait->wait.func = io_async_buf_func;
632 	wait->wait.private = req;
633 	wait->wait.flags = 0;
634 	INIT_LIST_HEAD(&wait->wait.entry);
635 	kiocb->ki_flags |= IOCB_WAITQ;
636 	kiocb->ki_flags &= ~IOCB_NOWAIT;
637 	kiocb->ki_waitq = wait;
638 	return true;
639 }
640 
641 static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
642 {
643 	struct file *file = rw->kiocb.ki_filp;
644 
645 	if (likely(file->f_op->read_iter))
646 		return call_read_iter(file, &rw->kiocb, iter);
647 	else if (file->f_op->read)
648 		return loop_rw_iter(READ, rw, iter);
649 	else
650 		return -EINVAL;
651 }
652 
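/*
 * Regular files and block devices are expected to transfer the full amount,
 * so short IO on them is retried/completed rather than returned short.
 */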
653 static bool need_complete_io(struct io_kiocb *req)
654 {
655 	return req->flags & REQ_F_ISREG ||
656 		S_ISBLK(file_inode(req->file)->i_mode);
657 }
658 
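/*
 * Per-issue kiocb setup: propagate the file's iocb flags and the SQE rw
 * flags, decide whether this attempt may block, and pick the completion
 * handler (IOPOLL vs task_work based).
 */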
659 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
660 {
661 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
662 	struct kiocb *kiocb = &rw->kiocb;
663 	struct io_ring_ctx *ctx = req->ctx;
664 	struct file *file = req->file;
665 	int ret;
666 
667 	if (unlikely(!file || !(file->f_mode & mode)))
668 		return -EBADF;
669 
670 	if (!io_req_ffs_set(req))
671 		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
672 
673 	kiocb->ki_flags = file->f_iocb_flags;
674 	ret = kiocb_set_rw_flags(kiocb, rw->flags);
675 	if (unlikely(ret))
676 		return ret;
677 	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
678 
679 	/*
680 	 * If the file is marked O_NONBLOCK, still allow retry for it if it
681 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
682 	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
683 	 */
684 	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
685 	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
686 		req->flags |= REQ_F_NOWAIT;
687 
688 	if (ctx->flags & IORING_SETUP_IOPOLL) {
689 		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
690 			return -EOPNOTSUPP;
691 
692 		kiocb->private = NULL;
693 		kiocb->ki_flags |= IOCB_HIPRI;
694 		kiocb->ki_complete = io_complete_rw_iopoll;
695 		req->iopoll_completed = 0;
696 	} else {
697 		if (kiocb->ki_flags & IOCB_HIPRI)
698 			return -EINVAL;
699 		kiocb->ki_complete = io_complete_rw;
700 	}
701 
702 	return 0;
703 }
704 
705 int io_read(struct io_kiocb *req, unsigned int issue_flags)
706 {
707 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
708 	struct io_rw_state __s, *s = &__s;
709 	struct iovec *iovec;
710 	struct kiocb *kiocb = &rw->kiocb;
711 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
712 	struct io_async_rw *io;
713 	ssize_t ret, ret2;
714 	loff_t *ppos;
715 
716 	if (!req_has_async_data(req)) {
717 		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
718 		if (unlikely(ret < 0))
719 			return ret;
720 	} else {
721 		io = req->async_data;
722 		s = &io->s;
723 
724 		/*
725 		 * Safe and required to re-import if we're using provided
726 		 * buffers, as we dropped the selected one before retry.
727 		 */
728 		if (io_do_buffer_select(req)) {
729 			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
730 			if (unlikely(ret < 0))
731 				return ret;
732 		}
733 
734 		/*
735 		 * We come here from an earlier attempt, restore our state to
736 		 * match in case it doesn't. It's cheap enough that we don't
737 		 * need to make this conditional.
738 		 */
739 		iov_iter_restore(&s->iter, &s->iter_state);
740 		iovec = NULL;
741 	}
742 	ret = io_rw_init_file(req, FMODE_READ);
743 	if (unlikely(ret)) {
744 		kfree(iovec);
745 		return ret;
746 	}
747 	req->cqe.res = iov_iter_count(&s->iter);
748 
749 	if (force_nonblock) {
750 		/* If the file doesn't support async, just async punt */
751 		if (unlikely(!io_file_supports_nowait(req))) {
752 			ret = io_setup_async_rw(req, iovec, s, true);
753 			return ret ?: -EAGAIN;
754 		}
755 		kiocb->ki_flags |= IOCB_NOWAIT;
756 	} else {
757 		/* Ensure we clear previously set non-block flag */
758 		kiocb->ki_flags &= ~IOCB_NOWAIT;
759 	}
760 
761 	ppos = io_kiocb_update_pos(req);
762 
763 	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
764 	if (unlikely(ret)) {
765 		kfree(iovec);
766 		return ret;
767 	}
768 
769 	ret = io_iter_do_read(rw, &s->iter);
770 
771 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
772 		req->flags &= ~REQ_F_REISSUE;
773 		/* if we can poll, just do that */
774 		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
775 			return -EAGAIN;
776 		/* IOPOLL retry should happen for io-wq threads */
777 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
778 			goto done;
779 		/* no retry on NONBLOCK nor RWF_NOWAIT */
780 		if (req->flags & REQ_F_NOWAIT)
781 			goto done;
782 		ret = 0;
783 	} else if (ret == -EIOCBQUEUED) {
784 		if (iovec)
785 			kfree(iovec);
786 		return IOU_ISSUE_SKIP_COMPLETE;
787 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
788 		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
789 		/* read all, failed, already did sync or don't want to retry */
790 		goto done;
791 	}
792 
793 	/*
794 	 * Don't depend on the iter state matching what was consumed, or being
795 	 * untouched in case of error. Restore it and we'll advance it
796 	 * manually if we need to.
797 	 */
798 	iov_iter_restore(&s->iter, &s->iter_state);
799 
800 	ret2 = io_setup_async_rw(req, iovec, s, true);
801 	iovec = NULL;
802 	if (ret2) {
803 		ret = ret > 0 ? ret : ret2;
804 		goto done;
805 	}
806 
807 	io = req->async_data;
808 	s = &io->s;
809 	/*
810 	 * Now use our persistent iterator and state, if we aren't already.
811 	 * We've restored and mapped the iter to match.
812 	 */
813 
814 	do {
815 		/*
816 		 * We end up here because of a partial read, either from
817 		 * above or inside this loop. Advance the iter by the bytes
818 		 * that were consumed.
819 		 */
820 		iov_iter_advance(&s->iter, ret);
821 		if (!iov_iter_count(&s->iter))
822 			break;
823 		io->bytes_done += ret;
824 		iov_iter_save_state(&s->iter, &s->iter_state);
825 
826 		/* if we can retry, do so with the callbacks armed */
827 		if (!io_rw_should_retry(req)) {
828 			kiocb->ki_flags &= ~IOCB_WAITQ;
829 			return -EAGAIN;
830 		}
831 
832 		req->cqe.res = iov_iter_count(&s->iter);
833 		/*
834 		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
835 		 * we get -EIOCBQUEUED, then we'll get a notification when the
836 		 * desired page gets unlocked. We can also get a partial read
837 		 * here, and if we do, then just retry at the new offset.
838 		 */
839 		ret = io_iter_do_read(rw, &s->iter);
840 		if (ret == -EIOCBQUEUED)
841 			return IOU_ISSUE_SKIP_COMPLETE;
842 		/* we got some bytes, but not all. retry. */
843 		kiocb->ki_flags &= ~IOCB_WAITQ;
844 		iov_iter_restore(&s->iter, &s->iter_state);
845 	} while (ret > 0);
846 done:
847 	/* it's faster to check here than to delegate to kfree */
848 	if (iovec)
849 		kfree(iovec);
850 	return kiocb_done(req, ret, issue_flags);
851 }
852 
853 int io_write(struct io_kiocb *req, unsigned int issue_flags)
854 {
855 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
856 	struct io_rw_state __s, *s = &__s;
857 	struct iovec *iovec;
858 	struct kiocb *kiocb = &rw->kiocb;
859 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
860 	ssize_t ret, ret2;
861 	loff_t *ppos;
862 
863 	if (!req_has_async_data(req)) {
864 		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
865 		if (unlikely(ret < 0))
866 			return ret;
867 	} else {
868 		struct io_async_rw *io = req->async_data;
869 
870 		s = &io->s;
871 		iov_iter_restore(&s->iter, &s->iter_state);
872 		iovec = NULL;
873 	}
874 	ret = io_rw_init_file(req, FMODE_WRITE);
875 	if (unlikely(ret)) {
876 		kfree(iovec);
877 		return ret;
878 	}
879 	req->cqe.res = iov_iter_count(&s->iter);
880 
881 	if (force_nonblock) {
882 		/* If the file doesn't support async, just async punt */
883 		if (unlikely(!io_file_supports_nowait(req)))
884 			goto copy_iov;
885 
886 		/* Buffered (non-direct) IO on a regular file needs FMODE_BUF_WASYNC for NOWAIT. */
887 		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
888 			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
889 			(req->flags & REQ_F_ISREG))
890 			goto copy_iov;
891 
892 		kiocb->ki_flags |= IOCB_NOWAIT;
893 	} else {
894 		/* Ensure we clear previously set non-block flag */
895 		kiocb->ki_flags &= ~IOCB_NOWAIT;
896 	}
897 
898 	ppos = io_kiocb_update_pos(req);
899 
900 	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
901 	if (unlikely(ret)) {
902 		kfree(iovec);
903 		return ret;
904 	}
905 
906 	/*
907 	 * Open-code file_start_write here to grab freeze protection,
908 	 * which will be released by another thread in
909 	 * io_complete_rw().  Fool lockdep by telling it the lock got
910 	 * released so that it doesn't complain about the held lock when
911 	 * we return to userspace.
912 	 */
913 	if (req->flags & REQ_F_ISREG) {
914 		sb_start_write(file_inode(req->file)->i_sb);
915 		__sb_writers_release(file_inode(req->file)->i_sb,
916 					SB_FREEZE_WRITE);
917 	}
918 	kiocb->ki_flags |= IOCB_WRITE;
919 
920 	if (likely(req->file->f_op->write_iter))
921 		ret2 = call_write_iter(req->file, kiocb, &s->iter);
922 	else if (req->file->f_op->write)
923 		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
924 	else
925 		ret2 = -EINVAL;
926 
927 	if (req->flags & REQ_F_REISSUE) {
928 		req->flags &= ~REQ_F_REISSUE;
929 		ret2 = -EAGAIN;
930 	}
931 
932 	/*
933 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
934 	 * retry them without IOCB_NOWAIT.
935 	 */
936 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
937 		ret2 = -EAGAIN;
938 	/* no retry on NONBLOCK nor RWF_NOWAIT */
939 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
940 		goto done;
941 	if (!force_nonblock || ret2 != -EAGAIN) {
942 		/* IOPOLL retry should happen for io-wq threads */
943 		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
944 			goto copy_iov;
945 
946 		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
947 			struct io_async_rw *io;
948 
949 			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
950 						req->cqe.res, ret2);
951 
952 			/* This is a partial write. The file pos has already been
953 			 * updated, set up the async struct to complete the request
954 			 * in the worker. Also update bytes_done to account for
955 			 * the bytes already written.
956 			 */
957 			iov_iter_save_state(&s->iter, &s->iter_state);
958 			ret = io_setup_async_rw(req, iovec, s, true);
959 
960 			io = req->async_data;
961 			if (io)
962 				io->bytes_done += ret2;
963 
964 			if (kiocb->ki_flags & IOCB_WRITE)
965 				kiocb_end_write(req);
966 			return ret ? ret : -EAGAIN;
967 		}
968 done:
969 		ret = kiocb_done(req, ret2, issue_flags);
970 	} else {
971 copy_iov:
972 		iov_iter_restore(&s->iter, &s->iter_state);
973 		ret = io_setup_async_rw(req, iovec, s, false);
974 		if (!ret) {
975 			if (kiocb->ki_flags & IOCB_WRITE)
976 				kiocb_end_write(req);
977 			return -EAGAIN;
978 		}
979 		return ret;
980 	}
981 	/* it's reportedly faster than delegating the null check to kfree() */
982 	if (iovec)
983 		kfree(iovec);
984 	return ret;
985 }
986 
987 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
988 {
989 	io_commit_cqring_flush(ctx);
990 	if (ctx->flags & IORING_SETUP_SQPOLL)
991 		io_cqring_wake(ctx);
992 }
993 
994 void io_rw_fail(struct io_kiocb *req)
995 {
996 	int res;
997 
998 	res = io_fixup_rw_res(req, req->cqe.res);
999 	io_req_set_res(req, res, req->cqe.flags);
1000 }
1001 
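/*
 * Reap completions for IORING_SETUP_IOPOLL rings: poll each pending request
 * on ->iopoll_list, flush the driver's completion batch, then post CQEs for
 * requests whose ->iopoll_completed has been published and prune them from
 * the list.
 */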
1002 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1003 {
1004 	struct io_wq_work_node *pos, *start, *prev;
1005 	unsigned int poll_flags = BLK_POLL_NOSLEEP;
1006 	DEFINE_IO_COMP_BATCH(iob);
1007 	int nr_events = 0;
1008 
1009 	/*
1010 	 * Only spin for completions if we don't have multiple devices hanging
1011 	 * off our complete list.
1012 	 */
1013 	if (ctx->poll_multi_queue || force_nonspin)
1014 		poll_flags |= BLK_POLL_ONESHOT;
1015 
1016 	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1017 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1018 		struct file *file = req->file;
1019 		int ret;
1020 
1021 		/*
1022 		 * Move completed and retryable entries to our local lists.
1023 		 * If we find a request that requires polling, break out
1024 		 * and complete those lists first, if we have entries there.
1025 		 */
1026 		if (READ_ONCE(req->iopoll_completed))
1027 			break;
1028 
1029 		if (req->opcode == IORING_OP_URING_CMD) {
1030 			struct io_uring_cmd *ioucmd;
1031 
1032 			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1033 			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1034 								poll_flags);
1035 		} else {
1036 			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1037 
1038 			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1039 		}
1040 		if (unlikely(ret < 0))
1041 			return ret;
1042 		else if (ret)
1043 			poll_flags |= BLK_POLL_ONESHOT;
1044 
1045 		/* iopoll may have completed current req */
1046 		if (!rq_list_empty(iob.req_list) ||
1047 		    READ_ONCE(req->iopoll_completed))
1048 			break;
1049 	}
1050 
1051 	if (!rq_list_empty(iob.req_list))
1052 		iob.complete(&iob);
1053 	else if (!pos)
1054 		return 0;
1055 
1056 	prev = start;
1057 	wq_list_for_each_resume(pos, prev) {
1058 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1059 
1060 		/* order with io_complete_rw_iopoll(), e.g. ->cqe.res updates */
1061 		if (!smp_load_acquire(&req->iopoll_completed))
1062 			break;
1063 		nr_events++;
1064 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
1065 			continue;
1066 
1067 		req->cqe.flags = io_put_kbuf(req, 0);
1068 		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
1069 			spin_lock(&ctx->completion_lock);
1070 			io_req_cqe_overflow(req);
1071 			spin_unlock(&ctx->completion_lock);
1072 		}
1073 	}
1074 
1075 	if (unlikely(!nr_events))
1076 		return 0;
1077 
1078 	io_commit_cqring(ctx);
1079 	io_cqring_ev_posted_iopoll(ctx);
1080 	pos = start ? start->next : ctx->iopoll_list.first;
1081 	wq_list_cut(&ctx->iopoll_list, prev, start);
1082 	io_free_batch_list(ctx, pos);
1083 	return nr_events;
1084 }
1085