xref: /linux/io_uring/rw.c (revision 90e0d94d369d342e735a75174439482119b6c393)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

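/*
 * With IOSQE_BUFFER_SELECT on a vectored read, userspace must pass exactly
 * one iovec: its iov_len is carried over as the length used for buffer
 * selection, while the buffer itself comes from the provided buffer pool.
 */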
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

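/*
 * Common SQE prep for the read/write opcodes: file offset, buffer index,
 * ioprio, address/length and rw_flags. For fixed buffers, the registered
 * buffer is looked up and its rsrc node pinned at submission time.
 */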
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	if (req->opcode == IORING_OP_READ_FIXED ||
	    req->opcode == IORING_OP_WRITE_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);

	/*
	 * Have to do this validation here, as by the time we're in io_read()
	 * rw->len might have changed due to buffer selection.
	 */
	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select_prep(req);
		if (ret)
			return ret;
	}

	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

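/*
 * A ki_pos of -1 means "use the file position". For non-stream files we
 * latch f_pos into the kiocb and mark the request with REQ_F_CUR_POS so
 * that completion writes the new position back; streams get a NULL ppos.
 */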
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume it's not safe to re-import and reissue if
	 * we're not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct super_block *sb = file_inode(req->file)->i_sb;

		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
		sb_end_write(sb);
	}
}

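/*
 * Handle a completion result that doesn't match what was submitted: either
 * flag the request for reissue (returning true), or record the short/failed
 * result and mark the request failed.
 */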
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		kiocb_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}

	io_req_task_complete(req, locked);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (__io_complete_rw_common(req, res))
		return;
	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	req->io_task_work.func = io_req_rw_complete;
	io_req_task_work_add(req);
}

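/*
 * Completion for IOPOLL requests: no CQE is posted from here. The result
 * is recorded and ->iopoll_completed published, and io_do_iopoll() then
 * reaps the request from the poll loop.
 */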
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

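/*
 * Finish an issue attempt: successful non-iopoll completions are posted
 * inline (IOU_OK), everything else goes through io_rw_done() and the
 * kiocb completion handler; a flagged reissue is queued via task_work.
 */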
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, final_ret);
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

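/*
 * Map the request's buffer description onto an iov_iter. Three cases:
 * fixed buffers import the registered buffer, READ/WRITE (and selected
 * buffers) import a single user range, and readv/writev import an iovec
 * array - the latter is returned for later freeing if it didn't fit in
 * the inline fast_iov.
 */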
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
	    (req->flags & REQ_F_BUFFER_SELECT)) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			      req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (unlikely(IS_ERR(*iovec)))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(rw->addr);
			iovec.iov_len = rw->len;
		}

		if (ddir == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, ppos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, ppos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != iovec.iov_len)
			break;
	}

	return ret;
}

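/*
 * Stash the iterator (and, if used, the inline iovec array) in the
 * request's async data so the IO can be retried from another context.
 * The iter is rebased onto the persistent fast_iov copy where needed.
 */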
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.iov = io->s.fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			io->s.iter.iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_op_defs[req->opcode].prep_async)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, READ);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, WRITE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

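/*
 * Only regular files and block devices get their short reads/writes
 * completed transparently; for sockets, pipes and the like, a partial
 * result is meaningful on its own.
 */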
static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

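/*
 * Per-issue kiocb setup: validate the file mode, apply rw_flags, work
 * out whether the request can be attempted without blocking, and select
 * the completion handler (iopoll vs regular).
 */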
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!io_req_ffs_set(req))
		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}

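/*
 * Issue a read. With IO_URING_F_NONBLOCK the attempt is made with
 * IOCB_NOWAIT and punted on -EAGAIN; short buffered reads are continued
 * through the IOCB_WAITQ page-unlock machinery where the fs supports it,
 * accumulating progress in io_async_rw->bytes_done.
 */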
int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* if we can poll, just do that */
		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		if (iovec)
			kfree(iovec);
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	iovec = NULL;
	if (ret2) {
		ret = ret > 0 ? ret : ret2;
		goto done;
	}

	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegating the NULL check to kfree() */
	if (iovec)
		kfree(iovec);
	return kiocb_done(req, ret, issue_flags);
}

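/*
 * Issue a write. Buffered writes to regular files without
 * FMODE_BUF_WASYNC can't be done nowait and are punted to io-wq. Freeze
 * protection is taken here and dropped at completion time, and short
 * writes to regular files/bdevs are finished from the worker.
 */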
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/*
		 * For non-direct IO, the file path only supports NOWAIT for
		 * block devices or files that opt in via FMODE_BUF_WASYNC;
		 * punt buffered writes to regular files.
		 */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
			(req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw().  Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *rw;

			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);

			rw = req->async_data;
			if (rw)
				rw->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				kiocb_end_write(req);
			return ret ? ret : -EAGAIN;
		}
done:
		ret = kiocb_done(req, ret2, issue_flags);
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		if (!ret) {
			if (kiocb->ki_flags & IOCB_WRITE)
				kiocb_end_write(req);
			return -EAGAIN;
		}
		return ret;
	}
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	io_commit_cqring_flush(ctx);
	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_wake(ctx);
}

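/*
 * Called when the request is failed without a normal completion; fold in
 * any bytes already done so the posted result reflects partial progress.
 */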
void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

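/*
 * Reap IOPOLL completions: poll entries on ->iopoll_list until one
 * completes, post CQEs for the leading run of completed requests, then
 * batch-free them. Returns the number of events posted.
 */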
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = BLK_POLL_NOSLEEP;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
								poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		if (unlikely(req->flags & REQ_F_CQE_SKIP))
			continue;

		req->cqe.flags = io_put_kbuf(req, 0);
		__io_fill_cqe_req(req->ctx, req);
	}

	if (unlikely(!nr_events))
		return 0;

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);
	io_free_batch_list(ctx, pos);
	return nr_events;
}