xref: /linux/io_uring/rw.c (revision 390db60f8e2bd21fae544917eb3a8618265c058c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "filetable.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

static void io_complete_rw(struct kiocb *kiocb, long res);
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

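/*
 * addr/len/flags mirror sqe->addr, sqe->len and sqe->rw_flags, copied in
 * __io_prep_rw(). For buffer-select requests, addr/len are rewritten at
 * issue time to point at the provided buffer that was picked.
 */
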
static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

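/*
 * Illustrative note on the vfs_poll() fallback above: the caller passes
 * the readiness it cares about (EPOLLIN from __io_read(), EPOLLOUT from
 * io_write()), so a pollable file that lacks FMODE_NOWAIT still gets an
 * immediate nonblocking attempt when it is already ready.
 */
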
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov = u64_to_user_ptr(rw->addr);
	struct compat_iovec iov;

	if (copy_from_user(&iov, uiov, sizeof(iov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

	if (io_is_compat(req->ctx))
		return io_iov_compat_buffer_select_prep(rw);

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}

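/*
 * Example of the single-iovec rule above: a READV with buffer selection
 * must pass exactly one iovec (rw->len holds the segment count at this
 * point). Only iov_len is consumed, e.g. { .iov_base = NULL, .iov_len =
 * 4096 } caps whatever provided buffer gets selected at 4096 bytes.
 */
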
static int io_import_vec(int ddir, struct io_kiocb *req,
			 struct io_async_rw *io,
			 const struct iovec __user *uvec,
			 size_t uvec_segs)
{
	int ret, nr_segs;
	struct iovec *iov;

	if (io->vec.iovec) {
		nr_segs = io->vec.nr;
		iov = io->vec.iovec;
	} else {
		nr_segs = 1;
		iov = &io->fast_iov;
	}

	ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
			     io_is_compat(req->ctx));
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
	}
	return 0;
}

static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
				 struct io_async_rw *io, struct io_br_sel *sel,
				 unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	size_t sqe_len = rw->len;

	sel->addr = u64_to_user_ptr(rw->addr);
	if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
		return io_import_vec(ddir, req, io, sel->addr, sqe_len);

	if (io_do_buffer_select(req)) {
		*sel = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
		if (!sel->addr)
			return -ENOBUFS;
		rw->addr = (unsigned long) sel->addr;
		rw->len = sqe_len;
	}
	return import_ubuf(ddir, sel->addr, sqe_len, &io->iter);
}

static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
				      struct io_async_rw *io,
				      struct io_br_sel *sel,
				      unsigned int issue_flags)
{
	int ret;

	ret = __io_import_rw_buffer(rw, req, io, sel, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		return;

	io_alloc_cache_vec_kasan(&rw->vec);
	if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&rw->vec);

	if (io_alloc_cache_put(&req->ctx->rw_cache, rw))
		io_req_async_data_clear(req, 0);
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, eg to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *     punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this, any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}

static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
	if (!rw)
		return -ENOMEM;
	if (rw->vec.iovec)
		req->flags |= REQ_F_NEED_CLEANUP;
	rw->bytes_done = 0;
	return 0;
}

static inline void io_meta_save_state(struct io_async_rw *io)
{
	io->meta_state.seed = io->meta.seed;
	iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
}

static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_HAS_METADATA) {
		io->meta.seed = io->meta_state.seed;
		iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
	}
}

static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
			 u64 attr_ptr, u64 attr_type_mask)
{
	struct io_uring_attr_pi pi_attr;
	struct io_async_rw *io;
	int ret;

	if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
	    sizeof(pi_attr)))
		return -EFAULT;

	if (pi_attr.rsvd)
		return -EINVAL;

	io = req->async_data;
	io->meta.flags = pi_attr.flags;
	io->meta.app_tag = pi_attr.app_tag;
	io->meta.seed = pi_attr.seed;
	ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
			  pi_attr.len, &io->meta.iter);
	if (unlikely(ret < 0))
		return ret;
	req->flags |= REQ_F_HAS_METADATA;
	io_meta_save_state(io);
	return ret;
}

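/*
 * A minimal userspace sketch of attaching PI metadata to a read/write SQE,
 * assuming raw SQE access; names are from the UAPI used above, while the
 * check flags and buffers are placeholders:
 *
 *	struct io_uring_attr_pi pi = {
 *		.addr	= (__u64)(uintptr_t)pi_buf,
 *		.len	= pi_buf_len,
 *		.flags	= pi_check_flags,
 *		.seed	= seed,
 *	};
 *	sqe->attr_type_mask = IORING_RW_ATTR_FLAG_PI;
 *	sqe->attr_ptr = (__u64)(uintptr_t)&pi;
 */
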
static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io;
	unsigned ioprio;
	u64 attr_type_mask;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;
	io = req->async_data;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);
	io->buf_group = req->buf_index;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;
	rw->kiocb.ki_flags = 0;
	rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		rw->kiocb.ki_complete = io_complete_rw_iopoll;
	else
		rw->kiocb.ki_complete = io_complete_rw;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = (__force rwf_t) READ_ONCE(sqe->rw_flags);

	attr_type_mask = READ_ONCE(sqe->attr_type_mask);
	if (attr_type_mask) {
		u64 attr_ptr;

		/* only PI attribute is supported currently */
		if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
			return -EINVAL;

		attr_ptr = READ_ONCE(sqe->attr_ptr);
		return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
	}
	return 0;
}

static int io_rw_do_import(struct io_kiocb *req, int ddir)
{
	struct io_br_sel sel = { };

	if (io_do_buffer_select(req))
		return 0;

	return io_import_rw_buffer(ddir, req, req->async_data, &sel, 0);
}

static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;

	return io_rw_do_import(req, ddir);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	int ret;

	ret = io_prep_rw(req, sqe, ddir);
	if (unlikely(ret))
		return ret;
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return 0;

	/*
	 * Have to do this validation here, as by the time io_read() runs,
	 * rw->len might have changed due to buffer selection
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	int ret;

	if (io->bytes_done)
		return 0;

	ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
				issue_flags);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_prep_rw(req, sqe, ITER_SOURCE);
}

static int io_rw_import_reg_vec(struct io_kiocb *req,
				struct io_async_rw *io,
				int ddir, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned uvec_segs = rw->len;
	int ret;

	ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
				uvec_segs, issue_flags);
	if (unlikely(ret))
		return ret;
	iov_iter_save_state(&io->iter, &io->iter_state);
	req->flags &= ~REQ_F_IMPORT_BUFFER;
	return 0;
}

static int io_rw_prep_reg_vec(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	const struct iovec __user *uvec;

	uvec = u64_to_user_ptr(rw->addr);
	return io_prep_reg_iovec(req, &io->vec, uvec, rw->len);
}

int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;
	return io_rw_prep_reg_vec(req);
}

int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = __io_prep_rw(req, sqe, ITER_SOURCE);
	if (unlikely(ret))
		return ret;
	return io_rw_prep_reg_vec(req);
}

/*
 * Multishot read is prepared just like a normal read/write request, the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = __io_prep_rw(req, sqe, ITER_DEST);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

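/*
 * A hedged liburing-style sketch of arming the above: multishot reads
 * need a provided-buffer group, and addr/len must be zero:
 *
 *	io_uring_prep_read_multishot(sqe, fd, 0, 0, buf_group);
 *
 * Each CQE then carries the buffer id via IORING_CQE_F_BUFFER, and
 * IORING_CQE_F_MORE while the request remains armed.
 */
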
void io_readv_writev_cleanup(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->uring_lock);
	io_rw_recycle(req, 0);
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}

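/*
 * Note the convention above: a ki_pos of -1 (offset -1 in the SQE) means
 * "use and update the file position", mirroring read(2)/write(2) versus
 * pread(2)/pwrite(2); stream-like files have no position at all.
 */
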
static bool io_rw_should_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_async_rw *io = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;

	io_meta_restore(io, &rw->kiocb);
	iov_iter_restore(&io->iter, &io->iter_state);
	return true;
#else
	return false;
#endif
}

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}

static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (res == req->cqe.res)
		return;
	if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) {
		req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
	} else {
		req_set_fail(req);
		req->cqe.res = res;
	}
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

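/*
 * Worked example for io_fixup_rw_res(): an 8K buffered read that got 4K
 * on the first attempt records bytes_done = 4096. If the retry returns
 * the remaining 4096, the posted result is 8192; if the retry fails
 * instead, the 4096 already done is reported rather than the error.
 */
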
void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, tw);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		__io_complete_rw_common(req, res);
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req))
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
		else
			req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		io_complete_rw_iopoll(&rw->kiocb, ret);
	else
		io_complete_rw(&rw->kiocb, ret);
}

static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      struct io_br_sel *sel, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
		u32 cflags = 0;

		__io_complete_rw_common(req, ret);
		/*
		 * Safe to call io_end from here as we're inline
		 * from the submission path.
		 */
		io_req_io_end(req);
		if (sel)
			cflags = io_put_kbuf(req, ret, sel->buf_list);
		io_req_set_res(req, final_ret, cflags);
		io_req_rw_cleanup(req, issue_flags);
		return IOU_COMPLETE;
	} else {
		io_rw_done(req, ret);
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct io_kiocb *req = cmd_to_io_kiocb(rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;
	if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf)
		return -EFAULT;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/*
	 * Never retry for NOWAIT or a request with metadata, we just complete
	 * with -EAGAIN.
	 */
	if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

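/*
 * Regular files and block devices aren't expected to return short reads
 * or writes without reason, so partial results there are completed from
 * a worker (see the partial handling in __io_read() and io_write());
 * sockets and pipes may legitimately transfer less than requested.
 */
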
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;
		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
	}

	if (req->flags & REQ_F_HAS_METADATA) {
		struct io_async_rw *io = req->async_data;

		if (!(file->f_mode & FMODE_HAS_METADATA))
			return -EINVAL;

		/*
		 * We have a union of meta fields with wpq used for buffered-io
		 * in io_async_rw, so fail it here.
		 */
		if (!(req->file->f_flags & O_DIRECT))
			return -EOPNOTSUPP;
		kiocb->ki_flags |= IOCB_HAS_METADATA;
		kiocb->private = &io->meta;
	}

	return 0;
}

static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
		     unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags);
		if (unlikely(ret))
			return ret;
	} else if (io_do_buffer_select(req)) {
		ret = io_import_rw_buffer(ITER_DEST, req, io, sel, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN) {
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
		   (issue_flags & IO_URING_F_MULTISHOT)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);
	io_meta_restore(io, kiocb);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than to delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_br_sel sel = { };
	int ret;

	ret = __io_read(req, &sel, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, &sel, issue_flags);

	if (req->flags & REQ_F_BUFFERS_COMMIT)
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_br_sel sel = { };
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	/* make it sync, multishot doesn't support async execution */
	rw->kiocb.ki_complete = NULL;
	ret = __io_read(req, &sel, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, sel.buf_list, issue_flags))
			rw->len = 0;
		return IOU_RETRY;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		cflags = io_put_kbuf(req, ret, sel.buf_list);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, sel.buf_list);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT)
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);

			return IOU_RETRY;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	return IOU_COMPLETE;
}

static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (req->flags & REQ_F_IMPORT_BUFFER) {
		ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						req->cqe.res, ret2);

			/*
			 * This is a partial write. The file pos has already
			 * been updated, set up the async struct to complete
			 * the request in the worker. Also update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, NULL, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		io_meta_restore(io, kiocb);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
	if (unlikely(ret))
		return ret;

	return io_read(req, issue_flags);
}

int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	return io_write(req, issue_flags);
}

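/*
 * A hedged liburing-style sketch for the fixed variants, assuming the
 * buffer was registered up front (buffer/fd names are placeholders):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	io_uring_register_buffers(&ring, &iov, 1);
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, offset, 0);
 *
 * The last argument is the index into the registered buffer table; it
 * lands in sqe->buf_index, which __io_prep_rw() reads unconditionally.
 */
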
void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* sleep for half the observed completion time before polling */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

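/*
 * Hybrid iopoll (IORING_SETUP_HYBRID_IOPOLL) trades a bit of latency for
 * CPU time: sleep roughly half the observed completion time before
 * busy-polling. io_uring_hybrid_poll() below subtracts that sleep from
 * the measured runtime and shrinks ctx->hybrid_poll_time toward the
 * fastest completion seen.
 */
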
static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time;
	int ret;

	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

	/*
	 * Use minimum sleep time if we're polling devices with different
	 * latencies. We could get more completions from the faster ones.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(&iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(&iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	io_vec_free(&rw->vec);
	kfree(rw);
}