/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "tw.h"
#include "opdef.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

struct io_rings_layout {
	/* size of CQ + headers + SQ offset array */
	size_t rings_size;
	size_t sq_size;

	size_t sq_array_offset;
};

struct io_ctx_config {
	struct io_uring_params p;
	struct io_rings_layout layout;
	struct io_uring_params __user *uptr;
};
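
/*
 * Illustrative only: a simplified sketch of how the fields above relate for
 * a default setup. It ignores the alignment, overflow checking and flag
 * handling (IORING_SETUP_CQE32, IORING_SETUP_SQE128, IORING_SETUP_NO_SQARRAY,
 * ...) that the real setup code performs, and io_fill_layout_sketch() is a
 * hypothetical helper, not a kernel API.
 */
static inline void io_fill_layout_sketch(struct io_rings_layout *l,
					 unsigned int sq_entries,
					 unsigned int cq_entries)
{
	/* shared ring headers followed by the CQE array */
	l->rings_size = sizeof(struct io_rings) +
			cq_entries * sizeof(struct io_uring_cqe);
	/* the SQ index array sits at the end of the same mapping */
	l->sq_array_offset = l->rings_size;
	l->rings_size += sq_entries * sizeof(__u32);
	/* the SQE array lives in its own mapping */
	l->sq_size = sq_entries * sizeof(struct io_uring_sqe);
}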

#define IORING_FEAT_FLAGS (IORING_FEAT_SINGLE_MMAP |\
			IORING_FEAT_NODROP |\
			IORING_FEAT_SUBMIT_STABLE |\
			IORING_FEAT_RW_CUR_POS |\
			IORING_FEAT_CUR_PERSONALITY |\
			IORING_FEAT_FAST_POLL |\
			IORING_FEAT_POLL_32BITS |\
			IORING_FEAT_SQPOLL_NONFIXED |\
			IORING_FEAT_EXT_ARG |\
			IORING_FEAT_NATIVE_WORKERS |\
			IORING_FEAT_RSRC_TAGS |\
			IORING_FEAT_CQE_SKIP |\
			IORING_FEAT_LINKED_FILE |\
			IORING_FEAT_REG_REG_RING |\
			IORING_FEAT_RECVSEND_BUNDLE |\
			IORING_FEAT_MIN_TIMEOUT |\
			IORING_FEAT_RW_ATTR |\
			IORING_FEAT_NO_IOWAIT)

#define IORING_SETUP_FLAGS (IORING_SETUP_IOPOLL |\
			IORING_SETUP_SQPOLL |\
			IORING_SETUP_SQ_AFF |\
			IORING_SETUP_CQSIZE |\
			IORING_SETUP_CLAMP |\
			IORING_SETUP_ATTACH_WQ |\
			IORING_SETUP_R_DISABLED |\
			IORING_SETUP_SUBMIT_ALL |\
			IORING_SETUP_COOP_TASKRUN |\
			IORING_SETUP_TASKRUN_FLAG |\
			IORING_SETUP_SQE128 |\
			IORING_SETUP_CQE32 |\
			IORING_SETUP_SINGLE_ISSUER |\
			IORING_SETUP_DEFER_TASKRUN |\
			IORING_SETUP_NO_MMAP |\
			IORING_SETUP_REGISTERED_FD_ONLY |\
			IORING_SETUP_NO_SQARRAY |\
			IORING_SETUP_HYBRID_IOPOLL |\
			IORING_SETUP_CQE_MIXED |\
			IORING_SETUP_SQE_MIXED |\
			IORING_SETUP_SQ_REWIND)

#define IORING_ENTER_FLAGS (IORING_ENTER_GETEVENTS |\
			IORING_ENTER_SQ_WAKEUP |\
			IORING_ENTER_SQ_WAIT |\
			IORING_ENTER_EXT_ARG |\
			IORING_ENTER_REGISTERED_RING |\
			IORING_ENTER_ABS_TIMER |\
			IORING_ENTER_EXT_ARG_REG |\
			IORING_ENTER_NO_IOWAIT)

#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE |\
			IOSQE_IO_DRAIN |\
			IOSQE_IO_LINK |\
			IOSQE_IO_HARDLINK |\
			IOSQE_ASYNC |\
			IOSQE_BUFFER_SELECT |\
			IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

/*
 * Timeout after which we start complaining (warning) about io_uring
 * cancelation exits and io-wq exit worker waits.
 */
#define IO_URING_EXIT_WAIT_MAX	(HZ * 60 * 5)

enum {
	IOU_COMPLETE		= 0,

	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * The request has more work to do and should be retried. io_uring will
	 * attempt to wait on the file for eligible opcodes, but otherwise
	 * it'll be handed to io-wq for blocking execution. This applies to
	 * normal requests as well as to multishot mode.
	 */
	IOU_RETRY		= -EAGAIN,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it just must not be an otherwise
	 * valid error code, must stay within the -MAX_ERRNO range, and is
	 * only used internally.
	 */
	IOU_REQUEUE		= -3072,
};
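
/*
 * Illustrative only: a minimal sketch of how an opcode issue handler might
 * use the return codes above. io_fooop_issue() is hypothetical; real handlers
 * live in the per-opcode files and are wired up via io_issue_defs[]. They use
 * io_req_set_res() (defined below) rather than open-coding the assignment.
 */
static inline int io_fooop_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret = -EAGAIN;	/* pretend the non-blocking attempt would block */

	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
		return IOU_RETRY;	/* arm poll or punt to io-wq, then retry */

	/* open-coded io_req_set_res(req, ret, 0) */
	req->cqe.res = ret;
	req->cqe.flags = 0;
	return IOU_COMPLETE;		/* the core posts the CQE */
}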

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
};

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
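
/*
 * Illustrative only: a rough sketch of how the waiting side primes the fields
 * io_should_wake() looks at before going to sleep. The real setup happens in
 * io_cqring_wait(); io_prime_cqwait_sketch() is a hypothetical helper.
 */
static inline void io_prime_cqwait_sketch(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  unsigned int min_events)
{
	iowq->ctx = ctx;
	/* wake once the CQ tail has advanced min_events past the current head */
	iowq->cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
	/* any timeout firing after this snapshot forces a return to userspace */
	iowq->nr_timeouts = atomic_read(&ctx->cq_timeouts);
}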

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

int io_prepare_config(struct io_ctx_config *config);

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

unsigned io_linked_nr(struct io_kiocb *req);
void io_req_track_inflight(struct io_kiocb *req);
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw);
__cold void io_uring_drop_tctx_refs(struct task_struct *task);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				     int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
__cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

void io_activate_pollwq(struct io_ring_ctx *ctx);
void io_restriction_clone(struct io_restriction *dst, struct io_restriction *src);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		lockdep_assert_held(&ctx->uring_lock);

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (!percpu_ref_is_dying(&ctx->refs))
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

static inline bool io_is_compat(struct io_ring_ctx *ctx)
{
	return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
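
/*
 * Illustrative only: walking a (hard)link chain with io_for_each_link().
 * io_count_link_sketch() is hypothetical; io_linked_nr() declared above is
 * the real helper for this job.
 */
static inline unsigned io_count_link_sketch(struct io_kiocb *head)
{
	struct io_kiocb *pos;
	unsigned nr = 0;

	io_for_each_link(pos, head)
		nr++;
	return nr;
}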

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
					struct io_uring_cqe **ret,
					bool overflow, bool cqe32)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_sentinel - ctx->cqe_cached < (cqe32 + 1))) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow, cqe32)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32) {
		ctx->cqe_cached++;
	} else if (cqe32 && ctx->flags & IORING_SETUP_CQE_MIXED) {
		ctx->cqe_cached++;
		ctx->cached_cq_tail++;
	}
	WARN_ON_ONCE(ctx->cqe_cached > ctx->cqe_sentinel);
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret,
				bool cqe32)
{
	return io_get_cqe_overflow(ctx, ret, false, cqe32);
}

static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
					       struct io_uring_cqe **cqe_ret)
{
	io_lockdep_assert_cq_locked(ctx);

	ctx->submit_state.cq_flush = true;
	return io_get_cqe(ctx, cqe_ret, ctx->flags & IORING_SETUP_CQE_MIXED);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	bool is_cqe32 = req->cqe.flags & IORING_CQE_F_32;
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the submission
	 * (by quite a lot).
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe, is_cqe32)))
		return false;

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32 || is_cqe32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);
	return true;
}
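
/*
 * Illustrative only: hand-rolling a plain 16-byte CQE with io_get_cqe(),
 * mirroring what io_fill_cqe_req() does for the common case. Real callers
 * should use io_post_aux_cqe()/io_req_post_cqe() declared above; the CQ lock
 * must be held, and io_post_raw_cqe_sketch() is a hypothetical helper.
 */
static inline bool io_post_raw_cqe_sketch(struct io_ring_ctx *ctx,
					  u64 user_data, s32 res, u32 cflags)
{
	struct io_uring_cqe *cqe;

	if (unlikely(!io_get_cqe(ctx, &cqe, false)))
		return false;	/* CQ full: caller must take the overflow path */

	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, cflags);
	/* a real caller then publishes the tail via io_commit_cqring() below */
	return true;
}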

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline u32 ctx_cqe32_flags(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_CQE_MIXED)
		return IORING_CQE_F_32;
	return 0;
}

static inline void io_req_set_res32(struct io_kiocb *req, s32 res, u32 cflags,
				    __u64 extra1, __u64 extra2)
{
	req->cqe.res = res;
	req->cqe.flags = cflags | ctx_cqe32_flags(req->ctx);
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}
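
/*
 * Illustrative only: how an opcode handler could stash a 32-byte completion.
 * io_fooop_complete32_sketch() and its extra1/extra2 payload are made up;
 * passthrough-style commands are the real users of big CQEs.
 */
static inline void io_fooop_complete32_sketch(struct io_kiocb *req, s32 res,
					      u64 extra1, u64 extra2)
{
	/* on IORING_SETUP_CQE_MIXED rings this also tags the CQE with IORING_CQE_F_32 */
	io_req_set_res32(req, res, 0, extra1, extra2);
}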

static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
					      struct io_kiocb *req)
{
	if (cache) {
		req->async_data = io_cache_alloc(cache, GFP_KERNEL);
	} else {
		const struct io_issue_def *def = &io_issue_defs[req->opcode];

		WARN_ON_ONCE(!def->async_size);
		req->async_data = kmalloc(def->async_size, GFP_KERNEL);
	}
	if (req->async_data)
		req->flags |= REQ_F_ASYNC_DATA;
	return req->async_data;
}
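
/*
 * Illustrative only: a prep-time allocation of per-request async state.
 * struct io_foo_async and io_fooop_prep_async_sketch() are hypothetical, and
 * the sketch assumes the opcode's ->async_size in io_issue_defs[] matches
 * sizeof(struct io_foo_async). Passing a NULL cache falls back to kmalloc().
 */
struct io_foo_async {
	int	state;
};

static inline int io_fooop_prep_async_sketch(struct io_kiocb *req)
{
	struct io_foo_async *fa;

	fa = io_uring_alloc_async_data(NULL, req);
	if (unlikely(!fa))
		return -ENOMEM;
	fa->state = 0;	/* REQ_F_ASYNC_DATA is already set for us */
	return 0;
}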

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_req_async_data_clear(struct io_kiocb *req,
					   io_req_flags_t extra_flags)
{
	req->flags &= ~(REQ_F_ASYNC_DATA|extra_flags);
	req->async_data = NULL;
}

static inline void io_req_async_data_free(struct io_kiocb *req)
{
	kfree(req->async_data);
	io_req_async_data_clear(req, 0);
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when we've detached the request and
	 * issue it from an async worker thread; grab the lock for that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
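
/*
 * Illustrative only: the conditional-locking pattern the two helpers above
 * enable, so the same code path works inline, from SQPOLL and from io-wq.
 * io_touch_ctx_state_sketch() is a hypothetical helper.
 */
static inline void io_touch_ctx_state_sketch(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_ring_submit_lock(ctx, issue_flags);
	/* ... access ->uring_lock protected ctx state here ... */
	io_ring_submit_unlock(ctx, issue_flags);
}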

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void __io_wq_wake(struct wait_queue_head *wq)
{
	/*
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency on eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(wq))
		__wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	__io_wq_wake(&ctx->poll_wq);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks; io_should_wake() will make
	 * that decision.
	 */
	__io_wq_wake(&ctx->cq_wait);
}
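
/*
 * Illustrative only: how a poll waitqueue callback can tell that the wakeup
 * came from io_uring's own CQE posting, via the EPOLL_URING_WAKE bit that
 * __io_wq_wake() ORs into the wakeup key. The real handling lives in poll.c;
 * io_wake_is_from_io_uring() is a hypothetical helper.
 */
static inline bool io_wake_is_from_io_uring(void *key)
{
	return key_to_poll(key) & EPOLL_URING_WAKE;
}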

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}
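
/*
 * Illustrative only: the head/tail math above relies on unsigned wraparound,
 * e.g. a tail of 2 and a head of 0xfffffffe still yield 2 - 0xfffffffe == 4
 * pending entries. io_ring_pending_sketch() just restates io_sqring_entries()
 * with plain numbers and is not a kernel helper.
 */
static inline unsigned int io_ring_pending_sketch(unsigned int tail,
						  unsigned int head,
						  unsigned int ring_entries)
{
	unsigned int pending = tail - head;	/* correct across wraparound */

	return min(pending, ring_entries);
}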

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
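
/*
 * Illustrative only: the deferred-completion pattern under ->uring_lock.
 * io_complete_defer_sketch() is hypothetical; the core effectively does this
 * when a handler returns IOU_COMPLETE and IO_URING_F_COMPLETE_DEFER is set.
 */
static inline void io_complete_defer_sketch(struct io_kiocb *req, s32 res)
	__must_hold(&req->ctx->uring_lock)
{
	io_req_set_res(req, res, 0);
	io_req_complete_defer(req);
	/* the CQE is flushed later via io_submit_flush_completions() */
}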

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}
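
/*
 * Illustrative only: how the submission path obtains a request from the
 * per-ctx cache, refilling from the slab on demand. The real consumer is
 * io_submit_sqes(); io_grab_req_sketch() is a hypothetical helper.
 */
static inline struct io_kiocb *io_grab_req_sketch(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *req;

	if (unlikely(!io_alloc_req(ctx, &req)))
		return NULL;	/* allocation failed: stop submitting */
	return req;
}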

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}
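
/*
 * Illustrative only: turning a relative wait into an absolute deadline on the
 * ring's configured clock (CLOCK_MONOTONIC by default, switchable at register
 * time). io_wait_deadline_sketch() is hypothetical; io_cqring_wait() does the
 * equivalent for enter-time timeouts.
 */
static inline ktime_t io_wait_deadline_sketch(struct io_ring_ctx *ctx,
					      ktime_t rel_timeout)
{
	return ktime_add(io_get_time(ctx), rel_timeout);
}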

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       io_local_work_pending(ctx);
}
#endif