xref: /linux/io_uring/wait.h (revision 23acda7c221a76ff711d65f4ca90029d43b249a0)
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef IOU_WAIT_H
3 #define IOU_WAIT_H
4 
5 #include <linux/io_uring_types.h>
6 
7 /*
8  * No waiters. It's larger than any valid value of the tw counter
9  * so that tests against ->cq_wait_nr would fail and skip wake_up().
10  */
11 #define IO_CQ_WAKE_INIT		(-1U)
12 /* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
13 #define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
14 
/*
 * Decoded extended wait arguments passed to io_cqring_wait(); presumably
 * filled in from the userspace io_uring_getevents_arg — verify against
 * the caller in io_uring.c.
 */
struct ext_arg {
	size_t argsz;			/* size of the user-supplied argument struct */
	struct timespec64 ts;		/* wait timeout; only meaningful when ts_set */
	const sigset_t __user *sig;	/* user pointer to a signal mask to apply while waiting */
	ktime_t min_time;		/* NOTE(review): looks like a minimum wait/batching time — confirm in wait.c */
	bool ts_set;			/* true when ts carries a valid timeout */
	bool iowait;			/* presumably: account sleep time as iowait — confirm */
};
23 
/* Wait for at least @min_events CQEs, honoring @ext_arg (timeout/sigmask). */
int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
		   struct ext_arg *ext_arg);
/* Run pending task_work; negative return presumably signals interruption — confirm in wait.c. */
int io_run_task_work_sig(struct io_ring_ctx *ctx);
/* Flush overflowed CQEs back into the CQ ring (locking handled internally). */
void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx);
/* As above, but the caller already holds the required lock. */
void io_cqring_overflow_flush_locked(struct io_ring_ctx *ctx);
29 
30 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
31 {
32 	struct io_rings *rings = io_get_rings(ctx);
33 	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
34 }
35 
36 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
37 {
38 	struct io_rings *rings = io_get_rings(ctx);
39 
40 	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
41 }
42 
/*
 * Reads the tail/head of the CQ ring while providing an acquire ordering,
 * see comment at top of io_uring.c.
 */
static inline unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* order the subsequent head read against earlier CQE stores */
	smp_rmb();
	return __io_cqring_events(ctx);
}
52 
53 #endif
54