xref: /linux/io_uring/wait.h (revision 3d2c3d2eea9acdbee5b5742d15d021069b49d3f9)
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef IOU_WAIT_H
3 #define IOU_WAIT_H
4 
5 #include <linux/io_uring_types.h>
6 
7 /*
8  * No waiters. It's larger than any valid value of the tw counter
9  * so that tests against ->cq_wait_nr would fail and skip wake_up().
10  */
11 #define IO_CQ_WAKE_INIT		(-1U)
12 /* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
13 #define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
14 
/*
 * Extended wait arguments for io_cqring_wait(), decoded from the
 * user-supplied enter arguments before sleeping on the CQ.
 */
struct ext_arg {
	size_t argsz;			/* size of the user argument struct */
	struct timespec64 ts;		/* wait timeout; only valid if ts_set */
	const sigset_t __user *sig;	/* user signal mask to apply while waiting, or NULL */
	ktime_t min_time;		/* minimum time to keep waiting/batching before waking */
	bool ts_set;			/* true if ts was provided by userspace */
	bool iowait;			/* NOTE(review): presumably "account sleep as iowait" — confirm against io_cqring_wait() */
};
23 
24 int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
25 		   struct ext_arg *ext_arg);
26 int io_run_task_work_sig(struct io_ring_ctx *ctx);
27 void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx);
28 
29 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
30 {
31 	struct io_rings *rings = io_get_rings(ctx);
32 	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
33 }
34 
35 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
36 {
37 	struct io_rings *rings = io_get_rings(ctx);
38 
39 	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
40 }
41 
42 /*
43  * Reads the tail/head of the CQ ring while providing an acquire ordering,
44  * see comment at top of io_uring.c.
45  */
46 static inline unsigned io_cqring_events(struct io_ring_ctx *ctx)
47 {
48 	smp_rmb();
49 	return __io_cqring_events(ctx);
50 }
51 
52 #endif
53