/* SPDX-License-Identifier: MIT */
/* Copyright © 2025 Intel Corporation */

#ifndef __I915_WAIT_UTIL_H__
#define __I915_WAIT_UTIL_H__

#include <linux/bug.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/preempt.h>
#include <linux/sched/clock.h>
#include <linux/smp.h>

/*
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar and we may never have had
 * a chance to check the condition before the timeout.
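 *
 * OP is an optional statement executed before each COND check (it may be
 * empty). US is the total timeout in microseconds. Wmin and Wmax bound the
 * usleep_range() backoff between polls, also in microseconds: the sleep
 * starts at Wmin and doubles after each poll while still below Wmax.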
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
	int ret__;							\
	might_sleep();							\
	for (;;) {							\
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP;							\
		/* Guarantee COND check prior to timeout */		\
		barrier();						\
		if (COND) {						\
			ret__ = 0;					\
			break;						\
		}							\
		if (expired__) {					\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(wait__, wait__ * 2);			\
		if (wait__ < (Wmax))					\
			wait__ <<= 1;					\
	}								\
	ret__;								\
})

#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
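
/*
 * Minimal usage sketch (hypothetical caller: the register accessor call and
 * the SOME_REG/SOME_DONE_BIT names are illustrative, not defined here):
 *
 *	if (wait_for(intel_de_read(display, SOME_REG) & SOME_DONE_BIT, 100))
 *		drm_err(display->drm, "timed out waiting for SOME_DONE_BIT\n");
 */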

/*
 * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false.
 * On PREEMPT_RT, a context does not become atomic merely because it runs in
 * an interrupt handler or holds a spinlock_t, so the check would trigger
 * warnings that do not occur otherwise; it is therefore disabled there.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
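	/* local_clock() is not comparable across CPUs; pin to one CPU so */ \
	/* the deadline arithmetic below stays consistent. */ \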
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
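			/* If we were migrated while preemptible, keep the */ \
			/* time already spent but re-base on the new CPU's */ \
			/* clock, as local_clock() is per-CPU. */ \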
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

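/*
 * For waits of at most 10 us, usleep_range() overhead would dominate (see
 * the "recommended min for usleep" note above), so wait_for_us() falls back
 * to a non-atomic busy-wait via _wait_for_atomic() for such short timeouts.
 */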
#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

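/*
 * Busy-wait variant for atomic context. The BUILD_BUG_ON() below caps the
 * timeout at 50 ms, limiting how long a CPU can be tied up busy-waiting.
 */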
#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
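
/*
 * Minimal usage sketch (hypothetical caller: the lock and the ACK register
 * access below are illustrative, not defined by this header):
 *
 *	spin_lock_irq(&engine->lock);
 *	err = wait_for_atomic_us(readl(regs + SOME_ACK_OFFSET) & SOME_ACK_BIT,
 *				 100);
 *	spin_unlock_irq(&engine->lock);
 */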

#endif /* __I915_WAIT_UTIL_H__ */