/* SPDX-License-Identifier: MIT */
/* Copyright © 2025 Intel Corporation */

#ifndef __I915_WAIT_UTIL_H__
#define __I915_WAIT_UTIL_H__

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/sched/clock.h>
#include <linux/smp.h>

/*
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar, in which case we may
 * never have had a chance to check the condition before it expired.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)	_wait_for((COND), (MS) * 1000, 10, 1000)
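
/*
 * Usage sketch (illustrative only): dev and device_is_ready() below are
 * hypothetical stand-ins that merely show the calling pattern. wait_for()
 * sleeps between polls via usleep_range(), so it must not be used in atomic
 * context; it evaluates to 0 once the condition holds, or to -ETIMEDOUT if
 * the timeout (in milliseconds) expires first.
 *
 *	int err = wait_for(device_is_ready(dev), 10);
 *	if (err)
 *		return err;
 */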

/*
 * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. On
 * PREEMPT_RT, running in an interrupt handler or holding a spinlock_t does
 * not make the context atomic, so the check below would trigger warnings
 * that don't occur otherwise; it is therefore disabled there.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)

#endif /* __I915_WAIT_UTIL_H__ */
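
/*
 * Usage sketch for the atomic variants (illustrative only): reg_bit_set() and
 * dev_priv below are hypothetical stand-ins that merely show the calling
 * pattern. wait_for_atomic_us() busy-waits with cpu_relax() instead of
 * sleeping, so it is usable in atomic context, but the timeout must be a
 * compile-time constant of at most 50000 us.
 *
 *	int err = wait_for_atomic_us(reg_bit_set(dev_priv), 20);
 *	if (err)
 *		return err;
 */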