/* xref: /linux/arch/arm64/lib/delay.c (revision 8457669db968c98edb781892d73fa559e1efcbd4) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Delay loops based on the OpenRISC implementation.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timex.h>

#include <clocksource/arm_arch_timer.h>

/*
 * Convert a delay in microseconds into timer cycles.
 *
 * 0x10C7 is 2**32 / 1000000 rounded up (the same scaling __udelay()
 * uses); the product is an "xloops" fixed-point value consumed by
 * xloops_to_cycles().
 */
#define USECS_TO_CYCLES(time_usecs)			\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

xloops_to_cycles(unsigned long xloops)21 static inline unsigned long xloops_to_cycles(unsigned long xloops)
22 {
23 	return (xloops * loops_per_jiffy * HZ) >> 32;
24 }
25 
/*
 * Force the use of CNTVCT_EL0 in order to have the same base as WFxT.
 * This avoids some annoying issues when CNTVOFF_EL2 is not reset to 0 on a
 * KVM host running at EL1 until we do a vcpu_put() on the vcpu. When
 * running at EL2, the effective offset is always 0.
 *
 * Note that userspace cannot change the offset behind our back either,
 * as the vcpu mutex is held as long as KVM_RUN is in progress.
 */
__delay_cycles(void)35 static cycles_t notrace __delay_cycles(void)
36 {
37 	guard(preempt_notrace)();
38 	return __arch_counter_get_cntvct_stable();
39 }
40 
__delay(unsigned long cycles)41 void __delay(unsigned long cycles)
42 {
43 	cycles_t start = __delay_cycles();
44 
45 	if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
46 		u64 end = start + cycles;
47 
48 		/*
49 		 * Start with WFIT. If an interrupt makes us resume
50 		 * early, use a WFET loop to complete the delay.
51 		 */
52 		wfit(end);
53 		while ((__delay_cycles() - start) < cycles)
54 			wfet(end);
55 	} else 	if (arch_timer_evtstrm_available()) {
56 		const cycles_t timer_evt_period =
57 			USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);
58 
59 		while ((__delay_cycles() - start + timer_evt_period) < cycles)
60 			wfe();
61 	}
62 
63 	while ((__delay_cycles() - start) < cycles)
64 		cpu_relax();
65 }
66 EXPORT_SYMBOL(__delay);
67 
/*
 * Delay for a pre-scaled "xloops" value; see __udelay()/__ndelay() for
 * the 2**32-based scaling factors applied by callers.
 */
inline void __const_udelay(unsigned long xloops)
{
	__delay(xloops_to_cycles(xloops));
}
EXPORT_SYMBOL(__const_udelay);

/* Delay for at least @usecs microseconds. */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

/* Delay for at least @nsecs nanoseconds. */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
85