xref: /linux/arch/arm64/lib/delay.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Delay loops based on the OpenRISC implementation.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timex.h>

#include <clocksource/arm_arch_timer.h>

#define USECS_TO_CYCLES(time_usecs)			\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

static inline unsigned long xloops_to_cycles(unsigned long xloops)
{
	return (xloops * loops_per_jiffy * HZ) >> 32;
}

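/*
 * Worked example of the fixed-point conversion above, assuming that
 * loops_per_jiffy was seeded from the architected timer frequency (so that
 * loops_per_jiffy * HZ is effectively the counter rate in Hz) and, purely
 * for illustration, a 50 MHz counter:
 *
 *   USECS_TO_CYCLES(100)
 *     = xloops_to_cycles(100 * 0x10C7)	(0x10C7 ~= 2^32 / 10^6)
 *     = (429500 * 50000000) >> 32
 *     ~= 5000 cycles			(i.e. 100 us at 50 MHz)
 *
 * In other words, xloops is the delay in seconds scaled by 2^32, and the
 * >> 32 turns it into counter cycles once multiplied by the cycle rate.
 */
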
void __delay(unsigned long cycles)
{
	cycles_t start = get_cycles();

	if (cpus_have_const_cap(ARM64_HAS_WFXT)) {
		u64 end = start + cycles;

		/*
		 * Start with WFIT. If an interrupt makes us resume
		 * early, use a WFET loop to complete the delay.
		 */
		wfit(end);
		while ((get_cycles() - start) < cycles)
			wfet(end);
	} else if (arch_timer_evtstrm_available()) {
		const cycles_t timer_evt_period =
			USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);

		/*
		 * Sleep in WFE while at least one full event-stream
		 * period still fits in the remaining budget; the tail
		 * is finished by the cpu_relax() loop below.
		 */
		while ((get_cycles() - start + timer_evt_period) < cycles)
			wfe();
	}

	/* Busy-wait for whatever is left (or for the whole delay). */
	while ((get_cycles() - start) < cycles)
		cpu_relax();
}
EXPORT_SYMBOL(__delay);

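/*
 * Sketch of the timing behaviour above, assuming the event stream runs at
 * its usual ~100 us period (ARCH_TIMER_EVT_STREAM_PERIOD_US) and, purely
 * for illustration, a 50 MHz counter:
 *
 *   timer_evt_period = USECS_TO_CYCLES(100) ~= 5000 cycles
 *
 *   __delay(20000)	(400 us requested)
 *     - the WFE loop sleeps while more than ~5000 cycles (one event-stream
 *       period) remain, waking roughly every 100 us;
 *     - the final <= ~5000 cycles (<= ~100 us) are spent spinning in the
 *       cpu_relax() loop, so the delay can never end early.
 *
 * On FEAT_WFxT hardware the WFIT/WFET path is used instead: both take an
 * absolute counter deadline, so the core can sleep right up to "end"
 * without relying on the event stream.
 */
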
/* xloops encodes the requested delay in seconds as a 32.32 fixed-point value */
inline void __const_udelay(unsigned long xloops)
{
	__delay(xloops_to_cycles(xloops));
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

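/*
 * The scale factor above rounds up: 0x10C7 = 4295 vs. the exact
 * 2^32 / 10^6 ~= 4294.97. As a quick check (assuming, for illustration,
 * a 1 GHz counter so loops_per_jiffy * HZ == 10^9):
 *
 *   __udelay(10) -> xloops = 42950 -> (42950 * 10^9) >> 32 ~= 10000 cycles,
 *
 * i.e. 10 us, never less than requested.
 */
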
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
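
/*
 * Note on precision (illustrative numbers, not from the source): rounding
 * 2^32 / 10^9 ~= 4.29 up to 5 makes __ndelay() overshoot by up to ~16%.
 * For example, with a 50 MHz counter (loops_per_jiffy * HZ == 5 * 10^7):
 *
 *   __ndelay(1000) -> xloops = 5000 -> (5000 * 5*10^7) >> 32 ~= 58 cycles,
 *
 * i.e. ~1.16 us for a 1 us request: longer than asked, never shorter.
 */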