xref: /linux/arch/arc/include/asm/delay.h (revision 75bf465f0bc33e9b776a46d6a1b9b990f5fb7c37)
1*d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
2d8005e6bSVineet Gupta /*
3d8005e6bSVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
4d8005e6bSVineet Gupta  *
 * Delay routines using precomputed loops_per_jiffy value.
6d8005e6bSVineet Gupta  *
7d8005e6bSVineet Gupta  * vineetg: Feb 2012
8d8005e6bSVineet Gupta  *  -Rewrote in "C" to avoid dealing with availability of H/w MPY
9d8005e6bSVineet Gupta  *  -Also reduced the num of MPY operations from 3 to 2
10d8005e6bSVineet Gupta  *
11d8005e6bSVineet Gupta  * Amit Bhor: Codito Technologies 2004
12d8005e6bSVineet Gupta  */
13d8005e6bSVineet Gupta 
14d8005e6bSVineet Gupta #ifndef __ASM_ARC_UDELAY_H
15d8005e6bSVineet Gupta #define __ASM_ARC_UDELAY_H
16d8005e6bSVineet Gupta 
172423665eSRandy Dunlap #include <asm-generic/types.h>
18d8005e6bSVineet Gupta #include <asm/param.h>		/* HZ */
19d8005e6bSVineet Gupta 
202423665eSRandy Dunlap extern unsigned long loops_per_jiffy;
212423665eSRandy Dunlap 
/*
 * Busy-wait for exactly @loops iterations using the ARC "zero overhead
 * loop" hardware: lp_count is loaded with the iteration count and the
 * LP instruction repeats the enclosed body (a single nop here) until
 * the count is exhausted — no explicit decrement/branch needed.
 */
static inline void __delay(unsigned long loops)
{
	__asm__ __volatile__(
	"	mov lp_count, %0	\n"	/* load hardware loop counter */
	"	lp  1f			\n"	/* loop the code up to label 1: */
	"	nop			\n"	/* empty loop body: pure delay */
	"1:				\n"
	:
        : "r"(loops)
        : "lp_count");			/* tell gcc lp_count is clobbered */
}
33d8005e6bSVineet Gupta 
34d8005e6bSVineet Gupta extern void __bad_udelay(void);
35d8005e6bSVineet Gupta 
36d8005e6bSVineet Gupta /*
37d8005e6bSVineet Gupta  * Normal Math for computing loops in "N" usecs
38d8005e6bSVineet Gupta  *  -we have precomputed @loops_per_jiffy
39d8005e6bSVineet Gupta  *  -1 sec has HZ jiffies
40d8005e6bSVineet Gupta  * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
41d8005e6bSVineet Gupta  *
42d8005e6bSVineet Gupta  * Approximate Division by multiplication:
43d8005e6bSVineet Gupta  *  -Mathematically if we multiply and divide a number by same value the
44d8005e6bSVineet Gupta  *   result remains unchanged:  In this case, we use 2^32
45d8005e6bSVineet Gupta  *  -> (loops_per_N_usec * 2^32 ) / 2^32
46d8005e6bSVineet Gupta  *  -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
47d8005e6bSVineet Gupta  *  -> (loops_per_jiffy * HZ * N * 4295) / 2^32
48d8005e6bSVineet Gupta  *
49d8005e6bSVineet Gupta  *  -Divide by 2^32 is very simply right shift by 32
50d8005e6bSVineet Gupta  *  -We simply need to ensure that the multiply per above eqn happens in
 *   64-bit precision (if CPU doesn't support it - gcc can emulate it)
52d8005e6bSVineet Gupta  */
53d8005e6bSVineet Gupta 
__udelay(unsigned long usecs)54d8005e6bSVineet Gupta static inline void __udelay(unsigned long usecs)
55d8005e6bSVineet Gupta {
56d8005e6bSVineet Gupta 	unsigned long loops;
57d8005e6bSVineet Gupta 
587efd0da2SMischa Jonker 	/* (u64) cast ensures 64 bit MPY - real or emulated
59d8005e6bSVineet Gupta 	 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
60d8005e6bSVineet Gupta 	 */
617efd0da2SMischa Jonker 	loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
62d8005e6bSVineet Gupta 
63d8005e6bSVineet Gupta 	__delay(loops);
64d8005e6bSVineet Gupta }
65d8005e6bSVineet Gupta 
/*
 * Reject overly long constant delays at build time: for a compile-time
 * constant @n greater than 20000 us, emit a call to __bad_udelay() —
 * declared above but presumably never defined, so the build fails at
 * link time (NOTE(review): definition not visible in this file; the
 * link-error intent is inferred from the extern-without-definition
 * pattern).  Runtime-variable delays always go through __udelay().
 */
#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
				: __udelay(n)) : __udelay(n))
68d8005e6bSVineet Gupta 
69d8005e6bSVineet Gupta #endif /* __ASM_ARC_UDELAY_H */
70