xref: /linux/arch/m68k/include/asm/delay.h (revision 40d269c000bda9fcd276a0412a9cebd3f6e344c5)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_DELAY_H
#define _M68K_DELAY_H

#include <asm/param.h>

/*
 * Copyright (C) 1994 Hamish Macdonald
 * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */

#if defined(CONFIG_COLDFIRE)
/*
 * The ColdFire runs the delay loop at significantly different speeds
 * depending upon whether it is long-word aligned or not.  We'll pad it
 * to long-word alignment, which is the faster version.
 * The 0x4a8e is of course a 'tstl %fp' instruction.  This is better
 * than using a NOP (0x4e71) instruction because it executes in one
 * cycle, not three, and doesn't allow for an arbitrary delay waiting
 * for bus cycles to finish.  Also, fp/a6 isn't likely to cause a
 * stall waiting for the register to become valid, should such stalls
 * be added to the ColdFire at some stage.
 */
#define	DELAY_ALIGN	".balignw 4, 0x4a8e\n\t"
#else
/*
 * No instruction alignment required for other m68k types.
 */
#define	DELAY_ALIGN
#endif
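/*
 * For illustration: with the ColdFire DELAY_ALIGN in place, the delay
 * loop below assembles roughly as
 *
 *	.balignw 4, 0x4a8e	| pad with 'tstl %fp' words as needed
 *	1:	subql	#1,%d0	| loop head now on a long word boundary
 *		jcc	1b
 *
 * where %d0 stands in for whichever data register the compiler picks
 * for the "=d" operand.
 */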

static inline void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
		DELAY_ALIGN
		"1: subql #1,%0\n\t"
		"jcc 1b"
		: "=d" (loops)
		: "0" (loops));
}
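
/*
 * For illustration: __delay() simply burns 'loops' iterations of the
 * subql/jcc pair above.  Since loops_per_jiffy counts iterations per
 * jiffy and there are HZ jiffies per second, an n-microsecond wait is
 * conceptually
 *
 *	__delay(n * loops_per_jiffy * HZ / 1000000);
 *
 * but that 32-bit product overflows even for modest n, which is why the
 * scaled __const_udelay()/udelay() helpers below are used instead.
 */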

extern void __bad_udelay(void);


#ifdef CONFIG_CPU_HAS_NO_MULDIV64
/*
 * The simpler m68k and ColdFire processors do not have a 32*32->64
 * multiply instruction. So we need to handle them a little differently.
 * We use a bit of shifting and a single 32*32->32 multiply to get close.
 */
#define	HZSCALE		(268435456 / (1000000 / HZ))

#define	__const_udelay(u) \
	__delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
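
/*
 * Worked example with illustrative numbers (HZ = 100, so HZSCALE =
 * 268435456 / 10000 = 26843, and a hypothetical loops_per_jiffy of
 * 50000), delaying u = 100 microseconds:
 *
 *	(100 * 26843) >> 11	= 1310
 *	1310 * (50000 >> 11)	= 1310 * 24 = 31440
 *	31440 >> 6		= 491 loops
 *
 * against the exact 100 * 50000 * 100 / 1000000 = 500 loops.  The three
 * shifts (11 + 11 + 6 = 28) stand in for dividing by 2^28, so the whole
 * expression approximates u * loops_per_jiffy * HZ / 1000000 while only
 * ever using 32*32->32 multiplies.
 */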

#else

static inline void __xdelay(unsigned long xloops)
{
	unsigned long tmp;

	__asm__ ("mulul %2,%0:%1"
		: "=d" (xloops), "=d" (tmp)
		: "d" (xloops), "1" (loops_per_jiffy));
	__delay(xloops * HZ);
}

/*
 * The definition of __const_udelay is specifically made a macro so that
 * the const factor (4295 = 2**32 / 1000000) can be optimized out when
 * the delay is a const.
 */
#define	__const_udelay(n)	(__xdelay((n) * 4295))
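
/*
 * For illustration: 4295 is roughly 2^32 / 1000000, so for n
 * microseconds xloops = n * 4295 ~= n * 2^32 / 1000000.  The mulul in
 * __xdelay() keeps only the upper 32 bits of xloops * loops_per_jiffy,
 * i.e. it divides the 64-bit product by 2^32, leaving about
 * n * loops_per_jiffy / 1000000; multiplying that by HZ gives
 *
 *	loops ~= n * loops_per_jiffy * HZ / 1000000
 *
 * which is the exact loop count, without needing a 64-bit division.
 */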

#endif

static inline void __udelay(unsigned long usecs)
{
	__const_udelay(usecs);
}

/*
 * Use only for very small delays ( < 1 msec).  Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
 * a constant).
 */
#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
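
/*
 * For illustration, how the macro above resolves:
 *
 *	udelay(10);	// constant <= 20000: becomes __const_udelay(10)
 *	udelay(n);	// non-constant: calls __udelay(n) at run time
 *	udelay(30000);	// constant > 20000: references the undefined
 *			// __bad_udelay() and fails at link time; use
 *			// mdelay() or msleep() for delays that long
 */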

/*
 * nanosecond delay:
 *
 * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of loops
 * per microsecond
 *
 * 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of
 * nanoseconds per loop
 *
 * So n / (1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)) would
 * be the number of loops for n nanoseconds.
 *
 * ndelay() below computes this as DIV_ROUND_UP(n * loops-per-microsecond,
 * 1000), which avoids truncating the intermediate nanoseconds-per-loop
 * division.
 */

/*
 * The simpler m68k and ColdFire processors do not have a 32*32->64
 * multiply instruction. So we need to handle them a little differently.
 * We use a bit of shifting and a single 32*32->32 multiply to get close.
 * This is a macro so that the const version can factor out the first
 * multiply and shift.
 */
#define	HZSCALE		(268435456 / (1000000 / HZ))

static inline void ndelay(unsigned long nsec)
{
	__delay(DIV_ROUND_UP(nsec *
			     ((((HZSCALE) >> 11) *
			       (loops_per_jiffy >> 11)) >> 6),
			     1000));
}
#define ndelay(n) ndelay(n)
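
/*
 * Worked example with the same illustrative numbers (HZ = 100,
 * HZSCALE = 26843, loops_per_jiffy = 50000): the loops-per-microsecond
 * term is ((26843 >> 11) * (50000 >> 11)) >> 6 = (13 * 24) >> 6 = 4, so
 * ndelay(500) spins for DIV_ROUND_UP(500 * 4, 1000) = 2 loops, close to
 * the exact 2.5.  The '#define ndelay(n) ndelay(n)' above makes ndelay
 * visible as a macro, so <linux/delay.h> knows not to supply its own
 * fallback definition.
 */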

#endif /* defined(_M68K_DELAY_H) */