/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64

#include <linux/types.h>
#include <asm/compiler.h>

/*
 * The semantics of __div64_32() are:
 *
 * uint32_t __div64_32(uint64_t *n, uint32_t base)
 * {
 * 	uint32_t remainder = *n % base;
 * 	*n = *n / base;
 * 	return remainder;
 * }
 *
 * In other words, a 64-bit dividend with a 32-bit divisor producing
 * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
 * we override the generic version in lib/div64.c to call our __do_div64
 * assembly implementation with a completely non-standard calling
 * convention for arguments and results (beware).
 */
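
/*
 * For illustration only (the values below are hypothetical, not part
 * of this header): most code reaches __div64_32() through the generic
 * do_div() macro, which divides its first argument in place and
 * evaluates to the remainder:
 *
 *	uint64_t ns = 1000000123;
 *	uint32_t rem = do_div(ns, 1000000);
 *
 * leaves ns == 1000 and rem == 123.
 */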

/*
 * A 64-bit value held in r0/r1 keeps its most significant word in r0
 * on big-endian and in r1 on little-endian, hence the swapped
 * definitions of __xh and __xl below.
 */
#ifdef __ARMEB__
#define __xh "r0"
#define __xl "r1"
#else
#define __xl "r0"
#define __xh "r1"
#endif

static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	register unsigned int __base      asm("r4") = base;
	register unsigned long long __n   asm("r0") = *n;
	register unsigned long long __res asm("r2");
	register unsigned int __rem       asm(__xh);
	/*
	 * The __asmeq() strings emit assembly-time checks that each
	 * operand really was allocated to the register named above,
	 * since __do_div64 takes its arguments and returns its results
	 * in fixed registers.
	 */
	asm(	__asmeq("%0", __xh)
		__asmeq("%1", "r2")
		__asmeq("%2", "r0")
		__asmeq("%3", "r4")
		"bl	__do_div64"
		: "=r" (__rem), "=r" (__res)
		: "r" (__n), "r" (__base)
		: "ip", "lr", "cc");
	*n = __res;
	return __rem;
}
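
/*
 * Advertise the override: the generic out-of-line version in
 * lib/div64.c is compiled only when __div64_32 is not already defined.
 */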
#define __div64_32 __div64_32

#if !defined(CONFIG_AEABI)

/*
 * In OABI configurations, some uses of the do_div() macro cause gcc
 * to run out of registers. To work around that, we force the use of
 * the out-of-line version for configurations that build an OABI
 * kernel.
 */
#define do_div(n, base) __div64_32(&(n), base)

#else

/*
 * gcc versions earlier than 4.0 are simply too problematic for the
 * __div64_const32() code in asm-generic/div64.h: gcc PR 15089 tends
 * to trigger on more complex constructs, spurious ".global __udivsi3"
 * directives get inserted even when none of those symbols are
 * referenced in the generated code, and those gcc versions are not
 * able to do constant propagation on long long values anyway.
 */

#define __div64_const32_is_OK (__GNUC__ >= 4)

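/*
 * The semantics are those documented for the default C version in
 * asm-generic/div64.h:
 *
 *	retval = ((bias ? m : 0) + m * n) >> 64
 *
 * i.e. the 128-bit product of m and n, with an optional bias of m
 * added in, scaled down to its upper 64 bits.
 */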
static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
	unsigned long long res;
	register unsigned int tmp asm("ip") = 0;

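	/*
	 * First stage: the upper half of the low partial product
	 * m_lo * n_lo, with the bias (m) folded in when requested.
	 * Note that the two halves of res are deliberately swapped at
	 * this point; the second stage swaps them back.
	 */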
	if (!bias) {
		asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"
			"mov	%Q0, #0"
			: "=&r" (res)
			: "r" (m), "r" (n)
			: "cc");
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		res = m;
		asm (	"umlal	%Q0, %R0, %Q1, %Q2\n\t"
			"mov	%Q0, #0"
			: "+&r" (res)
			: "r" (m), "r" (n)
			: "cc");
	} else {
		asm (	"umull	%Q0, %R0, %Q2, %Q3\n\t"
			"cmn	%Q0, %Q2\n\t"
			"adcs	%R0, %R0, %R2\n\t"
			"adc	%Q0, %1, #0"
			: "=&r" (res), "+&r" (tmp)
			: "r" (m), "r" (n)
			: "cc");
	}

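	/*
	 * Second stage: fold in the cross products m_lo * n_hi and
	 * m_hi * n_lo, then the high product m_hi * n_hi, restoring
	 * the halves of res to their natural order.  The second
	 * variant routes one accumulation through tmp so that carries
	 * out of the 64-bit intermediate sums are not lost.
	 */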
	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		asm (	"umlal	%R0, %Q0, %R1, %Q2\n\t"
			"umlal	%R0, %Q0, %Q1, %R2\n\t"
			"mov	%R0, #0\n\t"
			"umlal	%Q0, %R0, %R1, %R2"
			: "+&r" (res)
			: "r" (m), "r" (n)
			: "cc");
	} else {
		asm (	"umlal	%R0, %Q0, %R2, %Q3\n\t"
			"umlal	%R0, %1, %Q2, %R3\n\t"
			"mov	%R0, #0\n\t"
			"adds	%Q0, %1, %Q0\n\t"
			"adc	%R0, %R0, #0\n\t"
			"umlal	%Q0, %R0, %R2, %R3"
			: "+&r" (res), "+&r" (tmp)
			: "r" (m), "r" (n)
			: "cc");
	}

	return res;
}
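
/*
 * As with __div64_32 above, defining the name lets asm-generic/div64.h
 * pick this version over its generic C fallback.
 */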
#define __arch_xprod_64 __arch_xprod_64

#include <asm-generic/div64.h>

#endif	/* !CONFIG_AEABI */

#endif	/* __ASM_ARM_DIV64 */