/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/byteorder.h>

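/*
 * These are the longlong.h-style two-word arithmetic primitives that the
 * generic math-emu (soft-fp) code builds on.  add_ssaaaa() computes the
 * double-word sum {sh, sl} = {ah, al} + {bh, bl} from 32-bit (USItype)
 * halves: addcc adds the low words and sets the carry flag, and addx
 * folds that carry into the high-word addition.  A roughly equivalent
 * portable C sketch would be:
 *
 *	(sl) = (al) + (bl);
 *	(sh) = (ah) + (bh) + ((sl) < (al));
 */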
#define add_ssaaaa(sh, sl, ah, al, bh, bl) 				\
  __asm__ ("addcc %r4,%5,%1\n\t"					\
	   "addx %r2,%3,%0\n"						\
	   : "=r" (sh),							\
	     "=&r" (sl)							\
	   : "%rJ" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "%rJ" ((USItype)(al)),					\
	     "rI" ((USItype)(bl))					\
	   : "cc")
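/*
 * sub_ddmmss() is the matching double-word subtraction,
 * {sh, sl} = {ah, al} - {bh, bl}: subcc generates the borrow that subx
 * then consumes in the high-word subtraction.  Roughly, in portable C:
 *
 *	(sl) = (al) - (bl);
 *	(sh) = (ah) - (bh) - ((al) < (bl));
 */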
#define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
  __asm__ ("subcc %r4,%5,%1\n\t"					\
	   "subx %r2,%3,%0\n"						\
	   : "=r" (sh),							\
	     "=&r" (sl)							\
	   : "rJ" ((USItype)(ah)),					\
	     "rI" ((USItype)(bh)),					\
	     "rJ" ((USItype)(al)),					\
	     "rI" ((USItype)(bl))					\
	   : "cc")

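/*
 * umul_ppmm() produces the full 64-bit product of two 32-bit operands,
 * {w1, w0} = (UDItype) (u) * (v).  SPARC v7 has no integer multiply
 * instruction, so the product is accumulated with 32 mulscc multiply-step
 * instructions plus a final adjusting step; the multiplier u lives in %y
 * and the low result word is read back from %y at the end.  The mulscc
 * sequence effectively treats v as signed, so the sra/and prologue builds
 * the correction term (v < 0 ? u : 0) that is added to the high word to
 * recover the unsigned product.  In portable C this is simply:
 *
 *	UDItype __p = (UDItype) (USItype) (u) * (USItype) (v);
 *	(w1) = __p >> 32;
 *	(w0) = (USItype) __p;
 */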
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm\n\t"					\
	"wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n\t" \
	"sra	%3,31,%%g2	! Don't move this insn\n\t"		\
	"and	%2,%%g2,%%g2	! Don't move this insn\n\t"		\
	"andcc	%%g0,0,%%g1	! Don't move this insn\n\t"		\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,%3,%%g1\n\t"					\
	"mulscc	%%g1,0,%%g1\n\t" 					\
	"add	%%g1,%%g2,%0\n\t" 					\
	"rd	%%y,%1\n"						\
	   : "=r" (w1),							\
	     "=r" (w0)							\
	   : "%rI" ((USItype)(u)),					\
	     "r" ((USItype)(v))						\
	   : "%g1", "%g2", "cc")

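/*
 * udiv_qrnnd() divides the two-word value {n1, n0} by the single word d,
 * leaving the quotient in q and the remainder in r.  As usual for this
 * primitive, the caller must ensure n1 < d so that the quotient fits in
 * one word.  The loop below is a shift-and-subtract binary division, one
 * quotient bit per iteration, with the iteration count kept in %g1.
 * Given a 64-bit type, the portable C equivalent is roughly:
 *
 *	UDItype __n = ((UDItype) (n1) << 32) | (USItype) (n0);
 *	(q) = __n / (d);
 *	(r) = __n % (d);
 */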
/* It's quite necessary to add this much assembler for the sparc.
   The default udiv_qrnnd (in C) is more than 10 times slower!  */
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd\n\t"					\
	   "mov	32,%%g1\n\t"						\
	   "subcc	%1,%2,%%g0\n\t"					\
	   "1:	bcs	5f\n\t"						\
	   "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n\t"	\
	   "sub	%1,%2,%1	! this kills msb of n\n\t"		\
	   "addx	%1,%1,%1	! so this can't give carry\n\t"	\
	   "subcc	%%g1,1,%%g1\n\t"				\
	   "2:	bne	1b\n\t"						\
	   "subcc	%1,%2,%%g0\n\t"					\
	   "bcs	3f\n\t"							\
	   "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n\t"	\
	   "b		3f\n\t"						\
	   "sub	%1,%2,%1	! this kills msb of n\n\t"		\
	   "4:	sub	%1,%2,%1\n\t"					\
	   "5:	addxcc	%1,%1,%1\n\t"					\
	   "bcc	2b\n\t"							\
	   "subcc	%%g1,1,%%g1\n\t"				\
	   "! Got carry from n.  Subtract next step to cancel this carry.\n\t" \
	   "bne	4b\n\t"							\
	   "addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb\n\t" \
	   "sub	%1,%2,%1\n\t"						\
	   "3:	xnor	%0,0,%0\n\t"					\
	   "! End of inline udiv_qrnnd\n"				\
	   : "=&r" (q),							\
	     "=&r" (r)							\
	   : "r" ((USItype)(d)),					\
	     "1" ((USItype)(n1)),					\
	     "0" ((USItype)(n0)) : "%g1", "cc")
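/*
 * The routine above copes with an unnormalized divisor (there is no
 * requirement that the top bit of d be set), which is what setting
 * UDIV_NEEDS_NORMALIZATION to 0 advertises to the generic code.
 */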
#define UDIV_NEEDS_NORMALIZATION 0

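/*
 * The generic soft-fp sources call abort() on "can't happen" paths; in
 * the kernel that is mapped to a plain return 0, so the calling
 * emulation routine simply bails out on the instruction instead of
 * aborting.
 */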
#define abort()								\
	return 0

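/*
 * The math-emu headers compare __BYTE_ORDER against __BIG_ENDIAN to lay
 * out multi-word values; derive it here from the kernel's
 * asm/byteorder.h macros (sparc32 is big-endian, so the first branch is
 * the one taken in practice).
 */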
#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif