/* xref: /linux/arch/sh/math-emu/sfp-util.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732) */
/*
 * These are copied from glibc/stdlib/longlong.h
 */

/*
 * add_ssaaaa(sh, sl, ah, al, bh, bl) -- two-word addition.
 * Adds the double-word value {ah,al} to {bh,bl} and stores the
 * double-word sum in {sh,sl}.  Carry out of the low word is
 * detected by the unsigned-wraparound test (__sum < (al)).
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    UWtype __sum = (al) + (bl);                                         \
    UWtype __carry = __sum < (al);  /* low word wrapped -> carry */     \
    (sh) = (ah) + (bh) + __carry;                                       \
    (sl) = __sum;                                                       \
  } while (0)

/*
 * sub_ddmmss(sh, sl, ah, al, bh, bl) -- two-word subtraction.
 * Subtracts the double-word value {bh,bl} from {ah,al} and stores
 * the double-word difference in {sh,sl}.  Borrow out of the low
 * word is detected by the unsigned-wraparound test (__diff > (al)).
 */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    UWtype __diff = (al) - (bl);                                        \
    UWtype __borrow = __diff > (al);  /* low word wrapped -> borrow */  \
    (sh) = (ah) - (bh) - __borrow;                                      \
    (sl) = __diff;                                                      \
  } while (0)

/*
 * umul_ppmm(w1, w0, u, v) -- 32x32 -> 64-bit unsigned multiply.
 * Uses the SuperH dmulu.l instruction; the 64-bit product lands in
 * the MACH:MACL register pair and is copied out with sts, high word
 * to w1 and low word to w0.  macl/mach are listed as clobbers since
 * the multiply overwrites them before the sts reads them back.
 */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("dmulu.l %2,%3\n\tsts    macl,%1\n\tsts  mach,%0"	\
	: "=r" ((u32)(w1)), "=r" ((u32)(w0))	\
	:  "r" ((u32)(u)),   "r" ((u32)(v))	\
	: "macl", "mach")

/*
 * Half-word helpers for udiv_qrnnd below:
 * __ll_B is the half-word base, 2^(W_TYPE_SIZE/2);
 * __ll_lowpart/__ll_highpart extract the low/high half of a word.
 */
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

/*
 * udiv_qrnnd(q, r, n1, n0, d) -- divide the two-word unsigned
 * number {n1,n0} by the one-word divisor d, producing a one-word
 * quotient q and remainder r.  This is glibc's generic C fallback
 * (__udiv_qrnnd_c): schoolbook division by half-words, where each
 * quotient "digit" (__q1 then __q0) is estimated from the divisor's
 * high half __d1 and corrected downwards -- at most twice -- when
 * the estimate overshoots the partial remainder.
 *
 * NOTE(review): as in glibc, callers must ensure n1 < d so the
 * quotient fits in one word; the estimation also relies on d being
 * normalized (high bit set), which in particular keeps __d1 nonzero
 * for the % and / below -- confirm callers honour this.
 */
#define udiv_qrnnd(q, r, n1, n0, d) \
  do {									\
    UWtype __d1, __d0, __q1, __q0;					\
    UWtype __r1, __r0, __m;						\
    __d1 = __ll_highpart (d);						\
    __d0 = __ll_lowpart (d);						\
									\
    __r1 = (n1) % __d1;							\
    __q1 = (n1) / __d1;							\
    __m = (UWtype) __q1 * __d0;						\
    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
    if (__r1 < __m)							\
      {									\
	__q1--, __r1 += (d);						\
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m)						\
	    __q1--, __r1 += (d);					\
      }									\
    __r1 -= __m;							\
									\
    __r0 = __r1 % __d1;							\
    __q0 = __r1 / __d1;							\
    __m = (UWtype) __q0 * __d0;						\
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
    if (__r0 < __m)							\
      {									\
	__q0--, __r0 += (d);						\
	if (__r0 >= (d))						\
	  if (__r0 < __m)						\
	    __q0--, __r0 += (d);					\
      }									\
    __r0 -= __m;							\
									\
    (q) = (UWtype) __q1 * __ll_B | __q0;				\
    (r) = __r0;								\
  } while (0)

/*
 * The generic soft-fp code calls abort() on "can't happen" paths;
 * there is no abort() in the kernel, so bail out of the enclosing
 * emulation function with a 0 return value instead.
 */
#define abort()	return 0

/* NOTE(review): hard-wires little-endian for the soft-fp headers --
 * presumably this emulation is only built for LE configurations;
 * confirm against the sh math-emu Kconfig. */
#define __BYTE_ORDER __LITTLE_ENDIAN

73