1 //===-- lib/fp_lib.h - Floating-point utilities -------------------*- C -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a configuration header for soft-float routines in compiler-rt.
10 // This file does not provide any part of the compiler-rt interface, but defines
11 // many useful constants and utility routines that are used in the
12 // implementation of the soft-float routines in compiler-rt.
13 //
14 // Assumes that float, double and long double correspond to the IEEE-754
15 // binary32, binary64 and binary 128 types, respectively, and that integer
16 // endianness matches floating point endianness on the target platform.
17 //
18 //===----------------------------------------------------------------------===//
19
20 #ifndef FP_LIB_HEADER
21 #define FP_LIB_HEADER
22
23 #include "int_lib.h"
24 #include "int_math.h"
25 #include "int_types.h"
26 #include <limits.h>
27 #include <stdbool.h>
28 #include <stdint.h>
29
30 #if defined SINGLE_PRECISION
31
32 typedef uint16_t half_rep_t;
33 typedef uint32_t rep_t;
34 typedef uint64_t twice_rep_t;
35 typedef int32_t srep_t;
36 typedef float fp_t;
37 #define HALF_REP_C UINT16_C
38 #define REP_C UINT32_C
39 #define significandBits 23
40
// Count the leading zero bits of the 32-bit representation. clzsi is the
// compiler-rt helper from int_lib.h (presumably undefined for a == 0, like
// __builtin_clz — confirm before relying on that case).
static __inline int rep_clz(rep_t a) { return clzsi(a); }
42
43 // 32x32 --> 64 bit multiply
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  // Widen one operand so the multiplication happens in 64 bits, then split
  // the double-width product into its two 32-bit halves.
  const uint64_t wide = (uint64_t)a * b;
  *hi = (rep_t)(wide >> 32);
  *lo = (rep_t)wide;
}
49 COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b);
50
51 #elif defined DOUBLE_PRECISION
52
53 typedef uint32_t half_rep_t;
54 typedef uint64_t rep_t;
55 typedef int64_t srep_t;
56 typedef double fp_t;
57 #define HALF_REP_C UINT32_C
58 #define REP_C UINT64_C
59 #define significandBits 52
60
// Count the leading zero bits of the 64-bit representation.
// __builtin_clzll is undefined for a == 0; callers must pass a nonzero value.
// Uses __inline (not plain `inline`) for consistency with every other helper
// in this file.
static __inline int rep_clz(rep_t a) { return __builtin_clzll(a); }
62
63 #define loWord(a) (a & 0xffffffffU)
64 #define hiWord(a) (a >> 32)
65
66 // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
67 // many 64-bit platforms have this operation, but they tend to have hardware
68 // floating-point, so we don't bother with a special case for them here.
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  // Form the four 32x32 -> 64 partial products of the operands' halves.
  const uint64_t lo_lo = loWord(a) * loWord(b);
  const uint64_t lo_hi = loWord(a) * hiWord(b);
  const uint64_t hi_lo = hiWord(a) * loWord(b);
  const uint64_t hi_hi = hiWord(a) * hiWord(b);
  // Accumulate the pieces that contribute to the low word in a way that lets
  // the carry into the high word be recovered from the running sum.
  const uint64_t low32 = loWord(lo_lo);
  const uint64_t mid = hiWord(lo_lo) + loWord(lo_hi) + loWord(hi_lo);
  *lo = low32 + (mid << 32);
  // The high word collects the remaining partial products plus that carry.
  *hi = hiWord(lo_hi) + hiWord(hi_lo) + hiWord(mid) + hi_hi;
}
82 #undef loWord
83 #undef hiWord
84
85 COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
86
87 #elif defined QUAD_PRECISION
88 #if defined(CRT_HAS_F128) && defined(CRT_HAS_128BIT)
89 typedef uint64_t half_rep_t;
90 typedef __uint128_t rep_t;
91 typedef __int128_t srep_t;
92 typedef tf_float fp_t;
93 #define HALF_REP_C UINT64_C
94 #define REP_C (__uint128_t)
95 #if defined(CRT_HAS_IEEE_TF)
96 // Note: Since there is no explicit way to tell compiler the constant is a
97 // 128-bit integer, we let the constant be casted to 128-bit integer
98 #define significandBits 112
99 #define TF_MANT_DIG (significandBits + 1)
100
// Count the leading zero bits of the 128-bit representation by splitting it
// into two 64-bit halves, respecting the target's endianness.
static __inline int rep_clz(rep_t a) {
  const union {
    __uint128_t ll;
#if _YUGA_BIG_ENDIAN
    struct {
      uint64_t high, low;
    } s;
#else
    struct {
      uint64_t low, high;
    } s;
#endif
  } parts = {.ll = a};

  // __builtin_clzll is undefined for a zero argument, so pick whichever half
  // is known to contain the leading set bit.
  if (parts.s.high)
    return __builtin_clzll(parts.s.high);
  return 64 + __builtin_clzll(parts.s.low);
}
127
128 #define Word_LoMask UINT64_C(0x00000000ffffffff)
129 #define Word_HiMask UINT64_C(0xffffffff00000000)
130 #define Word_FullMask UINT64_C(0xffffffffffffffff)
131 #define Word_1(a) (uint64_t)((a >> 96) & Word_LoMask)
132 #define Word_2(a) (uint64_t)((a >> 64) & Word_LoMask)
133 #define Word_3(a) (uint64_t)((a >> 32) & Word_LoMask)
134 #define Word_4(a) (uint64_t)(a & Word_LoMask)
135
136 // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
137 // many 64-bit platforms have this operation, but they tend to have hardware
138 // floating-point, so we don't bother with a special case for them here.
static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
  // Schoolbook multiplication in base 2^32: each operand is split into four
  // 32-bit digits (Word_1 is the most significant), giving sixteen
  // 32x32 -> 64-bit partial products.
  const uint64_t product11 = Word_1(a) * Word_1(b);
  const uint64_t product12 = Word_1(a) * Word_2(b);
  const uint64_t product13 = Word_1(a) * Word_3(b);
  const uint64_t product14 = Word_1(a) * Word_4(b);
  const uint64_t product21 = Word_2(a) * Word_1(b);
  const uint64_t product22 = Word_2(a) * Word_2(b);
  const uint64_t product23 = Word_2(a) * Word_3(b);
  const uint64_t product24 = Word_2(a) * Word_4(b);
  const uint64_t product31 = Word_3(a) * Word_1(b);
  const uint64_t product32 = Word_3(a) * Word_2(b);
  const uint64_t product33 = Word_3(a) * Word_3(b);
  const uint64_t product34 = Word_3(a) * Word_4(b);
  const uint64_t product41 = Word_4(a) * Word_1(b);
  const uint64_t product42 = Word_4(a) * Word_2(b);
  const uint64_t product43 = Word_4(a) * Word_3(b);
  const uint64_t product44 = Word_4(a) * Word_4(b);

  // Sum the partial products along each diagonal of equal weight: sumN
  // carries weight 2^(32*N). Widening to 128 bits keeps the carries of up to
  // four 64-bit addends.
  const __uint128_t sum0 = (__uint128_t)product44;
  const __uint128_t sum1 = (__uint128_t)product34 + (__uint128_t)product43;
  const __uint128_t sum2 =
      (__uint128_t)product24 + (__uint128_t)product33 + (__uint128_t)product42;
  const __uint128_t sum3 = (__uint128_t)product14 + (__uint128_t)product23 +
                           (__uint128_t)product32 + (__uint128_t)product41;
  const __uint128_t sum4 =
      (__uint128_t)product13 + (__uint128_t)product22 + (__uint128_t)product31;
  const __uint128_t sum5 = (__uint128_t)product12 + (__uint128_t)product21;
  const __uint128_t sum6 = (__uint128_t)product11;

  // Recombine: r0 gathers the diagonals whose weight lies below 2^64; r1
  // gathers the contributions with weight 2^64 and 2^96 (plus carries).
  const __uint128_t r0 = (sum0 & Word_FullMask) + ((sum1 & Word_LoMask) << 32);
  const __uint128_t r1 = (sum0 >> 64) + ((sum1 >> 32) & Word_FullMask) +
                         (sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask);

  // Low 128 bits of the product; everything that carried past bit 127, plus
  // the high-weight diagonals, forms the high 128 bits.
  *lo = r0 + (r1 << 64);
  *hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 +
        (sum5 << 32) + (sum6 << 64);
}
177 #undef Word_1
178 #undef Word_2
179 #undef Word_3
180 #undef Word_4
181 #undef Word_HiMask
182 #undef Word_LoMask
183 #undef Word_FullMask
184 #endif // defined(CRT_HAS_IEEE_TF)
185 #else
186 typedef long double fp_t;
187 #endif // defined(CRT_HAS_F128) && defined(CRT_HAS_128BIT)
188 #else
189 #error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
190 #endif
191
192 #if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) || \
193 (defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE))
194 #define typeWidth (sizeof(rep_t) * CHAR_BIT)
195
toRep(fp_t x)196 static __inline rep_t toRep(fp_t x) {
197 const union {
198 fp_t f;
199 rep_t i;
200 } rep = {.f = x};
201 return rep.i;
202 }
203
fromRep(rep_t x)204 static __inline fp_t fromRep(rep_t x) {
205 const union {
206 fp_t f;
207 rep_t i;
208 } rep = {.i = x};
209 return rep.f;
210 }
211
212 #if !defined(QUAD_PRECISION) || defined(CRT_HAS_IEEE_TF)
213 #define exponentBits (typeWidth - significandBits - 1)
214 #define maxExponent ((1 << exponentBits) - 1)
215 #define exponentBias (maxExponent >> 1)
216
217 #define implicitBit (REP_C(1) << significandBits)
218 #define significandMask (implicitBit - 1U)
219 #define signBit (REP_C(1) << (significandBits + exponentBits))
220 #define absMask (signBit - 1U)
221 #define exponentMask (absMask ^ significandMask)
222 #define oneRep ((rep_t)exponentBias << significandBits)
223 #define infRep exponentMask
224 #define quietBit (implicitBit >> 1)
225 #define qnanRep (exponentMask | quietBit)
226
// Left-shift a (nonzero) subnormal significand until its leading set bit
// reaches the implicit-bit position, and return the corresponding exponent
// adjustment (1 - shift).
static __inline int normalize(rep_t *significand) {
  const int excess = rep_clz(*significand) - rep_clz(implicitBit);
  *significand = *significand << excess;
  return 1 - excess;
}
232
// Shift the double-wide quantity (hi, lo) left by count bits.
// NOTE(review): count == 0 would shift *lo right by typeWidth, which is
// undefined behavior; callers appear to guarantee 0 < count < typeWidth —
// confirm at the call sites.
static __inline void wideLeftShift(rep_t *hi, rep_t *lo, unsigned int count) {
  *hi = *hi << count | *lo >> (typeWidth - count);
  *lo = *lo << count;
}
237
// Shift the double-wide quantity (hi, lo) right by count bits, ORing any bits
// shifted out into the lowest bit of the result (the "sticky" bit) so that
// later rounding can still observe them.
// NOTE(review): count == 0 (first branch) and count == typeWidth (second
// branch) both produce a shift by typeWidth, which is undefined behavior;
// callers appear to avoid those values — confirm at the call sites.
static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo,
                                              unsigned int count) {
  if (count < typeWidth) {
    // Bits of *lo that fall off the end collapse into the sticky bit.
    const bool sticky = (*lo << (typeWidth - count)) != 0;
    *lo = *hi << (typeWidth - count) | *lo >> count | sticky;
    *hi = *hi >> count;
  } else if (count < 2 * typeWidth) {
    // The entire low word (and part of the high word) is shifted out.
    const bool sticky = *hi << (2 * typeWidth - count) | *lo;
    *lo = *hi >> (count - typeWidth) | sticky;
    *hi = 0;
  } else {
    // Everything is shifted out; only the sticky bit can survive.
    const bool sticky = *hi | *lo;
    *lo = sticky;
    *hi = 0;
  }
}
254
255 // Implements logb methods (logb, logbf, logbl) for IEEE-754. This avoids
256 // pulling in a libm dependency from compiler-rt, but is not meant to replace
257 // it (i.e. code calling logb() should get the one from libm, not this), hence
258 // the __compiler_rt prefix.
static __inline fp_t __compiler_rt_logbX(fp_t x) {
  rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  // Abnormal cases:
  // 1) +/- inf returns +inf; NaN returns NaN
  // 2) 0.0 returns -inf
  if (exp == maxExponent) {
    // Sign bit clear covers +inf and positive NaNs; x != x catches the
    // remaining (negative) NaNs.
    if (((rep & signBit) == 0) || (x != x)) {
      return x; // NaN or +inf: return x
    } else {
      return -x; // -inf: return -x
    }
  } else if (x == 0.0) {
    // 0.0: return -inf
    return fromRep(infRep | signBit);
  }

  if (exp != 0) {
    // Normal number
    return exp - exponentBias; // Unbias exponent
  } else {
    // Subnormal number; normalize and repeat.
    // normalize() returns 1 - shift, so recover the shift to adjust the
    // unbiased exponent for the renormalization.
    rep &= absMask;
    const int shift = 1 - normalize(&rep);
    exp = (rep & exponentMask) >> significandBits;
    return exp - exponentBias - shift; // Unbias exponent
  }
}
288
289 // Avoid using scalbn from libm. Unlike libc/libm scalbn, this function never
290 // sets errno on underflow/overflow.
static __inline fp_t __compiler_rt_scalbnX(fp_t x, int y) {
  const rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  if (x == 0.0 || exp == maxExponent)
    return x; // +/- 0.0, NaN, or inf: return x

  // Normalize subnormal input.
  rep_t sig = rep & significandMask;
  if (exp == 0) {
    exp += normalize(&sig);
    sig &= ~implicitBit; // clear the implicit bit again
  }

  // exp + y may overflow int; saturate so the range checks below still fire.
  if (__builtin_sadd_overflow(exp, y, &exp)) {
    // Saturate the exponent, which will guarantee an underflow/overflow below.
    exp = (y >= 0) ? INT_MAX : INT_MIN;
  }

  // Return this value: [+/-] 1.sig * 2 ** (exp - exponentBias).
  const rep_t sign = rep & signBit;
  if (exp >= maxExponent) {
    // Overflow, which could produce infinity or the largest-magnitude value,
    // depending on the rounding mode. The multiply by 2 lets the hardware
    // apply the current rounding mode.
    return fromRep(sign | ((rep_t)(maxExponent - 1) << significandBits)) * 2.0f;
  } else if (exp <= 0) {
    // Subnormal or underflow. Use floating-point multiply to handle truncation
    // correctly.
    fp_t tmp = fromRep(sign | (REP_C(1) << significandBits) | sig);
    exp += exponentBias - 1;
    if (exp < 1)
      exp = 1;
    tmp *= fromRep((rep_t)exp << significandBits);
    return tmp;
  } else
    return fromRep(sign | ((rep_t)exp << significandBits) | sig);
}
328
329 #endif // !defined(QUAD_PRECISION) || defined(CRT_HAS_IEEE_TF)
330
331 // Avoid using fmax from libm.
static __inline fp_t __compiler_rt_fmaxX(fp_t x, fp_t y) {
  // If either argument is NaN, return the other argument. If both are NaN,
  // arbitrarily return the second one. Otherwise, if both arguments are +/-0,
  // arbitrarily return the first one.
  if (crt_isnan(x))
    return y;
  return x < y ? y : x;
}
338
339 #endif
340
341 #if defined(SINGLE_PRECISION)
342
// Single-precision wrappers that forward to the generic helpers above.
static __inline fp_t __compiler_rt_logbf(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbnf(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmaxf(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmaxf which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmaxf(x, y);
#else
  // __builtin_fmaxf frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}
358
359 #elif defined(DOUBLE_PRECISION)
360
// Double-precision wrappers that forward to the generic helpers above.
static __inline fp_t __compiler_rt_logb(fp_t x) {
  return __compiler_rt_logbX(x);
}
static __inline fp_t __compiler_rt_scalbn(fp_t x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
#if defined(__aarch64__)
  // Use __builtin_fmax which turns into an fmaxnm instruction on AArch64.
  return __builtin_fmax(x, y);
#else
  // __builtin_fmax frequently turns into a libm call, so inline the function.
  return __compiler_rt_fmaxX(x, y);
#endif
}
376
377 #elif defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE)
378 // The generic implementation only works for ieee754 floating point. For other
379 // floating point types, continue to rely on the libm implementation for now.
380 #if defined(CRT_HAS_IEEE_TF)
// IEEE binary128 wrappers: the generic helpers operate directly on the
// tf_float representation, so simply forward to them.
static __inline tf_float __compiler_rt_logbtf(tf_float x) {
  return __compiler_rt_logbX(x);
}
static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
  return __compiler_rt_scalbnX(x, y);
}
static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
  return __compiler_rt_fmaxX(x, y);
}
390 #define __compiler_rt_logbl __compiler_rt_logbtf
391 #define __compiler_rt_scalbnl __compiler_rt_scalbntf
392 #define __compiler_rt_fmaxl __compiler_rt_fmaxtf
393 #define crt_fabstf crt_fabsf128
394 #define crt_copysigntf crt_copysignf128
395 #elif defined(CRT_LDBL_128BIT)
// 128-bit long double that is not IEEE binary128: fall back to the libm
// implementations, since the generic helpers above assume the IEEE layout.
static __inline tf_float __compiler_rt_logbtf(tf_float x) {
  return crt_logbl(x);
}
static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
  return crt_scalbnl(x, y);
}
static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
  return crt_fmaxl(x, y);
}
405 #define __compiler_rt_logbl crt_logbl
406 #define __compiler_rt_scalbnl crt_scalbnl
407 #define __compiler_rt_fmaxl crt_fmaxl
408 #define crt_fabstf crt_fabsl
409 #define crt_copysigntf crt_copysignl
410 #else
411 #error Unsupported TF mode type
412 #endif
413
414 #endif // *_PRECISION
415
416 #endif // FP_LIB_HEADER
417