xref: /freebsd/contrib/arm-optimized-routines/pl/math/math_config.h (revision 7ef62cebc2f965b0f640263e179276928885e33d)
1 /*
2  * Configuration for math routines.
3  *
4  * Copyright (c) 2017-2023, Arm Limited.
5  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6  */
7 
8 #ifndef _MATH_CONFIG_H
9 #define _MATH_CONFIG_H
10 
11 #include <math.h>
12 #include <stdint.h>
13 
14 #ifndef WANT_ROUNDING
15 /* If defined to 1, return correct results for special cases in non-nearest
16    rounding modes (logf (1.0f) returns 0.0f with FE_DOWNWARD rather than -0.0f).
17    This may be set to 0 if there is no fenv support or if math functions only
18    get called in round to nearest mode.  */
19 # define WANT_ROUNDING 1
20 #endif
21 #ifndef WANT_ERRNO
22 /* If defined to 1, set errno in math functions according to ISO C.  Many math
23    libraries do not set errno, so this is 0 by default.  It may need to be
24    set to 1 if math.h has (math_errhandling & MATH_ERRNO) != 0.  */
25 # define WANT_ERRNO 0
26 #endif
#ifndef WANT_SIMD_EXCEPT
/* If defined to 1, trigger fp exceptions in vector routines, consistently with
   behaviour expected from the corresponding scalar routine.  */
# define WANT_SIMD_EXCEPT 0
#endif
32 
33 /* Compiler can inline round as a single instruction.  */
34 #ifndef HAVE_FAST_ROUND
35 # if __aarch64__
36 #   define HAVE_FAST_ROUND 1
37 # else
38 #   define HAVE_FAST_ROUND 0
39 # endif
40 #endif
41 
42 /* Compiler can inline lround, but not (long)round(x).  */
43 #ifndef HAVE_FAST_LROUND
44 # if __aarch64__ && (100*__GNUC__ + __GNUC_MINOR__) >= 408 && __NO_MATH_ERRNO__
45 #   define HAVE_FAST_LROUND 1
46 # else
47 #   define HAVE_FAST_LROUND 0
48 # endif
49 #endif
50 
51 /* Compiler can inline fma as a single instruction.  */
52 #ifndef HAVE_FAST_FMA
53 # if defined FP_FAST_FMA || __aarch64__
54 #   define HAVE_FAST_FMA 1
55 # else
56 #   define HAVE_FAST_FMA 0
57 # endif
58 #endif
59 
60 /* Provide *_finite symbols and some of the glibc hidden symbols
61    so libmathlib can be used with binaries compiled against glibc
62    to interpose math functions with both static and dynamic linking.  */
63 #ifndef USE_GLIBC_ABI
64 # if __GNUC__
65 #   define USE_GLIBC_ABI 1
66 # else
67 #   define USE_GLIBC_ABI 0
68 # endif
69 #endif
70 
/* Optionally used extensions.  */
#ifdef __GNUC__
# define HIDDEN __attribute__ ((__visibility__ ("hidden")))
# define NOINLINE __attribute__ ((noinline))
# define UNUSED __attribute__ ((unused))
# define likely(x) __builtin_expect (!!(x), 1)
/* Normalize the condition with !! (as likely does) so pointer or wide
   integer arguments behave identically in both macros.  */
# define unlikely(x) __builtin_expect (!!(x), 0)
# if __GNUC__ >= 9
#   define attribute_copy(f) __attribute__ ((copy (f)))
# else
#   define attribute_copy(f)
# endif
# define strong_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f))) attribute_copy (f);
# define hidden_alias(f, a) \
  extern __typeof (f) a __attribute__ ((alias (#f), visibility ("hidden"))) \
  attribute_copy (f);
#else
# define HIDDEN
# define NOINLINE
# define UNUSED
# define likely(x) (x)
# define unlikely(x) (x)
#endif
95 
96 #if HAVE_FAST_ROUND
97 /* When set, the roundtoint and converttoint functions are provided with
98    the semantics documented below.  */
99 # define TOINT_INTRINSICS 1
100 
101 /* Round x to nearest int in all rounding modes, ties have to be rounded
102    consistently with converttoint so the results match.  If the result
103    would be outside of [-2^31, 2^31-1] then the semantics is unspecified.  */
104 static inline double_t
105 roundtoint (double_t x)
106 {
107   return round (x);
108 }
109 
/* Convert x to nearest int in all rounding modes, ties have to be rounded
   consistently with roundtoint.  If the result is not representable in an
   int32_t then the semantics is unspecified.  */
static inline int32_t
converttoint (double_t x)
{
# if HAVE_FAST_LROUND
  /* Single-instruction lround is available (aarch64, errno-free math).  */
  return lround (x);
# else
  /* Fallback: round to nearest, then truncate the integral result.  */
  return (long) round (x);
# endif
}
122 #endif
123 
/* Return the IEEE-754 bit pattern of f as an unsigned integer.  */
static inline uint32_t
asuint (float f)
{
  /* Type-pun through a union; this is well-defined in C.  */
  union
  {
    float value;
    uint32_t bits;
  } cvt = {.value = f};
  return cvt.bits;
}
134 
/* Reinterpret the bit pattern i as an IEEE-754 single-precision value.  */
static inline float
asfloat (uint32_t i)
{
  /* Type-pun through a union; this is well-defined in C.  */
  union
  {
    uint32_t bits;
    float value;
  } cvt = {.bits = i};
  return cvt.value;
}
145 
/* Return the IEEE-754 bit pattern of f as an unsigned 64-bit integer.  */
static inline uint64_t
asuint64 (double f)
{
  /* Type-pun through a union; this is well-defined in C.  */
  union
  {
    double value;
    uint64_t bits;
  } cvt = {.value = f};
  return cvt.bits;
}
156 
/* Reinterpret the bit pattern i as an IEEE-754 double-precision value.  */
static inline double
asdouble (uint64_t i)
{
  /* Type-pun through a union; this is well-defined in C.  */
  union
  {
    uint64_t bits;
    double value;
  } cvt = {.bits = i};
  return cvt.value;
}
167 
#ifndef IEEE_754_2008_SNAN
/* Nonzero: IEEE 754-2008 NaN encoding, where a clear quiet bit (bit 22 of
   a float) marks a signaling NaN.  Zero selects the inverted legacy
   convention (pre-2008 targets, e.g. MIPS — TODO confirm target list).  */
# define IEEE_754_2008_SNAN 1
#endif
/* Return nonzero if x is a signaling NaN.  */
static inline int
issignalingf_inline (float x)
{
  uint32_t ix = asuint (x);
  if (!IEEE_754_2008_SNAN)
    /* Legacy encoding: exponent all ones and quiet bit set.  */
    return (ix & 0x7fc00000) == 0x7fc00000;
  /* 2008 encoding: flip the quiet bit, then a signaling NaN compares
     greater than the largest quiet-NaN pattern; the 2* shifts out the
     sign bit so both signs are handled.  */
  return 2 * (ix ^ 0x00400000) > 2u * 0x7fc00000;
}
179 
/* Return nonzero if x is a signaling NaN (double variant of
   issignalingf_inline; bit 51 is the quiet bit).  */
static inline int
issignaling_inline (double x)
{
  uint64_t ix = asuint64 (x);
  if (!IEEE_754_2008_SNAN)
    /* Legacy encoding: exponent all ones and quiet bit set.  */
    return (ix & 0x7ff8000000000000) == 0x7ff8000000000000;
  /* 2008 encoding: flip the quiet bit and compare against the largest
     quiet-NaN pattern, with 2* discarding the sign bit.  */
  return 2 * (ix ^ 0x0008000000000000) > 2 * 0x7ff8000000000000ULL;
}
188 
189 #if __aarch64__ && __GNUC__
/* Prevent the optimization of a floating-point expression.  */
static inline float
opt_barrier_float (float x)
{
  /* "+w" ties x to an FP/SIMD register; the empty volatile asm makes the
     value opaque to the optimizer at zero runtime cost.  */
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Double-precision variant of opt_barrier_float.  */
static inline double
opt_barrier_double (double x)
{
  /* Empty volatile asm with a register constraint defeats constant
     folding and code motion around this value.  */
  __asm__ __volatile__ ("" : "+w" (x));
  return x;
}
/* Force the evaluation of a floating-point expression for its side-effect.  */
static inline void
force_eval_float (float x)
{
  /* Consuming x in a volatile asm forces the computation that produced it
     to happen (e.g. so its fp exceptions are actually raised).  */
  __asm__ __volatile__ ("" : "+w" (x));
}
/* Double-precision variant of force_eval_float.  */
static inline void
force_eval_double (double x)
{
  /* Consuming x in a volatile asm forces its computation.  */
  __asm__ __volatile__ ("" : "+w" (x));
}
214 #else
/* Portable fallback: route the value through a volatile object so the
   compiler cannot fold or move the expression.  */
static inline float
opt_barrier_float (float x)
{
  volatile float barrier = x;
  return barrier;
}
/* Portable fallback: route the value through a volatile object so the
   compiler cannot fold or move the expression.  */
static inline double
opt_barrier_double (double x)
{
  volatile double barrier = x;
  return barrier;
}
/* Portable fallback: storing to a volatile forces x to be evaluated.  */
static inline void
force_eval_float (float x)
{
  volatile float y UNUSED = x;
}
/* Portable fallback: storing to a volatile forces x to be evaluated.  */
static inline void
force_eval_double (double x)
{
  volatile double y UNUSED = x;
}
237 #endif
238 
/* Evaluate an expression as the specified type, normally a type
   cast should be enough, but compilers implement non-standard
   excess-precision handling, so when FLT_EVAL_METHOD != 0 then
   these functions may need to be customized.  */
static inline float
eval_as_float (float x)
{
  float result = x;
  return result;
}
/* Double-precision variant of eval_as_float.  */
static inline double
eval_as_double (double x)
{
  double result = x;
  return result;
}
253 
254 /* Error handling tail calls for special cases, with a sign argument.
255    The sign of the return value is set if the argument is non-zero.  */
256 
257 /* The result overflows.  */
258 HIDDEN float __math_oflowf (uint32_t);
259 /* The result underflows to 0 in nearest rounding mode.  */
260 HIDDEN float __math_uflowf (uint32_t);
261 /* The result underflows to 0 in some directed rounding mode only.  */
262 HIDDEN float __math_may_uflowf (uint32_t);
263 /* Division by zero.  */
264 HIDDEN float __math_divzerof (uint32_t);
265 /* The result overflows.  */
266 HIDDEN double __math_oflow (uint32_t);
267 /* The result underflows to 0 in nearest rounding mode.  */
268 HIDDEN double __math_uflow (uint32_t);
269 /* The result underflows to 0 in some directed rounding mode only.  */
270 HIDDEN double __math_may_uflow (uint32_t);
271 /* Division by zero.  */
272 HIDDEN double __math_divzero (uint32_t);
273 
274 /* Error handling using input checking.  */
275 
276 /* Invalid input unless it is a quiet NaN.  */
277 HIDDEN float __math_invalidf (float);
278 /* Invalid input unless it is a quiet NaN.  */
279 HIDDEN double __math_invalid (double);
280 
281 /* Error handling using output checking, only for errno setting.  */
282 
283 /* Check if the result overflowed to infinity.  */
284 HIDDEN double __math_check_oflow (double);
285 /* Check if the result underflowed to 0.  */
286 HIDDEN double __math_check_uflow (double);
287 
288 /* Check if the result overflowed to infinity.  */
289 static inline double
290 check_oflow (double x)
291 {
292   return WANT_ERRNO ? __math_check_oflow (x) : x;
293 }
294 
295 /* Check if the result underflowed to 0.  */
296 static inline double
297 check_uflow (double x)
298 {
299   return WANT_ERRNO ? __math_check_uflow (x) : x;
300 }
301 
302 /* Check if the result overflowed to infinity.  */
303 HIDDEN float __math_check_oflowf (float);
304 /* Check if the result underflowed to 0.  */
305 HIDDEN float __math_check_uflowf (float);
306 
307 /* Check if the result overflowed to infinity.  */
308 static inline float
309 check_oflowf (float x)
310 {
311   return WANT_ERRNO ? __math_check_oflowf (x) : x;
312 }
313 
314 /* Check if the result underflowed to 0.  */
315 static inline float
316 check_uflowf (float x)
317 {
318   return WANT_ERRNO ? __math_check_uflowf (x) : x;
319 }
320 
/* Polynomial coefficients for erff (two coefficient sets).  */
extern const struct erff_data
{
  float erff_poly_A[6];
  float erff_poly_B[7];
} __erff_data HIDDEN;

/* Data for logf and log10f.  */
#define LOGF_TABLE_BITS 4
#define LOGF_POLY_ORDER 4
extern const struct logf_data
{
  /* Table of (1/c, log(c)) pairs, 1 << LOGF_TABLE_BITS entries.  */
  struct
  {
    double invc, logc;
  } tab[1 << LOGF_TABLE_BITS];
  double ln2;
  double invln10;
  double poly[LOGF_POLY_ORDER - 1]; /* First order coefficient is 1.  */
} __logf_data HIDDEN;

/* Data for low accuracy log10 (with 1/ln(10) included in coefficients).  */
#define LOG10_TABLE_BITS 7
#define LOG10_POLY_ORDER 6
#define LOG10_POLY1_ORDER 12
extern const struct log10_data
{
  double ln2hi;
  double ln2lo;
  double invln10;
  double poly[LOG10_POLY_ORDER - 1]; /* First coefficient is 1/log(10).  */
  double poly1[LOG10_POLY1_ORDER - 1];
  struct {double invc, logc;} tab[1 << LOG10_TABLE_BITS];
#if !HAVE_FAST_FMA
  /* Extra split (hi, lo) c values, used only when fast fma is absent.  */
  struct {double chi, clo;} tab2[1 << LOG10_TABLE_BITS];
#endif
} __log10_data HIDDEN;
357 
#define EXP_TABLE_BITS 7
#define EXP_POLY_ORDER 5
/* Use polynomial that is optimized for a wider input range.  This may be
   needed for good precision in non-nearest rounding and !TOINT_INTRINSICS.  */
#define EXP_POLY_WIDE 0
/* Use close to nearest rounding toint when !TOINT_INTRINSICS.  This may be
   needed for good precision in non-nearest rounding and !EXP_POLY_WIDE.  */
#define EXP_USE_TOINT_NARROW 0
#define EXP2_POLY_ORDER 5
#define EXP2_POLY_WIDE 0
/* Shared table and coefficients for exp and exp2.  */
extern const struct exp_data
{
  double invln2N;
  double shift;
  double negln2hiN;
  double negln2loN;
  double poly[4]; /* Last four coefficients.  */
  double exp2_shift;
  double exp2_poly[EXP2_POLY_ORDER];
  uint64_t tab[2*(1 << EXP_TABLE_BITS)];
} __exp_data HIDDEN;

/* Piecewise-polynomial data for erfc: per-interval coefficients plus the
   interval boundaries.  */
#define ERFC_NUM_INTERVALS 20
#define ERFC_POLY_ORDER 12
extern const struct erfc_data
{
  double interval_bounds[ERFC_NUM_INTERVALS + 1];
  double poly[ERFC_NUM_INTERVALS][ERFC_POLY_ORDER + 1];
} __erfc_data HIDDEN;
extern const struct v_erfc_data
{
  double interval_bounds[ERFC_NUM_INTERVALS + 1];
  /* Note: one more polynomial row than __erfc_data.  */
  double poly[ERFC_NUM_INTERVALS + 1][ERFC_POLY_ORDER + 1];
}  __v_erfc_data HIDDEN;

/* Four polynomial coefficient sets for erfcf.  */
#define ERFCF_POLY_NCOEFFS 16
extern const struct erfcf_poly_data
{
  double poly[4][ERFCF_POLY_NCOEFFS];
} __erfcf_poly_data HIDDEN;
398 
/* Lookup table for the vector exp tail computation.  */
#define V_EXP_TAIL_TABLE_BITS 8
extern const uint64_t __v_exp_tail_data[1 << V_EXP_TAIL_TABLE_BITS] HIDDEN;

/* Per-interval shifts and coefficients for vector erf.  */
#define V_ERF_NINTS 49
#define V_ERF_NCOEFFS 10
extern const struct v_erf_data
{
  double shifts[V_ERF_NINTS];
  double coeffs[V_ERF_NCOEFFS][V_ERF_NINTS];
} __v_erf_data HIDDEN;

/* Coefficient pairs for vector erff.  */
#define V_ERFF_NCOEFFS 7
extern const struct v_erff_data
{
  float coeffs[V_ERFF_NCOEFFS][2];
} __v_erff_data HIDDEN;

/* Polynomial coefficients for atan.  */
#define ATAN_POLY_NCOEFFS 20
extern const struct atan_poly_data
{
  double poly[ATAN_POLY_NCOEFFS];
} __atan_poly_data HIDDEN;

/* Polynomial coefficients for atanf.  */
#define ATANF_POLY_NCOEFFS 8
extern const struct atanf_poly_data
{
  float poly[ATANF_POLY_NCOEFFS];
} __atanf_poly_data HIDDEN;

/* Polynomial coefficients for asinhf.  */
#define ASINHF_NCOEFFS 8
extern const struct asinhf_data
{
  float coeffs[ASINHF_NCOEFFS];
} __asinhf_data HIDDEN;
433 
/* Table of (1/c, log(c)) pairs plus polynomial coefficients for log.  */
#define LOG_TABLE_BITS 7
#define LOG_POLY_ORDER 6
#define LOG_POLY1_ORDER 12
extern const struct log_data
{
  double ln2hi;
  double ln2lo;
  double poly[LOG_POLY_ORDER - 1]; /* First coefficient is 1.  */
  double poly1[LOG_POLY1_ORDER - 1];
  struct
  {
    double invc, logc;
  } tab[1 << LOG_TABLE_BITS];
#if !HAVE_FAST_FMA
  /* Extra split (hi, lo) c values, used only when fast fma is absent.  */
  struct
  {
    double chi, clo;
  } tab2[1 << LOG_TABLE_BITS];
#endif
} __log_data HIDDEN;

/* Polynomial coefficients for asinh.  */
#define ASINH_NCOEFFS 18
extern const struct asinh_data
{
  double poly[ASINH_NCOEFFS];
} __asinh_data HIDDEN;

/* Polynomial coefficients for log1p.  */
#define LOG1P_NCOEFFS 19
extern const struct log1p_data
{
  double coeffs[LOG1P_NCOEFFS];
} __log1p_data HIDDEN;

/* NOTE(review): the empty *_2U5 macros appear to be 2.5 ulp accuracy
   markers for the log1pf variants — confirm against the routines.  */
#define LOG1PF_2U5
#define V_LOG1PF_2U5
#define LOG1PF_NCOEFFS 9
extern const struct log1pf_data
{
  float coeffs[LOG1PF_NCOEFFS];
} __log1pf_data HIDDEN;

#define TANF_P_POLY_NCOEFFS 6
/* cotan approach needs order 3 on [0, pi/4] to reach <3.5ulps.  */
#define TANF_Q_POLY_NCOEFFS 4
extern const struct tanf_poly_data
{
  float poly_tan[TANF_P_POLY_NCOEFFS];
  float poly_cotan[TANF_Q_POLY_NCOEFFS];
} __tanf_poly_data HIDDEN;

/* Polynomial coefficients for vector log2f.  */
#define V_LOG2F_POLY_NCOEFFS 9
extern const struct v_log2f_data
{
  float poly[V_LOG2F_POLY_NCOEFFS];
} __v_log2f_data HIDDEN;
489 
/* Table and polynomial coefficients for vector log2.  */
#define V_LOG2_TABLE_BITS 7
#define V_LOG2_POLY_ORDER 6
extern const struct v_log2_data
{
  double poly[V_LOG2_POLY_ORDER - 1];
  struct
  {
    double invc, log2c;
  } tab[1 << V_LOG2_TABLE_BITS];
} __v_log2_data HIDDEN;

/* Coefficients shared by the sinf vector variants.  */
#define V_SINF_NCOEFFS 4
extern const struct sv_sinf_data
{
  float coeffs[V_SINF_NCOEFFS];
} __sv_sinf_data HIDDEN;

/* Table and polynomial coefficients for vector log10.  */
#define V_LOG10_TABLE_BITS 7
#define V_LOG10_POLY_ORDER 6
extern const struct v_log10_data
{
  struct
  {
    double invc, log10c;
  } tab[1 << V_LOG10_TABLE_BITS];
  double poly[V_LOG10_POLY_ORDER - 1];
  double invln10, log10_2;
} __v_log10_data HIDDEN;

#define V_LOG10F_POLY_ORDER 9
extern const float __v_log10f_poly[V_LOG10F_POLY_ORDER - 1] HIDDEN;

#define SV_LOGF_POLY_ORDER 8
extern const float __sv_logf_poly[SV_LOGF_POLY_ORDER - 1] HIDDEN;

/* Data for the sv_ (presumably SVE — confirm) log variant; the table is
   stored as two parallel arrays rather than an array of pairs.  */
#define SV_LOG_POLY_ORDER 6
#define SV_LOG_TABLE_BITS 7
extern const struct sv_log_data
{
  double invc[1 << SV_LOG_TABLE_BITS];
  double logc[1 << SV_LOG_TABLE_BITS];
  double poly[SV_LOG_POLY_ORDER - 1];
} __sv_log_data HIDDEN;

/* NOTE(review): presumably selects an FEXPA-instruction-based variant of
   the sv expf implementation — confirm against the routine.  */
#ifndef SV_EXPF_USE_FEXPA
#define SV_EXPF_USE_FEXPA 0
#endif
#define SV_EXPF_POLY_ORDER 6
extern const float __sv_expf_poly[SV_EXPF_POLY_ORDER - 1] HIDDEN;

#define EXPM1F_POLY_ORDER 5
extern const float __expm1f_poly[EXPM1F_POLY_ORDER] HIDDEN;

/* Table and scaled coefficients for expf.  */
#define EXPF_TABLE_BITS 5
#define EXPF_POLY_ORDER 3
extern const struct expf_data
{
  uint64_t tab[1 << EXPF_TABLE_BITS];
  double invln2_scaled;
  double poly_scaled[EXPF_POLY_ORDER];
} __expf_data HIDDEN;

#define EXPM1_POLY_ORDER 11
extern const double __expm1_poly[EXPM1_POLY_ORDER] HIDDEN;
554 
/* Polynomial and small table for cbrtf.  */
extern const struct cbrtf_data
{
  float poly[4];
  float table[5];
} __cbrtf_data HIDDEN;

/* Polynomial and small table for cbrt.  */
extern const struct cbrt_data
{
  double poly[4];
  double table[5];
} __cbrt_data HIDDEN;

/* Split -pi/2 constant and polynomial coefficients for vector tan.  */
extern const struct v_tan_data
{
  double neg_half_pi_hi, neg_half_pi_lo;
  double poly[9];
} __v_tan_data HIDDEN;
572 #endif
573