/*
 * Helper for single-precision routines which calculate exp(ax) and do not
 * need special-case handling
 *
 * Copyright (c) 2019-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#ifndef MATH_V_EXPF_INLINE_H
#define MATH_V_EXPF_INLINE_H

#include "v_math.h"

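/* The first four members of v_expf_data are scalars kept contiguous so that
   v_expf_inline can load them with a single vld1q_f32 (&d->ln2_hi) and use
   them as lanes of one vector in lane-indexed FMAs; the remaining constants
   are stored as full vectors.  */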
struct v_expf_data
{
  float ln2_hi, ln2_lo, c0, c2;
  float32x4_t inv_ln2, c1, c3, c4;
  /* asuint(1.0f).  */
  uint32x4_t exponent_bias;
};

/* maxerr: 1.45358 +0.5 ulp.  */
#define V_EXPF_DATA                                                           \
  {                                                                           \
    .c0 = 0x1.0e4020p-7f, .c1 = V4 (0x1.573e2ep-5f), .c2 = 0x1.555e66p-3f,    \
    .c3 = V4 (0x1.fffdb6p-2f), .c4 = V4 (0x1.ffffecp-1f),                     \
    .ln2_hi = 0x1.62e4p-1f, .ln2_lo = 0x1.7f7d1cp-20f,                        \
    .inv_ln2 = V4 (0x1.715476p+0f), .exponent_bias = V4 (0x3f800000),         \
  }
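
/* The coefficients c0..c4 are close to the Taylor coefficients 1/120, 1/24,
   1/6, 1/2 and 1 of expm1 (r); presumably they have been tuned slightly for
   the reduced range [-ln2/2, ln2/2] (see the maxerr quoted above).  */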

static inline float32x4_t
v_expf_inline (float32x4_t x, const struct v_expf_data *d)
{
  /* Helper routine for calculating exp(ax).
     Copied from v_expf.c, with all special-case handling removed - the
     calling routine should handle special values if required.  */

  /* exp(ax) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
     ax = ln2*n + r, with r in [-ln2/2, ln2/2].  */
  float32x4_t ax = vabsq_f32 (x);
  float32x4_t ln2_c02 = vld1q_f32 (&d->ln2_hi);
  float32x4_t n = vrndaq_f32 (vmulq_f32 (ax, d->inv_ln2));
  float32x4_t r = vfmsq_laneq_f32 (ax, n, ln2_c02, 0);
  r = vfmsq_laneq_f32 (r, n, ln2_c02, 1);
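
  /* scale = 2^n: n is already integral, so convert it to int, shift it into
     the exponent field and add exponent_bias (asuint (1.0f)).  This assumes
     n stays within the range of finite exponents; inputs that would overflow
     it are left to the caller's special-case handling.  */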
  uint32x4_t e = vshlq_n_u32 (vreinterpretq_u32_s32 (vcvtq_s32_f32 (n)), 23);
  float32x4_t scale = vreinterpretq_f32_u32 (vaddq_u32 (e, d->exponent_bias));

  /* Custom order-4 Estrin avoids building high order monomial.  */
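  /* The grouping below evaluates
       poly = c4*r + (c3 + c2*r + (c1 + c0*r)*r2)*r2
            = c4*r + c3*r^2 + c2*r^3 + c1*r^4 + c0*r^5,
     i.e. poly(r) ~= exp(r) - 1 on the reduced range.  */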
  float32x4_t r2 = vmulq_f32 (r, r);
  float32x4_t p = vfmaq_laneq_f32 (d->c1, r, ln2_c02, 2);
  float32x4_t q = vfmaq_laneq_f32 (d->c3, r, ln2_c02, 3);
  q = vfmaq_f32 (q, p, r2);
  p = vmulq_f32 (d->c4, r);
  float32x4_t poly = vfmaq_f32 (p, q, r2);
  return vfmaq_f32 (scale, poly, scale);
}
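
/* Illustrative usage sketch (hypothetical names, not part of this header):
   a caller embeds the coefficient table once via V_EXPF_DATA and passes its
   address to v_expf_inline.  Note the helper takes the absolute value of its
   argument, so it returns exp (|x|):

     static const struct v_expf_data expf_data = V_EXPF_DATA;

     static inline float32x4_t
     example_exp_abs (float32x4_t x)
     {
       return v_expf_inline (x, &expf_data);
     }

   Special cases (overflow, underflow, non-finite inputs) must be handled by
   the caller, as noted above.  */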

#endif // MATH_V_EXPF_INLINE_H