/*
 * Double-precision vector e^x function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

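/* The exp lookup table has N = 2^V_EXP_TABLE_BITS entries (128 with the
   library's default of 7 table bits); IndexMask selects the low bits of the
   rounded, scaled input that index it.  */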
#define N (1 << V_EXP_TABLE_BITS)
#define IndexMask (N - 1)

static const struct
{
  float64x2_t poly[3];
  float64x2_t inv_ln2, ln2_hi, ln2_lo, shift;
#if !WANT_SIMD_EXCEPT
  float64x2_t special_bound, scale_thresh;
#endif
} data = {
  /* maxerr: 1.88 +0.5 ulp
     rel error: 1.4337*2^-53
     abs error: 1.4299*2^-53 in [ -ln2/256, ln2/256 ].  */
  .poly = { V2 (0x1.ffffffffffd43p-2), V2 (0x1.55555c75adbb2p-3),
	    V2 (0x1.55555da646206p-5) },
#if !WANT_SIMD_EXCEPT
  .scale_thresh = V2 (163840.0), /* 1280.0 * N.  */
  .special_bound = V2 (704.0),
#endif
  .inv_ln2 = V2 (0x1.71547652b82fep7), /* N/ln2.  */
  .ln2_hi = V2 (0x1.62e42fefa39efp-8), /* ln2/N.  */
  .ln2_lo = V2 (0x1.abc9e3b39803fp-63),
  .shift = V2 (0x1.8p+52)
};
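
/* inv_ln2 is scaled by N so that the rounding trick below yields
   n = round(x*N/ln2) directly, and ln2_hi + ln2_lo represent ln2/N to well
   beyond double precision for the argument reduction.  */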

#define C(i) data.poly[i]
#define Tab __v_exp_data

#if WANT_SIMD_EXCEPT

# define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511).  */
# define BigBound v_u64 (0x4080000000000000) /* asuint64 (0x1p9).  */
# define SpecialBound v_u64 (0x2080000000000000) /* BigBound - TinyBound.  */
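/* Subtracting TinyBound first folds both tails into one unsigned compare:
   |x| < 0x1p-511 wraps around to a huge value, and |x| >= 0x1p9 (as well as
   NaN and Inf) lands at or above SpecialBound.  Tiny lanes are routed to the
   special path because, with exceptions enabled, squaring the reduced
   argument below could otherwise raise a spurious underflow (r*r drops below
   the normal range once |x| is below about 0x1p-511).  */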

static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
{
  /* If fenv exceptions are to be triggered correctly, fall back to the scalar
     routine for special lanes.  */
  return v_call_f64 (exp, x, y, cmp);
}

#else

# define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513.  */
/* SpecialBias1 - SpecialBias2 = asuint(1.0).  */
# define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769.  */
# define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254.  */

static inline float64x2_t VPCS_ATTR
special_case (float64x2_t s, float64x2_t y, float64x2_t n)
{
  /* 2^(n/N) may overflow, break it up into s1*s2.  */
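  /* With b selected by the sign of n, s1 = 2^769 (n >= 0) or 2^-767 (n < 0)
     and s2 = s/s1, so s1 * (y*s2 + s2) = y*s + s is evaluated without the
     intermediate result over- or underflowing.  */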
  uint64x2_t b = vandq_u64 (vcltzq_f64 (n), SpecialOffset);
  float64x2_t s1 = vreinterpretq_f64_u64 (vsubq_u64 (SpecialBias1, b));
  float64x2_t s2 = vreinterpretq_f64_u64 (
      vaddq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (s), SpecialBias2), b));
  uint64x2_t cmp = vcagtq_f64 (n, data.scale_thresh);
  float64x2_t r1 = vmulq_f64 (s1, s1);
  float64x2_t r0 = vmulq_f64 (vfmaq_f64 (s2, y, s2), s1);
  return vbslq_f64 (cmp, r1, r0);
}

#endif

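/* Under the library's default naming, V_NAME_D1 (exp) expands to the AArch64
   vector-ABI symbol _ZGVnN2v_exp (two double-precision lanes).  */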
float64x2_t VPCS_ATTR V_NAME_D1 (exp) (float64x2_t x)
{
  float64x2_t n, r, r2, s, y, z;
  uint64x2_t cmp, u, e;

#if WANT_SIMD_EXCEPT
  /* If any lanes are special, mask them with 1 and retain a copy of x to allow
     special_case to fix special lanes later. This is only necessary if fenv
     exceptions are to be triggered correctly.  */
  float64x2_t xm = x;
  uint64x2_t iax = vreinterpretq_u64_f64 (vabsq_f64 (x));
  cmp = vcgeq_u64 (vsubq_u64 (iax, TinyBound), SpecialBound);
  if (unlikely (v_any_u64 (cmp)))
    x = vbslq_f64 (cmp, v_f64 (1), x);
#else
  cmp = vcagtq_f64 (x, data.special_bound);
#endif

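  /* Adding the 1.5*2^52 shift constant rounds x*N/ln2 to the nearest integer
     and leaves that integer in the low mantissa bits of z (in two's-complement
     form for negative n, which the extra 2^51 keeps in range); subtracting
     the shift again recovers it as a double.  */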
  /* n = round(x/(ln2/N)).  */
  z = vfmaq_f64 (data.shift, x, data.inv_ln2);
  u = vreinterpretq_u64_f64 (z);
  n = vsubq_f64 (z, data.shift);

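  /* The two fused multiply-subtracts below leave |r| <= ln2/(2N), i.e. the
     [-ln2/256, ln2/256] interval quoted for the polynomial error above when
     N = 128.  */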
  /* r = x - n*ln2/N.  */
  r = x;
  r = vfmsq_f64 (r, data.ln2_hi, n);
  r = vfmsq_f64 (r, data.ln2_lo, n);

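  /* u's low bits hold round(x*N/ln2); shifting left by 52 - V_EXP_TABLE_BITS
     moves its integer-exponent part into the exponent field, with the index
     bits landing in the top of the mantissa.  The table entries are
     pre-biased to cancel those index bits, so the addition forming s below
     yields asuint64 (2^(n/N)).  */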
  e = vshlq_n_u64 (u, 52 - V_EXP_TABLE_BITS);

  /* y = exp(r) - 1 ~= r + C0 r^2 + C1 r^3 + C2 r^4.  */
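  /* Evaluated as r + r2*(C0 + r*C1 + r2*C2) so that r2 and the inner
     polynomial can be computed in parallel.  */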
  r2 = vmulq_f64 (r, r);
  y = vfmaq_f64 (C (0), C (1), r);
  y = vfmaq_f64 (y, C (2), r2);
  y = vfmaq_f64 (r, y, r2);

  /* s = 2^(n/N).  */
  u = (uint64x2_t){ Tab[u[0] & IndexMask], Tab[u[1] & IndexMask] };
  s = vreinterpretq_f64_u64 (vaddq_u64 (u, e));

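  /* Two special-case strategies: with WANT_SIMD_EXCEPT, the masked lanes are
     redone by scalar exp on the original x so exception flags are raised
     correctly; otherwise the result is rescaled via the s1*s2 split to avoid
     overflowing s.  */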
  if (unlikely (v_any_u64 (cmp)))
#if WANT_SIMD_EXCEPT
    return special_case (xm, vfmaq_f64 (s, y, s), cmp);
#else
    return special_case (s, y, n);
#endif

  return vfmaq_f64 (s, y, s);
}
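
/* Illustrative caller, assuming the default _ZGVnN2v_exp symbol name (a
   hypothetical example, not part of this file):

     float64x2_t x = { 1.0, 2.0 };
     float64x2_t y = _ZGVnN2v_exp (x);

   y then holds approximately { e, e^2 }.  */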