/*
 * Double-precision vector pow function.
 *
 * Copyright (c) 2020-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

/* Defines parameters of the approximation and scalar fallback.  */
#include "finite_pow.h"

#define VecSmallExp v_u64 (SmallExp)
#define VecThresExp v_u64 (ThresExp)

#define VecSmallPowX v_u64 (SmallPowX)
#define VecThresPowX v_u64 (ThresPowX)
#define VecSmallPowY v_u64 (SmallPowY)
#define VecThresPowY v_u64 (ThresPowY)

static const struct data
{
  float64x2_t log_poly[7];
  float64x2_t exp_poly[3];
  float64x2_t ln2_hi, ln2_lo;
  float64x2_t shift, inv_ln2_n, ln2_hi_n, ln2_lo_n;
} data = {
  /* Coefficients copied from v_pow_log_data.c
     relative error: 0x1.11922ap-70 in [-0x1.6bp-8, 0x1.6bp-8]
     Coefficients are scaled to match the scaling during evaluation.  */
  .log_poly = { V2 (-0x1p-1), V2 (0x1.555555555556p-2 * -2),
		V2 (-0x1.0000000000006p-2 * -2), V2 (0x1.999999959554ep-3 * 4),
		V2 (-0x1.555555529a47ap-3 * 4), V2 (0x1.2495b9b4845e9p-3 * -8),
		V2 (-0x1.0002b8b263fc3p-3 * -8) },
  .ln2_hi = V2 (0x1.62e42fefa3800p-1),
  .ln2_lo = V2 (0x1.ef35793c76730p-45),
  /* Polynomial coefficients: abs error: 1.43*2^-58, ulp error: 0.549
     (0.550 without fma) if |x| < ln2/512.  */
  .exp_poly = { V2 (0x1.fffffffffffd4p-2), V2 (0x1.5555571d6ef9p-3),
		V2 (0x1.5555576a5adcep-5) },
  .shift = V2 (0x1.8p52), /* round to nearest int. without intrinsics.  */
  .inv_ln2_n = V2 (0x1.71547652b82fep8), /* N/ln2.  */
  .ln2_hi_n = V2 (0x1.62e42fefc0000p-9), /* ln2/N.  */
  .ln2_lo_n = V2 (-0x1.c610ca86c3899p-45),
};

#define A(i) data.log_poly[i]
#define C(i) data.exp_poly[i]

/* This version implements an algorithm close to AOR scalar pow but
   - does not implement the trick in the exp's specialcase subroutine to avoid
     double-rounding,
   - does not use a tail in the exponential core computation,
   - and pow's exp polynomial order and table bits might differ.

   Maximum measured error is 1.04 ULPs:
   _ZGVnN2vv_pow(0x1.024a3e56b3c3p-136, 0x1.87910248b58acp-13)
     got 0x1.f71162f473251p-1
    want 0x1.f71162f473252p-1.  */

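/* Per-lane scalar gather: each lane's table index is taken from the top
   V_POW_LOG_TABLE_BITS bits of the mantissa of the offset representation I,
   masked down to the N_LOG entries of TABLE.  */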
static inline float64x2_t
v_masked_lookup_f64 (const double *table, uint64x2_t i)
{
  return (float64x2_t){
    table[(i[0] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)],
    table[(i[1] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)]
  };
}

/* Compute y+TAIL = log(x), where the rounded result is y and TAIL has about
   15 additional bits of precision.  IX is the bit representation of x, but
   normalized in the subnormal range using the sign bit for the exponent.  */
static inline float64x2_t
v_log_inline (uint64x2_t ix, float64x2_t *tail, const struct data *d)
{
  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
  uint64x2_t tmp = vsubq_u64 (ix, v_u64 (Off));
  int64x2_t k
      = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift.  */
  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, v_u64 (0xfffULL << 52)));
  float64x2_t z = vreinterpretq_f64_u64 (iz);
  float64x2_t kd = vcvtq_f64_s64 (k);
  /* log(x) = k*Ln2 + log(c) + log1p(z/c-1).  */
  float64x2_t invc = v_masked_lookup_f64 (__v_pow_log_data.invc, tmp);
  float64x2_t logc = v_masked_lookup_f64 (__v_pow_log_data.logc, tmp);
  float64x2_t logctail = v_masked_lookup_f64 (__v_pow_log_data.logctail, tmp);
  /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
     |z/c - 1| < 1/N, so r = z/c - 1 is exactly representable.  */
  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);
  /* k*Ln2 + log(c) + r.  */
  float64x2_t t1 = vfmaq_f64 (logc, kd, d->ln2_hi);
  float64x2_t t2 = vaddq_f64 (t1, r);
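  /* lo1 collects the low halves of k*Ln2 and log(c); lo2 = (t1 - t2) + r
     approximates the rounding error of t2 = t1 + r.  */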
  float64x2_t lo1 = vfmaq_f64 (logctail, kd, d->ln2_lo);
  float64x2_t lo2 = vaddq_f64 (vsubq_f64 (t1, t2), r);
  /* Evaluation is optimized assuming superscalar pipelined execution.  */
  float64x2_t ar = vmulq_f64 (A (0), r);
  float64x2_t ar2 = vmulq_f64 (r, ar);
  float64x2_t ar3 = vmulq_f64 (r, ar2);
  /* k*Ln2 + log(c) + r + A[0]*r*r.  */
  float64x2_t hi = vaddq_f64 (t2, ar2);
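  /* lo3 is the exact fma error of ar2 = ar*r, and lo4 = (t2 - hi) + ar2
     approximates the rounding error of hi = t2 + ar2.  */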
  float64x2_t lo3 = vfmaq_f64 (vnegq_f64 (ar2), ar, r);
  float64x2_t lo4 = vaddq_f64 (vsubq_f64 (t2, hi), ar2);
  /* p = log1p(r) - r - A[0]*r*r.  */
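  /* The remaining terms are evaluated pairwise in ar2 = A[0]*r^2; the
     -2/4/-8 scaling of the stored coefficients compensates for the powers of
     A[0] picked up from ar2 and ar3.  */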
  float64x2_t a56 = vfmaq_f64 (A (5), r, A (6));
  float64x2_t a34 = vfmaq_f64 (A (3), r, A (4));
  float64x2_t a12 = vfmaq_f64 (A (1), r, A (2));
  float64x2_t p = vfmaq_f64 (a34, ar2, a56);
  p = vfmaq_f64 (a12, ar2, p);
  p = vmulq_f64 (ar3, p);
  float64x2_t lo
      = vaddq_f64 (vaddq_f64 (vaddq_f64 (vaddq_f64 (lo1, lo2), lo3), lo4), p);
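  /* y = hi + lo rounded, with TAIL recovering the rounding error of this
     final sum so that extra precision is kept for the exp step.  */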
  float64x2_t y = vaddq_f64 (hi, lo);
  *tail = vaddq_f64 (vsubq_f64 (hi, y), lo);
  return y;
}

/* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.  */
static inline float64x2_t
v_exp_inline (float64x2_t x, float64x2_t xtail, const struct data *d)
{
  /* Fall back to scalar exp_inline for all lanes if any lane contains a
     value of x s.t. |x| <= 2^-54 or |x| >= 512.  */
  uint64x2_t abstop
      = vandq_u64 (vshrq_n_u64 (vreinterpretq_u64_f64 (x), 52), v_u64 (0x7ff));
  uint64x2_t uoflowx
      = vcgeq_u64 (vsubq_u64 (abstop, VecSmallExp), VecThresExp);
  if (unlikely (v_any_u64 (uoflowx)))
    return v_call2_f64 (exp_nosignbias, x, xtail, x, v_u64 (-1));
  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
  /* x = ln2/N*k + r, with k integer and r in [-ln2/2N, ln2/2N].  */
  float64x2_t z = vmulq_f64 (d->inv_ln2_n, x);
  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
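  /* Adding the 0x1.8p52 shift places round (z) in the low mantissa bits of
     kd, so the low bits of ki encode k; subtracting the shift back recovers
     kd = round (z) in round-to-nearest mode.  */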
  float64x2_t kd = vaddq_f64 (z, d->shift);
  uint64x2_t ki = vreinterpretq_u64_f64 (kd);
  kd = vsubq_f64 (kd, d->shift);
  float64x2_t r = vfmsq_f64 (x, kd, d->ln2_hi_n);
  r = vfmsq_f64 (r, kd, d->ln2_lo_n);
  /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
  r = vaddq_f64 (r, xtail);
  /* 2^(k/N) ~= scale.  */
  uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1));
  uint64x2_t top = vshlq_n_u64 (ki, 52 - V_POW_EXP_TABLE_BITS);
  /* This is only a valid scale when -1023*N < k < 1024*N.  */
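  /* The |x| < 512 guard above bounds |k| by about 739*N, so the exponent
     field of scale cannot overflow here.  */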
  uint64x2_t sbits = v_lookup_u64 (SBits, idx);
  sbits = vaddq_u64 (sbits, top);
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1).  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t tmp = vfmaq_f64 (C (1), r, C (2));
  tmp = vfmaq_f64 (C (0), r, tmp);
  tmp = vfmaq_f64 (r, r2, tmp);
  float64x2_t scale = vreinterpretq_f64_u64 (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma.  */
  return vfmaq_f64 (scale, scale, tmp);
}

float64x2_t VPCS_ATTR V_NAME_D2 (pow) (float64x2_t x, float64x2_t y)
{
  const struct data *d = ptr_barrier (&data);
  /* The case x <= 0 is too complicated to be vectorised efficiently here, so
     fall back to scalar pow for all lanes if any x <= 0 is detected.  */
  if (v_any_u64 (vclezq_s64 (vreinterpretq_s64_f64 (x))))
    return v_call2_f64 (__pl_finite_pow, x, y, x, v_u64 (-1));

  uint64x2_t vix = vreinterpretq_u64_f64 (x);
  uint64x2_t viy = vreinterpretq_u64_f64 (y);
  uint64x2_t vtopx = vshrq_n_u64 (vix, 52);
  uint64x2_t vtopy = vshrq_n_u64 (viy, 52);
  uint64x2_t vabstopx = vandq_u64 (vtopx, v_u64 (0x7ff));
  uint64x2_t vabstopy = vandq_u64 (vtopy, v_u64 (0x7ff));

  /* Special cases of x or y.  */
#if WANT_SIMD_EXCEPT
  /* Small or large.  */
  uint64x2_t specialx
      = vcgeq_u64 (vsubq_u64 (vtopx, VecSmallPowX), VecThresPowX);
  uint64x2_t specialy
      = vcgeq_u64 (vsubq_u64 (vabstopy, VecSmallPowY), VecThresPowY);
#else
  /* Inf or nan.  */
  uint64x2_t specialx = vcgeq_u64 (vabstopx, v_u64 (0x7ff));
  uint64x2_t specialy = vcgeq_u64 (vabstopy, v_u64 (0x7ff));
  /* The case y==0 does not trigger a special case, since in this case it is
     necessary to fix the result only if x is a signalling nan, which already
     triggers a special case. We test y==0 directly in the scalar fallback.  */
#endif
  uint64x2_t special = vorrq_u64 (specialx, specialy);
  /* Fall back to scalar on all lanes if any lane is special.  */
  if (unlikely (v_any_u64 (special)))
    return v_call2_f64 (__pl_finite_pow, x, y, x, v_u64 (-1));

  /* Small cases of x: top 12 bits of x below SmallPowX.  */
  uint64x2_t smallx = vcltq_u64 (vabstopx, VecSmallPowX);
  if (unlikely (v_any_u64 (smallx)))
    {
      /* Update ix if top 12 bits of x are 0.  */
      uint64x2_t sub_x = vceqzq_u64 (vtopx);
      if (unlikely (v_any_u64 (sub_x)))
	{
	  /* Normalize subnormal x so exponent becomes negative.  */
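	  /* Scaling by 0x1p52 gives x a normal encoding; subtracting 52 from
	     the exponent field below (borrowing into the cleared sign bit)
	     keeps the value in the extended-exponent form that v_log_inline
	     expects.  */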
	  uint64x2_t vix_norm
	      = vreinterpretq_u64_f64 (vmulq_f64 (x, v_f64 (0x1p52)));
	  vix_norm = vandq_u64 (vix_norm, v_u64 (0x7fffffffffffffff));
	  vix_norm = vsubq_u64 (vix_norm, v_u64 (52ULL << 52));
	  vix = vbslq_u64 (sub_x, vix_norm, vix);
	}
    }

  /* Vector Log(ix, &lo).  */
  float64x2_t vlo;
  float64x2_t vhi = v_log_inline (vix, &vlo, d);

  /* Vector Exp(y_loghi, y_loglo).  */
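  /* Split y*log(x) into vehi + velo: vemi is the rounding error of
     vehi = y*vhi, recovered with a fused multiply-subtract, so that
     vehi + velo ~= y * (vhi + vlo).  */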
  float64x2_t vehi = vmulq_f64 (y, vhi);
  float64x2_t velo = vmulq_f64 (y, vlo);
  float64x2_t vemi = vfmsq_f64 (vehi, y, vhi);
  velo = vsubq_f64 (velo, vemi);
  return v_exp_inline (vehi, velo, d);
}

PL_SIG (V, D, 2, pow)
PL_TEST_ULP (V_NAME_D2 (pow), 0.55)
PL_TEST_EXPECT_FENV (V_NAME_D2 (pow), WANT_SIMD_EXCEPT)
/* Wide intervals spanning the whole domain but shared between x and y.  */
#define V_POW_INTERVAL2(xlo, xhi, ylo, yhi, n)                                 \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, ylo, yhi, n)                   \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, -ylo, -yhi, n)                 \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, ylo, yhi, n)                 \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, -ylo, -yhi, n)
#define EXPAND(str) str##000000000
#define SHL52(str) EXPAND (str)
V_POW_INTERVAL2 (0, SHL52 (SmallPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (SmallPowX), SHL52 (BigPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (BigPowX), inf, 0, inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, SHL52 (SmallPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (SmallPowY), SHL52 (BigPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (BigPowY), inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, inf, 1000)
/* x~1 or y~1.  */
V_POW_INTERVAL2 (0x1p-1, 0x1p1, 0x1p-10, 0x1p10, 10000)
V_POW_INTERVAL2 (0x1p-500, 0x1p500, 0x1p-1, 0x1p1, 10000)
V_POW_INTERVAL2 (0x1.ep-1, 0x1.1p0, 0x1p8, 0x1p16, 10000)
/* Around the argmaxes of ULP error.  */
V_POW_INTERVAL2 (0x1p-300, 0x1p-200, 0x1p-20, 0x1p-10, 10000)
V_POW_INTERVAL2 (0x1p50, 0x1p100, 0x1p-20, 0x1p-10, 10000)
/* x is negative and y is an odd or even integer, or y is real and not an
   integer.  */
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 3.0, 3.0, 10000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 4.0, 4.0, 10000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 0.0, 10.0, 10000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 0.0, 10.0, -0.0, -10.0, 10000)
/* 1.0^y.  */
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0.0, 0x1p-50, 1000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0x1p-50, 1.0, 1000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 1.0, 0x1p100, 1000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, -1.0, -0x1p120, 1000)
260