xref: /freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/pow.c (revision f3087bef11543b42e0d69b708f367097a4118d24)
/*
 * Double-precision vector pow function.
 *
 * Copyright (c) 2020-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

/* Defines parameters of the approximation and scalar fallback.  */
#include "finite_pow.h"

#define VecSmallPowX v_u64 (SmallPowX)
#define VecThresPowX v_u64 (ThresPowX)
#define VecSmallPowY v_u64 (SmallPowY)
#define VecThresPowY v_u64 (ThresPowY)

static const struct data
{
  uint64x2_t inf;
  float64x2_t small_powx;
  uint64x2_t offset, mask;
  uint64x2_t mask_sub_0, mask_sub_1;
  float64x2_t log_c0, log_c2, log_c4, log_c5;
  double log_c1, log_c3;
  double ln2_lo, ln2_hi;
  uint64x2_t small_exp, thres_exp;
  double ln2_lo_n, ln2_hi_n;
  double inv_ln2_n, exp_c2;
  float64x2_t exp_c0, exp_c1;
} data = {
  /* Power threshold.  */
  .inf = V2 (0x7ff0000000000000),
  .small_powx = V2 (0x1p-126),
  .offset = V2 (Off),
  .mask = V2 (0xfffULL << 52),
  .mask_sub_0 = V2 (1ULL << 52),
  .mask_sub_1 = V2 (52ULL << 52),
  /* Coefficients copied from v_pow_log_data.c
     relative error: 0x1.11922ap-70 in [-0x1.6bp-8, 0x1.6bp-8]
     Coefficients are scaled to match the scaling during evaluation.  */
  .log_c0 = V2 (0x1.555555555556p-2 * -2),
  .log_c1 = -0x1.0000000000006p-2 * -2,
  .log_c2 = V2 (0x1.999999959554ep-3 * 4),
  .log_c3 = -0x1.555555529a47ap-3 * 4,
  .log_c4 = V2 (0x1.2495b9b4845e9p-3 * -8),
  .log_c5 = V2 (-0x1.0002b8b263fc3p-3 * -8),
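  /* A note on the scaling (inferred from the evaluation in v_log_inline): the
     -2/4/-8 factors fold the powers of ar2 = -r^2/2 and ar3 = -r^3/2 used
     there into the coefficients, so no extra scaling multiplies are needed at
     run time.  */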
  .ln2_hi = 0x1.62e42fefa3800p-1,
  .ln2_lo = 0x1.ef35793c76730p-45,
  /* Polynomial coefficients: abs error: 1.43*2^-58, ulp error: 0.549
     (0.550 without fma) if |x| < ln2/512.  */
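  /* I.e. exp(r) - 1 ~= r + r^2 * (C0 + C1*r + C2*r^2) on the reduced
     interval, matching the evaluation in v_exp_inline below.  */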
  .exp_c0 = V2 (0x1.fffffffffffd4p-2),
  .exp_c1 = V2 (0x1.5555571d6ef9p-3),
  .exp_c2 = 0x1.5555576a5adcep-5,
  .small_exp = V2 (0x3c90000000000000),
  .thres_exp = V2 (0x03f0000000000000),
  .inv_ln2_n = 0x1.71547652b82fep8, /* N/ln2.  */
  .ln2_hi_n = 0x1.62e42fefc0000p-9, /* ln2/N.  */
  .ln2_lo_n = -0x1.c610ca86c3899p-45,
};
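
/* The scalar double fields above are laid out in adjacent pairs (ln2_lo/ln2_hi,
   log_c1/log_c3, ln2_lo_n/ln2_hi_n, inv_ln2_n/exp_c2) so that each pair can be
   loaded with a single vld1q_f64 and consumed with lane-indexed multiplies and
   FMAs below.  */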

/* This version implements an algorithm close to scalar pow but
   - does not implement the trick in exp's special-case subroutine that avoids
     double rounding,
   - does not use a tail in the exponential core computation,
   - and pow's exp polynomial order and table bits might differ.

   Maximum measured error is 1.04 ULPs:
   _ZGVnN2vv_pow(0x1.024a3e56b3c3p-136, 0x1.87910248b58acp-13)
     got 0x1.f71162f473251p-1
    want 0x1.f71162f473252p-1.  */

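/* Per-lane gather from a log table: the index is formed from the top
   V_POW_LOG_TABLE_BITS bits of the mantissa field of i, which select the
   subinterval of z (see v_log_inline).  */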
static inline float64x2_t
v_masked_lookup_f64 (const double *table, uint64x2_t i)
{
  return (float64x2_t){
    table[(i[0] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)],
    table[(i[1] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)]
  };
}

/* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about
   15 bits of additional precision.  IX is the bit representation of x, but
   normalized in the subnormal range using the sign bit for the exponent.  */
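/* Throughout, the extra precision is carried with the usual exact-error
   pattern (an illustrative sketch, not literal code):
     hi = a + b;
     lo = (a - hi) + b;   (rounding error of the addition, exact if |a| >= |b|)
   so that hi + lo represents a + b to roughly twice the working precision.  */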
static inline float64x2_t
v_log_inline (uint64x2_t ix, float64x2_t *tail, const struct data *d)
{
  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
  uint64x2_t tmp = vsubq_u64 (ix, d->offset);
  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->mask));
  float64x2_t z = vreinterpretq_f64_u64 (iz);
  float64x2_t kd = vcvtq_f64_s64 (k);
  /* log(x) = k*Ln2 + log(c) + log1p(z/c-1).  */
  float64x2_t invc = v_masked_lookup_f64 (__v_pow_log_data.invc, tmp);
  float64x2_t logc = v_masked_lookup_f64 (__v_pow_log_data.logc, tmp);
  float64x2_t logctail = v_masked_lookup_f64 (__v_pow_log_data.logctail, tmp);
  /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
     |z/c - 1| < 1/N, so r = z/c - 1 is exactly representable.  */
  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);
  /* k*Ln2 + log(c) + r.  */
  float64x2_t ln2 = vld1q_f64 (&d->ln2_lo);
  float64x2_t t1 = vfmaq_laneq_f64 (logc, kd, ln2, 1);
  float64x2_t t2 = vaddq_f64 (t1, r);
  float64x2_t lo1 = vfmaq_laneq_f64 (logctail, kd, ln2, 0);
  float64x2_t lo2 = vaddq_f64 (vsubq_f64 (t1, t2), r);
  /* Evaluation is optimized assuming superscalar pipelined execution.  */
  float64x2_t ar = vmulq_f64 (v_f64 (-0.5), r);
  float64x2_t ar2 = vmulq_f64 (r, ar);
  float64x2_t ar3 = vmulq_f64 (r, ar2);
  /* k*Ln2 + log(c) + r + A[0]*r*r.  */
  float64x2_t hi = vaddq_f64 (t2, ar2);
  float64x2_t lo3 = vfmaq_f64 (vnegq_f64 (ar2), ar, r);
  float64x2_t lo4 = vaddq_f64 (vsubq_f64 (t2, hi), ar2);
  /* p = log1p(r) - r - A[0]*r*r.  */
  float64x2_t odd_coeffs = vld1q_f64 (&d->log_c1);
  float64x2_t a56 = vfmaq_f64 (d->log_c4, r, d->log_c5);
  float64x2_t a34 = vfmaq_laneq_f64 (d->log_c2, r, odd_coeffs, 1);
  float64x2_t a12 = vfmaq_laneq_f64 (d->log_c0, r, odd_coeffs, 0);
  float64x2_t p = vfmaq_f64 (a34, ar2, a56);
  p = vfmaq_f64 (a12, ar2, p);
  p = vmulq_f64 (ar3, p);
  float64x2_t lo
      = vaddq_f64 (vaddq_f64 (vaddq_f64 (vaddq_f64 (lo1, lo2), lo3), lo4), p);
  float64x2_t y = vaddq_f64 (hi, lo);
  *tail = vaddq_f64 (vsubq_f64 (hi, y), lo);
  return y;
}

static float64x2_t VPCS_ATTR NOINLINE
exp_special_case (float64x2_t x, float64x2_t xtail)
{
  return (float64x2_t){ exp_nosignbias (x[0], xtail[0]),
			exp_nosignbias (x[1], xtail[1]) };
}

/* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.  */
static inline float64x2_t
v_exp_inline (float64x2_t x, float64x2_t neg_xtail, const struct data *d)
{
  /* Fall back to scalar exp_inline for all lanes if any lane contains a value
     of x such that |x| <= 2^-54 or >= 512.  */
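  /* The unsigned compare folds both bounds into one test: after subtracting
     the bit pattern of 2^-54, inputs below that bound wrap around to large
     values, so a single >= also catches tiny, infinite and NaN lanes.  */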
  uint64x2_t uoflowx = vcgeq_u64 (
      vsubq_u64 (vreinterpretq_u64_f64 (vabsq_f64 (x)), d->small_exp),
      d->thres_exp);
  if (unlikely (v_any_u64 (uoflowx)))
    return exp_special_case (x, vnegq_f64 (neg_xtail));

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
  /* x = ln2/N*k + r, with k integer and r in [-ln2/2N, ln2/2N].  */
  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
  float64x2_t exp_consts = vld1q_f64 (&d->inv_ln2_n);
  float64x2_t z = vmulq_laneq_f64 (x, exp_consts, 0);
  float64x2_t kd = vrndnq_f64 (z);
  uint64x2_t ki = vreinterpretq_u64_s64 (vcvtaq_s64_f64 (z));
  float64x2_t ln2_n = vld1q_f64 (&d->ln2_lo_n);
  float64x2_t r = vfmsq_laneq_f64 (x, kd, ln2_n, 1);
  r = vfmsq_laneq_f64 (r, kd, ln2_n, 0);
  /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
  r = vsubq_f64 (r, neg_xtail);
  /* 2^(k/N) ~= scale.  */
  uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1));
  uint64x2_t top = vshlq_n_u64 (ki, 52 - V_POW_EXP_TABLE_BITS);
  /* This is only a valid scale when -1023*N < k < 1024*N.  */
  uint64x2_t sbits = v_lookup_u64 (SBits, idx);
  sbits = vaddq_u64 (sbits, top);
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1).  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t tmp = vfmaq_laneq_f64 (d->exp_c1, r, exp_consts, 1);
  tmp = vfmaq_f64 (d->exp_c0, r, tmp);
  tmp = vfmaq_f64 (r, r2, tmp);
  float64x2_t scale = vreinterpretq_f64_u64 (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma.  */
  return vfmaq_f64 (scale, scale, tmp);
}

static float64x2_t NOINLINE VPCS_ATTR
scalar_fallback (float64x2_t x, float64x2_t y)
{
  return (float64x2_t){ pow_scalar_special_case (x[0], y[0]),
			pow_scalar_special_case (x[1], y[1]) };
}

float64x2_t VPCS_ATTR V_NAME_D2 (pow) (float64x2_t x, float64x2_t y)
{
  const struct data *d = ptr_barrier (&data);
  /* The case x <= 0 is too complicated to be vectorised efficiently here, so
     fall back to scalar pow for all lanes if any x <= 0 is detected.  */
  if (v_any_u64 (vclezq_s64 (vreinterpretq_s64_f64 (x))))
    return scalar_fallback (x, y);

  uint64x2_t vix = vreinterpretq_u64_f64 (x);
  uint64x2_t viy = vreinterpretq_u64_f64 (y);
  uint64x2_t iay = vandq_u64 (viy, d->inf);

  /* Special cases of x or y.  */
#if WANT_SIMD_EXCEPT
  /* Small or large.  */
  uint64x2_t vtopx = vshrq_n_u64 (vix, 52);
  uint64x2_t vabstopy = vshrq_n_u64 (iay, 52);
  uint64x2_t specialx
      = vcgeq_u64 (vsubq_u64 (vtopx, VecSmallPowX), VecThresPowX);
  uint64x2_t specialy
      = vcgeq_u64 (vsubq_u64 (vabstopy, VecSmallPowY), VecThresPowY);
#else
  /* The case y==0 does not trigger a special case, since in this case it is
     necessary to fix the result only if x is a signalling nan, which already
     triggers a special case.  We test y==0 directly in the scalar fallback.  */
  uint64x2_t iax = vandq_u64 (vix, d->inf);
  uint64x2_t specialx = vcgeq_u64 (iax, d->inf);
  uint64x2_t specialy = vcgeq_u64 (iay, d->inf);
#endif
  uint64x2_t special = vorrq_u64 (specialx, specialy);
  /* Fall back to scalar on all lanes if any lane is inf or nan.  */
  if (unlikely (v_any_u64 (special)))
    return scalar_fallback (x, y);

  /* Small cases of x: |x| < 0x1p-126.  */
  uint64x2_t smallx = vcaltq_f64 (x, d->small_powx);
  if (unlikely (v_any_u64 (smallx)))
    {
      /* Update ix if top 12 bits of x are 0.  */
      uint64x2_t sub_x = vceqzq_u64 (vshrq_n_u64 (vix, 52));
      if (unlikely (v_any_u64 (sub_x)))
	{
	  /* Normalize subnormal x so exponent becomes negative.  */
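	  /* mask_sub_0 converts to the double 2^52, so the multiply makes x
	     normal; subtracting mask_sub_1 (52 in the exponent field) then
	     restores the scale, leaving the exponent encoded through the sign
	     bit as v_log_inline expects.  */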
	  uint64x2_t vix_norm = vreinterpretq_u64_f64 (
	      vabsq_f64 (vmulq_f64 (x, vcvtq_f64_u64 (d->mask_sub_0))));
	  vix_norm = vsubq_u64 (vix_norm, d->mask_sub_1);
	  vix = vbslq_u64 (sub_x, vix_norm, vix);
	}
    }

  /* Vector Log(ix, &lo).  */
  float64x2_t vlo;
  float64x2_t vhi = v_log_inline (vix, &vlo, d);

  /* Vector Exp(y_loghi, y_loglo).  */
  float64x2_t vehi = vmulq_f64 (y, vhi);
  float64x2_t vemi = vfmsq_f64 (vehi, y, vhi);
  float64x2_t neg_velo = vfmsq_f64 (vemi, y, vlo);
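  /* vemi is the (negated) exact FMA error of the product y*vhi, so
     vehi - neg_velo == y * (vhi + vlo) to extended precision.  */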
  return v_exp_inline (vehi, neg_velo, d);
}
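
/* Illustrative, hypothetical caller: the routine is normally reached through
   auto-vectorisation of calls to pow, but may also be invoked directly under
   its vector ABI name, e.g.

     float64x2_t x = { 2.0, 3.0 };
     float64x2_t y = { 10.0, 4.0 };
     float64x2_t r = _ZGVnN2vv_pow (x, y);   yields { 1024.0, 81.0 }
*/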

TEST_SIG (V, D, 2, pow)
TEST_ULP (V_NAME_D2 (pow), 0.55)
TEST_DISABLE_FENV_IF_NOT (V_NAME_D2 (pow), WANT_SIMD_EXCEPT)
/* Wide intervals spanning the whole domain but shared between x and y.  */
#define V_POW_INTERVAL2(xlo, xhi, ylo, yhi, n)                                \
  TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, ylo, yhi, n)                     \
  TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, -ylo, -yhi, n)                   \
  TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, ylo, yhi, n)                   \
  TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, -ylo, -yhi, n)
#define EXPAND(str) str##000000000
#define SHL52(str) EXPAND (str)
V_POW_INTERVAL2 (0, SHL52 (SmallPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (SmallPowX), SHL52 (BigPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (BigPowX), inf, 0, inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, SHL52 (SmallPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (SmallPowY), SHL52 (BigPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (BigPowY), inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, inf, 1000)
/* x~1 or y~1.  */
V_POW_INTERVAL2 (0x1p-1, 0x1p1, 0x1p-10, 0x1p10, 10000)
V_POW_INTERVAL2 (0x1p-500, 0x1p500, 0x1p-1, 0x1p1, 10000)
V_POW_INTERVAL2 (0x1.ep-1, 0x1.1p0, 0x1p8, 0x1p16, 10000)
/* Around the arguments of maximum ULP error.  */
V_POW_INTERVAL2 (0x1p-300, 0x1p-200, 0x1p-20, 0x1p-10, 10000)
V_POW_INTERVAL2 (0x1p50, 0x1p100, 0x1p-20, 0x1p-10, 10000)
/* x is negative, y is odd or even integer, or y is real not integer.  */
TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 3.0, 3.0, 10000)
TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 4.0, 4.0, 10000)
TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 0.0, 10.0, 10000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 0.0, 10.0, -0.0, -10.0, 10000)
/* 1.0^y.  */
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0.0, 0x1p-50, 1000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0x1p-50, 1.0, 1000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 1.0, 0x1p100, 1000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, -1.0, -0x1p120, 1000)
285