/*
 * Double-precision vector 2^x function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "poly_advsimd_f64.h"
#include "pl_sig.h"
#include "pl_test.h"

#define N (1 << V_EXP_TABLE_BITS)
#define IndexMask (N - 1)
#define BigBound 1022.0
#define UOFlowBound 1280.0
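/* Lanes with |x| > BigBound take the special-case path when WANT_SIMD_EXCEPT
   is not set; beyond UOFlowBound the result is guaranteed to overflow or
   underflow.  */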

static const struct data
{
  float64x2_t poly[4];
  float64x2_t shift, scale_big_bound, scale_uoflow_bound;
} data = {
  /* Coefficients are computed using Remez algorithm with
     minimisation of the absolute error.  */
  .poly = { V2 (0x1.62e42fefa3686p-1), V2 (0x1.ebfbdff82c241p-3),
            V2 (0x1.c6b09b16de99ap-5), V2 (0x1.3b2abf5571ad8p-7) },
  .shift = V2 (0x1.8p52 / N),
  .scale_big_bound = V2 (BigBound),
  .scale_uoflow_bound = V2 (UOFlowBound),
};
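/* poly approximates (2^r - 1) / r on [-1/(2N), 1/(2N)] (leading coefficient
   ~ ln 2), so the main path can form y = r * poly (r) ~ 2^r - 1.  */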

/* Load the tabulated scale bits of 2^(i/N) for the low V_EXP_TABLE_BITS bits
   of each lane; the caller adds the remaining exponent bits to build the full
   scale s.  */
static inline uint64x2_t
lookup_sbits (uint64x2_t i)
{
  return (uint64x2_t){ __v_exp_data[i[0] & IndexMask],
                       __v_exp_data[i[1] & IndexMask] };
}

#if WANT_SIMD_EXCEPT

# define TinyBound 0x2000000000000000 /* asuint64(0x1p-511).  */
# define Thres 0x2080000000000000     /* asuint64(512.0) - TinyBound.  */
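/* The special-lane test below evaluates asuint64 (|x|) - TinyBound >= Thres
   as an unsigned comparison, which flags |x| < 0x1p-511, |x| >= 512 and NaN
   with a single compare.  */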

/* Call scalar exp2 as a fallback.  */
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t is_special)
{
  return v_call_f64 (exp2, x, y, is_special);
}

#else

# define SpecialOffset 0x6000000000000000 /* 0x1p513.  */
/* SpecialBias1 - SpecialBias2 = asuint(1.0).  */
# define SpecialBias1 0x7000000000000000 /* 0x1p769.  */
# define SpecialBias2 0x3010000000000000 /* 0x1p-254.  */

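/* Finish lanes whose scale cannot be built as a single double.  s1 is a power
   of two picked from the sign of n (0x1p769 for n > 0, 0x1p-767 for n <= 0)
   and s2 = s / s1 is formed exactly by adjusting the exponent bits of s, so
   s1 * (s2 + s2 * y) = s * (1 + y) without intermediate overflow or
   underflow.  For |n| > UOFlowBound the result is known to overflow or
   underflow, so s1 * s1 is returned to force that outcome with the right
   exceptions.  */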
static inline float64x2_t VPCS_ATTR
special_case (float64x2_t s, float64x2_t y, float64x2_t n,
              const struct data *d)
{
  /* 2^n may overflow, break it up into s1*s2.  */
  uint64x2_t b = vandq_u64 (vclezq_f64 (n), v_u64 (SpecialOffset));
  float64x2_t s1 = vreinterpretq_f64_u64 (vsubq_u64 (v_u64 (SpecialBias1), b));
  float64x2_t s2 = vreinterpretq_f64_u64 (
    vaddq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (s), v_u64 (SpecialBias2)), b));
  uint64x2_t cmp = vcagtq_f64 (n, d->scale_uoflow_bound);
  float64x2_t r1 = vmulq_f64 (s1, s1);
  float64x2_t r0 = vmulq_f64 (vfmaq_f64 (s2, s2, y), s1);
  return vbslq_f64 (cmp, r1, r0);
}

#endif

/* Fast vector implementation of exp2.
   Maximum measured error is 1.65 ulp.
   _ZGVnN2v_exp2(-0x1.4c264ab5b559bp-6) got 0x1.f8db0d4df721fp-1
                                       want 0x1.f8db0d4df721dp-1.  */
VPCS_ATTR
float64x2_t V_NAME_D1 (exp2) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);
  uint64x2_t cmp;
#if WANT_SIMD_EXCEPT
  uint64x2_t ia = vreinterpretq_u64_f64 (vabsq_f64 (x));
  cmp = vcgeq_u64 (vsubq_u64 (ia, v_u64 (TinyBound)), v_u64 (Thres));
  /* Mask special lanes and retain a copy of x for passing to special-case
     handler.  */
  float64x2_t xc = x;
  x = v_zerofy_f64 (x, cmp);
#else
  cmp = vcagtq_f64 (x, d->scale_big_bound);
#endif

  /* n = round(x * N) / N, i.e. x rounded to the nearest multiple of 1/N.
     Adding shift = 0x1.8p52 / N forces the rounding to happen at exactly that
     granularity; the low mantissa bits of the intermediate z then encode
     round(x * N), from which the table index and exponent of the scale are
     recovered below.  */
  float64x2_t z = vaddq_f64 (d->shift, x);
  uint64x2_t u = vreinterpretq_u64_f64 (z);
  float64x2_t n = vsubq_f64 (z, d->shift);

  /* r = x - n, with r in [-1/(2N), 1/(2N)].  */
  float64x2_t r = vsubq_f64 (x, n);

  /* s = 2^n, assembled from the tabulated significand bits and the exponent
     bits shifted into place.  */
  uint64x2_t e = vshlq_n_u64 (u, 52 - V_EXP_TABLE_BITS);
  u = lookup_sbits (u);
  float64x2_t s = vreinterpretq_f64_u64 (vaddq_u64 (u, e));

  /* y ~ exp2(r) - 1.  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t y = v_pairwise_poly_3_f64 (r, r2, d->poly);
  y = vmulq_f64 (r, y);

  if (unlikely (v_any_u64 (cmp)))
#if !WANT_SIMD_EXCEPT
    return special_case (s, y, n, d);
#else
    return special_case (xc, vfmaq_f64 (s, s, y), cmp);
#endif
  /* exp2(x) = 2^n * 2^r ~ s * (1 + y).  */
  return vfmaq_f64 (s, s, y);
}
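
/* Minimal usage sketch (assuming an AArch64 AdvSIMD build exposing the
   _ZGVnN2v_exp2 name quoted in the error report above):

     #include <arm_neon.h>
     float64x2_t _ZGVnN2v_exp2 (float64x2_t);

     float64x2_t x = { 1.0, -2.5 };
     float64x2_t y = _ZGVnN2v_exp2 (x);

   y is then ~{ 2.0, 0x1.6a09e667f3bcdp-3 }, i.e. 2^1 and 2^-2.5.  */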

PL_SIG (V, D, 1, exp2, -9.9, 9.9)
PL_TEST_ULP (V_NAME_D1 (exp2), 1.15)
PL_TEST_EXPECT_FENV (V_NAME_D1 (exp2), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (exp2), 0, TinyBound, 5000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (exp2), TinyBound, BigBound, 10000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (exp2), BigBound, UOFlowBound, 5000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (exp2), UOFlowBound, inf, 10000)