/*
 * Double-precision vector cbrt(x) function.
 *
 * Copyright (c) 2022-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"
#include "poly_advsimd_f64.h"

const static struct data
{
  float64x2_t poly[4], one_third, shift;
  int64x2_t exp_bias;
  uint64x2_t abs_mask, tiny_bound;
  uint32x4_t thresh;
  double table[5];
} data = {
  .shift = V2 (0x1.8p52),
  .poly = { /* Generated with fpminimax in [0.5, 1].  */
            V2 (0x1.c14e8ee44767p-2), V2 (0x1.dd2d3f99e4c0ep-1),
            V2 (-0x1.08e83026b7e74p-1), V2 (0x1.2c74eaa3ba428p-3) },
  .exp_bias = V2 (1022),
  .abs_mask = V2 (0x7fffffffffffffff),
  .tiny_bound = V2 (0x0010000000000000), /* Smallest normal.  */
  .thresh = V4 (0x7fe00000), /* Top 32 bits of asuint64 (inf) - tiny_bound.  */
  .one_third = V2 (0x1.5555555555555p-2),
  .table = { /* table[i] = 2^((i - 2) / 3).  */
             0x1.428a2f98d728bp-1, 0x1.965fea53d6e3dp-1, 0x1p0,
             0x1.428a2f98d728bp0, 0x1.965fea53d6e3dp0 }
};

#define MantissaMask v_u64 (0x000fffffffffffff)

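/* Route lanes flagged in special to the scalar cbrt routine, keeping the
   vector result y for the remaining lanes.  */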
static float64x2_t NOINLINE VPCS_ATTR
special_case (float64x2_t x, float64x2_t y, uint32x2_t special)
{
  return v_call_f64 (cbrt, x, y, vmovl_u32 (special));
}

/* Approximation for double-precision vector cbrt(x), using a low-order
   polynomial and two Newton iterations. Greatest observed error is 1.79 ULP.
   Errors repeat according to the exponent: an error observed for the double
   value m * 2^e will also be observed for any input m * 2^(e + 3*i), where i
   is an integer.
   __v_cbrt(0x1.fffff403f0bc6p+1) got 0x1.965fe72821e9bp+0
				  want 0x1.965fe72821e99p+0.  */
VPCS_ATTR float64x2_t V_NAME_D1 (cbrt) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);
  uint64x2_t iax = vreinterpretq_u64_f64 (vabsq_f64 (x));

  /* Subnormal, +/-0 and special values.  */
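  /* vsubhn_u64 keeps only the top 32 bits of iax - tiny_bound, so one 32-bit
     compare against thresh flags subnormals, zeroes, infinities and NaNs.  */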
  uint32x2_t special
      = vcge_u32 (vsubhn_u64 (iax, d->tiny_bound), vget_low_u32 (d->thresh));

  /* Decompose |x| into m * 2^e, where m is in [0.5, 1.0]. This is a vector
     version of frexp, which gets subnormal values wrong - these have to be
     special-cased as a result.  */
  float64x2_t m = vbslq_f64 (MantissaMask, x, v_f64 (0.5));
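  /* exp_bias is 1022, not the usual 1023, because m is normalised to
     [0.5, 1.0) rather than [1.0, 2.0).  */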
  int64x2_t exp_bias = d->exp_bias;
  uint64x2_t ia12 = vshrq_n_u64 (iax, 52);
  int64x2_t e = vsubq_s64 (vreinterpretq_s64_u64 (ia12), exp_bias);

  /* Calculate a rough approximation for cbrt(m) in [0.5, 1.0], the starting
     point for the Newton iterations.  */
  float64x2_t p = v_pairwise_poly_3_f64 (m, vmulq_f64 (m, m), d->poly);
  float64x2_t one_third = d->one_third;
  /* Two iterations of Newton's method for cbrt:
     a_next = 2/3 * a + m / (3 * a^2).  */
  float64x2_t m_by_3 = vmulq_f64 (m, one_third);
  float64x2_t two_thirds = vaddq_f64 (one_third, one_third);
  float64x2_t a
      = vfmaq_f64 (vdivq_f64 (m_by_3, vmulq_f64 (p, p)), two_thirds, p);
  a = vfmaq_f64 (vdivq_f64 (m_by_3, vmulq_f64 (a, a)), two_thirds, a);

  /* Assemble the result by the following:

     cbrt(x) = cbrt(m) * 2 ^ (e / 3).

     We can get 2 ^ round(e / 3) using ldexp and integer divide, but since e is
     not necessarily a multiple of 3 we lose some information.

     Let q = 2 ^ round(e / 3), then t = 2 ^ (e / 3) / q.

     Then we know t = 2 ^ (i / 3), where i = e - 3 * round(e / 3) is an integer
     in [-2, 2], and can be looked up in the table. Hence the result is
     assembled as:

     cbrt(x) = cbrt(m) * t * 2 ^ round(e / 3) * sign.  */
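  /* For example, e = 10 gives round(e / 3) = 3 and i = 1, so
     cbrt(m * 2^10) = cbrt(m) * 2^(1/3) * 2^3. Below, eb3f holds
     round(e / 3) (reused as ey) and em3 holds i, the table index offset.  */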

  float64x2_t ef = vcvtq_f64_s64 (e);
  float64x2_t eb3f = vrndnq_f64 (vmulq_f64 (ef, one_third));
  int64x2_t em3 = vcvtq_s64_f64 (vfmsq_f64 (ef, eb3f, v_f64 (3)));
  int64x2_t ey = vcvtq_s64_f64 (eb3f);

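  /* Look up t = 2^(i / 3): em3 is in [-2, 2], so em3 + 2 indexes the
     table.  */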
  float64x2_t my = (float64x2_t){ d->table[em3[0] + 2], d->table[em3[1] + 2] };
  my = vmulq_f64 (my, a);

  /* Vector version of ldexp.  */
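  /* Adding 1 to exp_bias restores the IEEE-754 bias of 1023 before shifting
     into the exponent field, giving the bit pattern of 2^ey.  */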
  float64x2_t y = vreinterpretq_f64_s64 (
      vshlq_n_s64 (vaddq_s64 (ey, vaddq_s64 (exp_bias, v_s64 (1))), 52));
  y = vmulq_f64 (y, my);

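  /* Fall back to the scalar routine for lanes flagged as special. The sign is
     copied first so that non-special lanes keep their finished result.  */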
  if (unlikely (v_any_u32h (special)))
    return special_case (x, vbslq_f64 (d->abs_mask, y, x), special);

  /* Copy sign.  */
  return vbslq_f64 (d->abs_mask, y, x);
}

PL_TEST_ULP (V_NAME_D1 (cbrt), 1.30)
PL_SIG (V, D, 1, cbrt, -10.0, 10.0)
PL_TEST_EXPECT_FENV_ALWAYS (V_NAME_D1 (cbrt))
PL_TEST_SYM_INTERVAL (V_NAME_D1 (cbrt), 0, inf, 1000000)
117