/*
 * Double-precision vector cosh(x) function.
 *
 * Copyright (c) 2022-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64x2_t poly[3];
  float64x2_t inv_ln2, ln2, shift, thres;
  uint64x2_t index_mask, special_bound;
} data = {
  .poly = { V2 (0x1.fffffffffffd4p-2), V2 (0x1.5555571d6b68cp-3),
            V2 (0x1.5555576a59599p-5), },

  .inv_ln2 = V2 (0x1.71547652b82fep8), /* N/ln2.  */
  /* -ln2/N.  */
  .ln2 = {-0x1.62e42fefa39efp-9, -0x1.abc9e3b39803f3p-64},
  .shift = V2 (0x1.8p+52),
  .thres = V2 (704.0),

  .index_mask = V2 (0xff),
  /* 0x1.6p9, above which exp overflows.  */
  .special_bound = V2 (0x4086000000000000),
};

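/* For lanes flagged in the special mask, recompute with the scalar cosh via
   v_call_f64; the remaining lanes keep the vector result y.  */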
static float64x2_t NOINLINE VPCS_ATTR
special_case (float64x2_t x, float64x2_t y, uint64x2_t special)
{
  return v_call_f64 (cosh, x, y, special);
}

/* Helper for approximating exp(x). Copied from v_exp_tail, with no
   special-case handling or tail.  */
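/* The reduction below writes x = n * (ln2 / N) + r with N = 256 (see inv_ln2
   and index_mask), so that exp(x) = 2^(n/N) * exp(r): 2^(n/N) comes from the
   __v_exp_tail_data lookup and exp(r) from a small polynomial in r.  */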
static inline float64x2_t
exp_inline (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* n = round(x/(ln2/N)).  */
  float64x2_t z = vfmaq_f64 (d->shift, x, d->inv_ln2);
  uint64x2_t u = vreinterpretq_u64_f64 (z);
  float64x2_t n = vsubq_f64 (z, d->shift);
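  /* With shift = 0x1.8p52, the fma above rounds x/(ln2/N) to the nearest
     integer and leaves that integer in the low mantissa bits of z, so u
     holds it as a bit pattern while n recovers it as a double.  */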

  /* r = x - n*ln2/N.  */
  float64x2_t r = vfmaq_laneq_f64 (x, n, d->ln2, 0);
  r = vfmaq_laneq_f64 (r, n, d->ln2, 1);

  uint64x2_t e = vshlq_n_u64 (u, 52 - V_EXP_TAIL_TABLE_BITS);
  uint64x2_t i = vandq_u64 (u, d->index_mask);
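  /* Split n: i = n mod N indexes the table of 2^(i/N) values, while e moves
     the remaining bits of n up into the exponent field so that the addition
     below reconstructs the bit pattern of 2^(n/N).  */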

  /* y = tail + exp(r) - 1 ~= r + C1 r^2 + C2 r^3 + C3 r^4.  */
  float64x2_t y = vfmaq_f64 (d->poly[1], d->poly[2], r);
  y = vfmaq_f64 (d->poly[0], y, r);
  y = vmulq_f64 (vfmaq_f64 (v_f64 (1), y, r), r);

  /* s = 2^(n/N).  */
  u = v_lookup_u64 (__v_exp_tail_data, i);
  float64x2_t s = vreinterpretq_f64_u64 (vaddq_u64 (u, e));

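  /* exp(x) ~= s * (1 + y) = s + s * y, with s = 2^(n/N) and 1 + y ~= exp(r).  */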
  return vfmaq_f64 (s, y, s);
}

/* Approximation for vector double-precision cosh(x) using exp_inline.
   cosh(x) = (exp(x) + exp(-x)) / 2.
   The greatest observed error is in the scalar fall-back region, so is the
   same as the scalar routine, 1.93 ULP:
   _ZGVnN2v_cosh (0x1.628af341989dap+9) got 0x1.fdf28623ef921p+1021
					want 0x1.fdf28623ef923p+1021.

   The greatest observed error in the non-special region is 1.54 ULP:
   _ZGVnN2v_cosh (0x1.8e205b6ecacf7p+2) got 0x1.f711dcb0c77afp+7
					want 0x1.f711dcb0c77b1p+7.  */
float64x2_t VPCS_ATTR V_NAME_D1 (cosh) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  float64x2_t ax = vabsq_f64 (x);
  uint64x2_t special
      = vcgtq_u64 (vreinterpretq_u64_f64 (ax), d->special_bound);
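  /* Compare as unsigned bit patterns: for the non-negative |x| this orders
     the same as the values, and it also flags inf and NaN lanes.  */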

  /* Up to the point that exp overflows, we can use it to calculate cosh by
     exp(|x|) / 2 + 1 / (2 * exp(|x|)).  */
  float64x2_t t = exp_inline (ax);
  float64x2_t half_t = vmulq_n_f64 (t, 0.5);
  float64x2_t half_over_t = vdivq_f64 (v_f64 (0.5), t);

  /* Fall back to scalar for any special cases.  */
  if (unlikely (v_any_u64 (special)))
    return special_case (x, vaddq_f64 (half_t, half_over_t), special);

  return vaddq_f64 (half_t, half_over_t);
}

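/* Metadata for the pl benchmark/test harness (pl_sig.h, pl_test.h): routine
   signature, accepted ULP error, fenv expectation and tested intervals.  */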
PL_SIG (V, D, 1, cosh, -10.0, 10.0)
PL_TEST_ULP (V_NAME_D1 (cosh), 1.43)
PL_TEST_EXPECT_FENV_ALWAYS (V_NAME_D1 (cosh))
PL_TEST_SYM_INTERVAL (V_NAME_D1 (cosh), 0, 0x1.6p9, 100000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (cosh), 0x1.6p9, inf, 1000)