/*
 * Double-precision SVE cosh(x) function.
 *
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64_t poly[3];
  float64_t inv_ln2, ln2_hi, ln2_lo, shift, thres;
  uint64_t index_mask, special_bound;
} data = {
  .poly = { 0x1.fffffffffffd4p-2, 0x1.5555571d6b68cp-3,
	    0x1.5555576a59599p-5, },

  .inv_ln2 = 0x1.71547652b82fep8, /* N/ln2.  */
  /* -ln2/N.  */
  .ln2_hi = -0x1.62e42fefa39efp-9,
  .ln2_lo = -0x1.abc9e3b39803f3p-64,
  .shift = 0x1.8p+52,
  .thres = 704.0,

  .index_mask = 0xff,
  /* 0x1.6p9, above which exp overflows.  */
  .special_bound = 0x4086000000000000,
};

static svfloat64_t NOINLINE
special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
{
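  /* Apply scalar cosh to the lanes selected by SPECIAL; Y is kept for the
     remaining lanes.  */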
  return sv_call_f64 (cosh, x, y, special);
}

/* Helper for approximating exp(x). Copied from sv_exp_tail, with no
   special-case handling or tail.  */
static inline svfloat64_t
exp_inline (svfloat64_t x, const svbool_t pg, const struct data *d)
{
  /* Calculate exp(x).  */
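  /* n = round(x * N/ln2): adding the rounding shift 0x1.8p52 forces the
     nearest integer into the low mantissa bits of z, and subtracting the
     shift again recovers it as a double.  */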
  svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);
  svfloat64_t n = svsub_x (pg, z, d->shift);

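  /* r = x - n * ln2/N, with ln2/N split into hi and lo parts so the
     reduction stays accurate.  */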
  svfloat64_t r = svmla_x (pg, x, n, d->ln2_hi);
  r = svmla_x (pg, r, n, d->ln2_lo);

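  /* The integer n is still held in the low bits of z's representation: the
     bottom V_EXP_TAIL_TABLE_BITS bits index the 2^(i/N) table, while the bits
     above them, shifted into the exponent field, contribute the 2^floor(n/N)
     scaling.  */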
  svuint64_t u = svreinterpret_u64 (z);
  svuint64_t e = svlsl_x (pg, u, 52 - V_EXP_TAIL_TABLE_BITS);
  svuint64_t i = svand_x (pg, u, d->index_mask);

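  /* y = expm1(r), approximated as r + C0 r^2 + C1 r^3 + C2 r^4 with
     C0 ~ 1/2, C1 ~ 1/6 and C2 ~ 1/24.  */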
  svfloat64_t y = svmla_x (pg, sv_f64 (d->poly[1]), r, d->poly[2]);
  y = svmla_x (pg, sv_f64 (d->poly[0]), r, y);
  y = svmla_x (pg, sv_f64 (1.0), r, y);
  y = svmul_x (pg, r, y);

  /* s = 2^(n/N).  */
  u = svld1_gather_index (pg, __v_exp_tail_data, i);
  svfloat64_t s = svreinterpret_f64 (svadd_x (pg, u, e));

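  /* exp(x) ~ s * (1 + y).  */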
  return svmla_x (pg, s, s, y);
}

/* Approximation for SVE double-precision cosh(x) using exp_inline.
   cosh(x) = (exp(x) + exp(-x)) / 2.
   The greatest observed error is in the scalar fall-back region, so is the
   same as the scalar routine, 1.93 ULP:
   _ZGVsMxv_cosh (0x1.628ad45039d2fp+9) got 0x1.fd774e958236dp+1021
					want 0x1.fd774e958236fp+1021.

   The greatest observed error in the non-special region is 1.54 ULP:
   _ZGVsMxv_cosh (0x1.ba5651dd4486bp+2) got 0x1.f5e2bb8d5c98fp+8
					want 0x1.f5e2bb8d5c991p+8.  */
svfloat64_t SV_NAME_D1 (cosh) (svfloat64_t x, const svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

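  /* |x| is special if its bit pattern exceeds special_bound (0x1.6p9); for
     non-negative doubles the integer comparison orders values the same way as
     the floating-point one.  */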
  svfloat64_t ax = svabs_x (pg, x);
  svbool_t special = svcmpgt (pg, svreinterpret_u64 (ax), d->special_bound);

  /* Up to the point that exp overflows, we can use it to calculate cosh by
     exp(|x|) / 2 + 1 / (2 * exp(|x|)).  */
  svfloat64_t t = exp_inline (ax, pg, d);
  svfloat64_t half_t = svmul_x (pg, t, 0.5);
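  /* svdivr divides its second operand by its first, so this computes
     0.5 / t.  */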
  svfloat64_t half_over_t = svdivr_x (pg, t, 0.5);

  /* Fall back to scalar for any special cases.  */
  if (unlikely (svptest_any (pg, special)))
    return special_case (x, svadd_x (pg, half_t, half_over_t), special);

  return svadd_x (pg, half_t, half_over_t);
}

PL_SIG (SV, D, 1, cosh, -10.0, 10.0)
PL_TEST_ULP (SV_NAME_D1 (cosh), 1.43)
PL_TEST_SYM_INTERVAL (SV_NAME_D1 (cosh), 0, 0x1.6p9, 100000)
PL_TEST_SYM_INTERVAL (SV_NAME_D1 (cosh), 0x1.6p9, inf, 1000)