/*
 * Double-precision SVE cosh(x) function.
 *
 * Copyright (c) 2023-2025, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float64_t poly[3];
  float64_t inv_ln2, ln2_hi, ln2_lo, shift, thres;
  uint64_t special_bound;
} data = {
  /* Coefficients for expm1(r) ~ r + poly[0] r^2 + poly[1] r^3 + poly[2] r^4,
     with poly[i] close to 1/2, 1/6 and 1/24.  */
  .poly = { 0x1.fffffffffffd4p-2, 0x1.5555571d6b68cp-3,
	    0x1.5555576a59599p-5, },

  .inv_ln2 = 0x1.71547652b82fep8, /* N/ln2, with N = 256.  */
  /* -ln2/N, split in two for increased precision in the reduction
     r = fma (n, ln2_lo, fma (n, ln2_hi, x)).  */
  .ln2_hi = -0x1.62e42fefa39efp-9,
  .ln2_lo = -0x1.abc9e3b39803f3p-64,
  /* 1.5 * 2^52, used to round n = x * N/ln2 to the nearest integer.  */
  .shift = 0x1.8p+52,
  .thres = 704.0,

  /* 0x1.6p9, above which exp overflows.  */
  .special_bound = 0x4086000000000000,
};

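/* Compute the cosh result for every lane from t = exp(|x|), exactly as in
   the main path below, then use the scalar cosh to fix up the lanes flagged
   in SPECIAL.  Note that svdivr reverses its operands, so half_over_t is
   0.5 / t = exp(-|x|) / 2.  */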
static svfloat64_t NOINLINE
special_case (svfloat64_t x, svbool_t pg, svfloat64_t t, svbool_t special)
{
  svfloat64_t half_t = svmul_x (svptrue_b64 (), t, 0.5);
  svfloat64_t half_over_t = svdivr_x (pg, t, 0.5);
  svfloat64_t y = svadd_x (pg, half_t, half_over_t);
  return sv_call_f64 (cosh, x, y, special);
}

/* Helper for approximating exp(x). Copied from sv_exp_tail, with no
   special-case handling or tail.  The reduction writes x = n * (ln2/N) + r
   with N = 256, so that exp(x) = 2^(n/N) * exp(r): 2^(n/N) is assembled from
   a 256-entry table and exp(r) from a low-order polynomial.  */
static inline svfloat64_t
exp_inline (svfloat64_t x, const svbool_t pg, const struct data *d)
{
  /* n = round (x * N/ln2): adding the shift constant rounds away the
     fractional mantissa bits of z, so n = z - shift recovers the nearest
     integer as a double.  */
  svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);
  svfloat64_t n = svsub_x (pg, z, d->shift);

  /* r = x - n * ln2/N, with ln2/N split in two for extra precision.  */
  svfloat64_t r = svmla_x (pg, x, n, d->ln2_hi);
  r = svmla_x (pg, r, n, d->ln2_lo);

  /* The low mantissa bits of z hold n: the bits above the table index are
     shifted up to form the exponent increment e, while the low
     V_EXP_TAIL_TABLE_BITS bits give the table index i = n % N.  */
  svuint64_t u = svreinterpret_u64 (z);
  svuint64_t e = svlsl_x (pg, u, 52 - V_EXP_TAIL_TABLE_BITS);
  svuint64_t i = svand_x (svptrue_b64 (), u, 0xff);

  /* y = expm1(r) ~ r + poly[0] r^2 + poly[1] r^3 + poly[2] r^4.  */
  svfloat64_t y = svmla_x (pg, sv_f64 (d->poly[1]), r, d->poly[2]);
  y = svmla_x (pg, sv_f64 (d->poly[0]), r, y);
  y = svmla_x (pg, sv_f64 (1.0), r, y);
  y = svmul_x (svptrue_b64 (), r, y);

  /* s = 2^(n/N), built from the table entry for 2^(i/N) and the exponent
     bits in e.  */
  u = svld1_gather_index (pg, __v_exp_tail_data, i);
  svfloat64_t s = svreinterpret_f64 (svadd_x (pg, u, e));

  /* s + s * y = s * exp(r) = exp(x).  */
  return svmla_x (pg, s, s, y);
}
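
/* Worked example of the reduction (illustrative only, N = 256): for x = 1,
   n = round (x * N / ln2) = 369 and r = x - n * ln2 / N ~ 0.00089, so
   exp (1) = 2^(369/256) * exp (r), where 2^(369/256) = 2 * 2^(113/256) is
   built from the table entry for i = 369 % 256 = 113 with the exponent
   raised by floor (369/256) = 1.  */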

/* Approximation for SVE double-precision cosh(x) using exp_inline.
   cosh(x) = (exp(x) + exp(-x)) / 2.
   The greatest observed error occurs in the scalar fall-back region, so it
   matches the scalar routine's worst case of 1.93 ULP:
   _ZGVsMxv_cosh (0x1.628ad45039d2fp+9) got 0x1.fd774e958236dp+1021
                                        want 0x1.fd774e958236fp+1021.

   The greatest observed error in the non-special region is 1.54 ULP:
   _ZGVsMxv_cosh (0x1.ba5651dd4486bp+2) got 0x1.f5e2bb8d5c98fp+8
                                        want 0x1.f5e2bb8d5c991p+8.  */
svfloat64_t SV_NAME_D1 (cosh) (svfloat64_t x, const svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

  svfloat64_t ax = svabs_x (pg, x);
  svbool_t special = svcmpgt (pg, svreinterpret_u64 (ax), d->special_bound);

  /* Up to the point that exp overflows, we can use it to calculate cosh by
     exp(|x|) / 2 + 1 / (2 * exp(|x|)).  */
  svfloat64_t t = exp_inline (ax, pg, d);

  /* Fall back to scalar for any special cases.  */
  if (unlikely (svptest_any (pg, special)))
    return special_case (x, pg, t, special);

  svfloat64_t half_t = svmul_x (svptrue_b64 (), t, 0.5);
  svfloat64_t half_over_t = svdivr_x (pg, t, 0.5);
  return svadd_x (pg, half_t, half_over_t);
}
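
/* Usage sketch (illustrative): with the ACLE types from <arm_sve.h>, the
   routine can be called through its vector-ABI name, as seen in the error
   report above:
     svfloat64_t y = _ZGVsMxv_cosh (svdup_f64 (1.0), svptrue_b64 ());
   which fills every active lane with cosh(1) ~ 1.5430806348152437.  */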

TEST_SIG (SV, D, 1, cosh, -10.0, 10.0)
TEST_ULP (SV_NAME_D1 (cosh), 1.43)
TEST_DISABLE_FENV (SV_NAME_D1 (cosh))
TEST_SYM_INTERVAL (SV_NAME_D1 (cosh), 0, 0x1.6p9, 100000)
TEST_SYM_INTERVAL (SV_NAME_D1 (cosh), 0x1.6p9, inf, 1000)
CLOSE_SVE_ATTR