/*
 * Double-precision vector cosh(x) function.
 *
 * Copyright (c) 2022-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"
#include "v_exp_tail.h"

#define C1 v_f64 (C1_scal)
#define C2 v_f64 (C2_scal)
#define C3 v_f64 (C3_scal)
#define InvLn2 v_f64 (InvLn2_scal)
#define Ln2hi v_f64 (Ln2hi_scal)
#define Ln2lo v_f64 (Ln2lo_scal)
#define IndexMask v_u64 (IndexMask_scal)
#define Shift v_f64 (Shift_scal)
#define Thres v_f64 (Thres_scal)

#define AbsMask 0x7fffffffffffffff
#define Half v_f64 (0.5)
#define SpecialBound                                                          \
  0x4086000000000000 /* 0x1.6p9, above which exp overflows. */

#if V_SUPPORTED

static inline v_f64_t
exp_inline (v_f64_t x)
{
  /* Helper for approximating exp(x). Copied from v_exp_tail, with no
     special-case handling or tail. */

  /* n = round(x/(ln2/N)). */
  v_f64_t z = v_fma_f64 (x, InvLn2, Shift);
  v_u64_t u = v_as_u64_f64 (z);
  v_f64_t n = z - Shift;

  /* r = x - n*ln2/N. */
  v_f64_t r = x;
  r = v_fma_f64 (-Ln2hi, n, r);
  r = v_fma_f64 (-Ln2lo, n, r);

  /* The low bits of u hold n: i = n mod N selects the table entry for the
     fractional part of n/N, and e shifts the remaining bits into the
     exponent field to supply the integer part. */
  v_u64_t e = u << (52 - V_EXP_TAIL_TABLE_BITS);
  v_u64_t i = u & IndexMask;

  /* y = tail + exp(r) - 1 ~= r + C1 r^2 + C2 r^3 + C3 r^4. */
  v_f64_t y = v_fma_f64 (C3, r, C2);
  y = v_fma_f64 (y, r, C1);
  y = v_fma_f64 (y, r, v_f64 (1)) * r;

  /* s = 2^(n/N). */
  u = v_lookup_u64 (Tab, i);
  v_f64_t s = v_as_f64_u64 (u + e);

  return v_fma_f64 (y, s, s);
}

/* Approximation for vector double-precision cosh(x) using exp_inline.
   cosh(x) = (exp(x) + exp(-x)) / 2.
   The greatest observed error is in the scalar fall-back region, so is the
   same as the scalar routine, 1.93 ULP:
   __v_cosh(0x1.628af341989dap+9) got 0x1.fdf28623ef921p+1021
				 want 0x1.fdf28623ef923p+1021.

   The greatest observed error in the non-special region is 1.54 ULP:
   __v_cosh(0x1.8e205b6ecacf7p+2) got 0x1.f711dcb0c77afp+7
				 want 0x1.f711dcb0c77b1p+7. */
VPCS_ATTR v_f64_t V_NAME (cosh) (v_f64_t x)
{
  v_u64_t ix = v_as_u64_f64 (x);
  v_u64_t iax = ix & AbsMask;
  v_u64_t special = v_cond_u64 (iax > SpecialBound);

  /* If any inputs are special, fall back to scalar for all lanes. */
  if (unlikely (v_any_u64 (special)))
    return v_call_f64 (cosh, x, x, v_u64 (-1));

  v_f64_t ax = v_as_f64_u64 (iax);
  /* Up to the point that exp overflows, we can use it to calculate cosh by
     exp(|x|) / 2 + 1 / (2 * exp(|x|)). */
  v_f64_t t = exp_inline (ax);
  return t * Half + Half / t;
}
VPCS_ALIAS

PL_SIG (V, D, 1, cosh, -10.0, 10.0)
PL_TEST_ULP (V_NAME (cosh), 1.43)
PL_TEST_EXPECT_FENV_ALWAYS (V_NAME (cosh))
PL_TEST_INTERVAL (V_NAME (cosh), 0, 0x1.6p9, 100000)
PL_TEST_INTERVAL (V_NAME (cosh), -0, -0x1.6p9, 100000)
PL_TEST_INTERVAL (V_NAME (cosh), 0x1.6p9, inf, 1000)
PL_TEST_INTERVAL (V_NAME (cosh), -0x1.6p9, -inf, 1000)
#endif
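
/* The two guarded blocks below are expository sketches only -- they are not
   part of the library, and their guard macros (V_COSH_SCALE_DEMO,
   V_COSH_MODEL_DEMO) are made up, so no build is affected.  To try one, copy
   it into its own file, define the guard macro, and link with -lm.

   This first sketch shows, in scalar code with N = 1 and no lookup table, the
   two bit tricks exp_inline relies on: adding a large shift constant
   (0x1.8p52 = 1.5 * 2^52 here) forces round-to-nearest to leave round(x/ln2)
   in the low mantissa bits, so n is available both as a double (z - shift)
   and as raw integer bits; shifting those bits into the exponent field of 1.0
   then constructs s = 2^n with no int-to-float conversion.  The real routine
   does the same with fma and a table of 2^V_EXP_TAIL_TABLE_BITS entries.  */
#ifdef V_COSH_SCALE_DEMO
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  double shift = 0x1.8p52; /* 1.5 * 2^52. */
  double x = 2.75;

  /* z's low mantissa bits now hold round (x / ln2) = 4. */
  double z = x / log (2.0) + shift;
  uint64_t u;
  memcpy (&u, &z, sizeof u);
  double n = z - shift;

  /* Shift those bits into the exponent field of 1.0 to build 2^n. */
  uint64_t one_bits, s_bits;
  double one = 1.0, s;
  memcpy (&one_bits, &one, sizeof one_bits);
  s_bits = one_bits + (u << 52);
  memcpy (&s, &s_bits, sizeof s);

  printf ("n = %g, 2^n = %g (reference: %g)\n", n, s, pow (2.0, n));
  return 0;
}
#endif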
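
/* This second sketch is a minimal scalar model of the cosh algorithm itself,
   assuming only math.h: the identity cosh(x) = exp(|x|)/2 + 1/(2 * exp(|x|)),
   with the same fall-back to the system routine above 0x1.6p9, the bound at
   which exp overflows.  The name cosh_model is made up for illustration; it
   is not the library's API.  */
#ifdef V_COSH_MODEL_DEMO
#include <math.h>
#include <stdio.h>

static double
cosh_model (double x)
{
  double ax = fabs (x);
  /* Mirror the special-case path of the vector routine: past SpecialBound
     (0x1.6p9 = 704), exp (ax) overflows, so defer to libm's cosh. */
  if (ax > 0x1.6p9)
    return cosh (x);
  double t = exp (ax);
  return t * 0.5 + 0.5 / t;
}

int
main (void)
{
  for (double x = -10.0; x <= 10.0; x += 2.5)
    printf ("cosh_model (%5.2f) = %a\tlibm: %a\n", x, cosh_model (x),
	    cosh (x));
  return 0;
}
#endif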