/*
 * Double-precision SVE asinh(x) function.
 *
 * Copyright (c) 2022-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "test_sig.h"
#include "test_defs.h"

#define SignMask (0x8000000000000000)
#define One (0x3ff0000000000000)
#define Thres (0x5fe0000000000000) /* asuint64 (0x1p511).  */
#define IndexMask (((1 << V_LOG_TABLE_BITS) - 1) << 1)

static const struct data
{
  double even_coeffs[9];
  double ln2, p3, p1, p4, p0, p2, c1, c3, c5, c7, c9, c11, c13, c15, c17;
  uint64_t off, mask;

} data = {
  /* Polynomial generated using Remez on [2^-26, 1].  */
  .even_coeffs = {
    -0x1.55555555554a7p-3,
    -0x1.6db6db68332e6p-5,
    -0x1.6e8b8b654a621p-6,
    -0x1.c9871d10885afp-7,
    -0x1.3ddca533e9f54p-7,
    -0x1.b90c7099dd397p-8,
    -0x1.d217026a669ecp-9,
    -0x1.e0f37daef9127p-11,
    -0x1.021a48685e287p-14, },

  .c1 = 0x1.3333333326c7p-4,
  .c3 = 0x1.f1c71b26fb40dp-6,
  .c5 = 0x1.1c4daa9e67871p-6,
  .c7 = 0x1.7a16e8d9d2ecfp-7,
  .c9 = 0x1.0becef748dafcp-7,
  .c11 = 0x1.541f2bb1ffe51p-8,
  .c13 = 0x1.0b5c7977aaf7p-9,
  .c15 = 0x1.388b5fe542a6p-12,
  .c17 = 0x1.93d4ba83d34dap-18,

  .ln2 = 0x1.62e42fefa39efp-1,
  .p0 = -0x1.ffffffffffff7p-2,
  .p1 = 0x1.55555555170d4p-2,
  .p2 = -0x1.0000000399c27p-2,
  .p3 = 0x1.999b2e90e94cap-3,
  .p4 = -0x1.554e550bd501ep-3,
  .off = 0x3fe6900900000000,
  .mask = 0xfffULL << 52,
};

static svfloat64_t NOINLINE
special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
{
  return sv_call_f64 (asinh, x, y, special);
}

static inline svfloat64_t
__sv_log_inline (svfloat64_t x, const struct data *d, const svbool_t pg)
{
  /* Double-precision SVE log, copied from the SVE log implementation with
     some cosmetic modification and special cases removed.  See that file for
     details of the algorithm used.  */

  svuint64_t ix = svreinterpret_u64 (x);
  svuint64_t i_off = svsub_x (pg, ix, d->off);
  svuint64_t i
      = svand_x (pg, svlsr_x (pg, i_off, (51 - V_LOG_TABLE_BITS)), IndexMask);
  svuint64_t iz = svsub_x (pg, ix, svand_x (pg, i_off, d->mask));
  svfloat64_t z = svreinterpret_f64 (iz);

  svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i);
  svfloat64_t logc = svld1_gather_index (pg, &__v_log_data.table[0].logc, i);

  svfloat64_t ln2_p3 = svld1rq (svptrue_b64 (), &d->ln2);
  svfloat64_t p1_p4 = svld1rq (svptrue_b64 (), &d->p1);

  svfloat64_t r = svmla_x (pg, sv_f64 (-1.0), invc, z);
  svfloat64_t kd
      = svcvt_f64_x (pg, svasr_x (pg, svreinterpret_s64 (i_off), 52));

  svfloat64_t hi = svmla_lane (svadd_x (pg, logc, r), kd, ln2_p3, 0);
  svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
  svfloat64_t y = svmla_lane (sv_f64 (d->p2), r, ln2_p3, 1);
  svfloat64_t p = svmla_lane (sv_f64 (d->p0), r, p1_p4, 0);

  y = svmla_lane (y, r2, p1_p4, 1);
  y = svmla_x (pg, p, r2, y);
  y = svmla_x (pg, hi, r2, y);
  return y;
}
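
/* For reference, a sketch of the reduction performed by __sv_log_inline
   above, inferred from the code and the shared v_log table (see the SVE log
   implementation for the authoritative description): the input is written as
   x = 2^k * z, with z scaled to lie near 1 via the offset d->off, and the top
   V_LOG_TABLE_BITS fraction bits of the offset representation select a table
   entry (invc, logc) with invc ~= 1/c and logc ~= log(c) for c close to z.
   With r = z * invc - 1,

     log(x) = k * ln2 + logc + log1p(r),

   where log1p(r) is approximated by
   r + p0*r^2 + p1*r^3 + p2*r^4 + p3*r^5 + p4*r^6.  */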

/* Double-precision implementation of SVE asinh(x).
   asinh is very sensitive around 1, so it is impractical to devise a single
   low-cost algorithm which is sufficiently accurate on a wide range of input.
   Instead we use two different algorithms:
   asinh(x) = sign(x) * log(|x| + sqrt(x^2 + 1)) if |x| >= 1
	    = sign(x) * (|x| + |x|^3 * P(x^2)) otherwise
   where log(x) is an optimized log approximation, and P(x) is a polynomial
   shared with the scalar routine.
   The greatest observed error is 2.51 ULP, in |x| >= 1:
   _ZGVsMxv_asinh(0x1.170469d024505p+0) got 0x1.e3181c43b0f36p-1
					want 0x1.e3181c43b0f39p-1.  */
svfloat64_t SV_NAME_D1 (asinh) (svfloat64_t x, const svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

  svuint64_t ix = svreinterpret_u64 (x);
  svuint64_t iax = svbic_x (pg, ix, SignMask);
  svuint64_t sign = svand_x (pg, ix, SignMask);
  svfloat64_t ax = svreinterpret_f64 (iax);
  svbool_t ge1 = svcmpge (pg, iax, One);
  svbool_t special = svcmpge (pg, iax, Thres);

  /* Option 1: |x| >= 1.
     Compute asinh(x) as log(|x| + sqrt(x^2 + 1)).  */
  svfloat64_t option_1 = sv_f64 (0);
  if (likely (svptest_any (pg, ge1)))
    {
      svfloat64_t x2 = svmul_x (svptrue_b64 (), ax, ax);
      option_1 = __sv_log_inline (
	  svadd_x (pg, ax, svsqrt_x (pg, svadd_x (pg, x2, 1))), d, pg);
    }

  /* Option 2: |x| < 1.
     Compute asinh(x) using a polynomial.
     The largest observed error in this region is 1.51 ULP:
     _ZGVsMxv_asinh(0x1.fe12bf8c616a2p-1) got 0x1.c1e649ee2681bp-1
					  want 0x1.c1e649ee2681dp-1.  */
  svfloat64_t option_2 = sv_f64 (0);
  if (likely (svptest_any (pg, svnot_z (pg, ge1))))
    {
      svfloat64_t x2 = svmul_x (svptrue_b64 (), ax, ax);
      svfloat64_t x4 = svmul_x (svptrue_b64 (), x2, x2);
      /* Order-17 Pairwise Horner scheme.  */
      svfloat64_t c13 = svld1rq (svptrue_b64 (), &d->c1);
      svfloat64_t c57 = svld1rq (svptrue_b64 (), &d->c5);
      svfloat64_t c911 = svld1rq (svptrue_b64 (), &d->c9);
      svfloat64_t c1315 = svld1rq (svptrue_b64 (), &d->c13);

      svfloat64_t p01 = svmla_lane (sv_f64 (d->even_coeffs[0]), x2, c13, 0);
      svfloat64_t p23 = svmla_lane (sv_f64 (d->even_coeffs[1]), x2, c13, 1);
      svfloat64_t p45 = svmla_lane (sv_f64 (d->even_coeffs[2]), x2, c57, 0);
      svfloat64_t p67 = svmla_lane (sv_f64 (d->even_coeffs[3]), x2, c57, 1);
      svfloat64_t p89 = svmla_lane (sv_f64 (d->even_coeffs[4]), x2, c911, 0);
      svfloat64_t p1011 = svmla_lane (sv_f64 (d->even_coeffs[5]), x2, c911, 1);
      svfloat64_t p1213
	  = svmla_lane (sv_f64 (d->even_coeffs[6]), x2, c1315, 0);
      svfloat64_t p1415
	  = svmla_lane (sv_f64 (d->even_coeffs[7]), x2, c1315, 1);
      svfloat64_t p1617 = svmla_x (pg, sv_f64 (d->even_coeffs[8]), x2, d->c17);

      svfloat64_t p = svmla_x (pg, p1415, x4, p1617);
      p = svmla_x (pg, p1213, x4, p);
      p = svmla_x (pg, p1011, x4, p);
      p = svmla_x (pg, p89, x4, p);
      p = svmla_x (pg, p67, x4, p);
      p = svmla_x (pg, p45, x4, p);
      p = svmla_x (pg, p23, x4, p);
      p = svmla_x (pg, p01, x4, p);

      option_2 = svmla_x (pg, ax, p, svmul_x (svptrue_b64 (), x2, ax));
    }
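
  /* For reference, written out in u = x^2 the pairwise-Horner evaluation
     above computes

       P(u) = even_coeffs[0] + c1*u + even_coeffs[1]*u^2 + c3*u^3 + ...
	      + even_coeffs[8]*u^16 + c17*u^17,

     so on this branch asinh(x) ~= |x| + |x|^3 * P(x^2), matching the
     description at the top of the routine.  */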

  if (unlikely (svptest_any (pg, special)))
    return special_case (
	x,
	svreinterpret_f64 (sveor_x (
	    pg, svreinterpret_u64 (svsel (ge1, option_1, option_2)), sign)),
	special);

  /* Choose the right option for each lane.  */
  svfloat64_t y = svsel (ge1, option_1, option_2);
  return svreinterpret_f64 (sveor_x (pg, svreinterpret_u64 (y), sign));
}

TEST_SIG (SV, D, 1, asinh, -10.0, 10.0)
TEST_ULP (SV_NAME_D1 (asinh), 2.52)
TEST_DISABLE_FENV (SV_NAME_D1 (asinh))
TEST_SYM_INTERVAL (SV_NAME_D1 (asinh), 0, 0x1p-26, 50000)
TEST_SYM_INTERVAL (SV_NAME_D1 (asinh), 0x1p-26, 1, 50000)
TEST_SYM_INTERVAL (SV_NAME_D1 (asinh), 1, 0x1p511, 50000)
TEST_SYM_INTERVAL (SV_NAME_D1 (asinh), 0x1p511, inf, 40000)
/* Test vector asinh 3 times, with control lane < 1, > 1 and special.
   Ensures the svsel is choosing the right option in all cases.  */
TEST_CONTROL_VALUE (SV_NAME_D1 (asinh), 0.5)
TEST_CONTROL_VALUE (SV_NAME_D1 (asinh), 2)
TEST_CONTROL_VALUE (SV_NAME_D1 (asinh), 0x1p600)
CLOSE_SVE_ATTR
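
/* A minimal usage sketch, not part of the routine and deliberately guarded
   out of the build: it shows how a caller might apply the vector asinh over
   an array with a whilelt-governed loop.  The function name asinh_array and
   the SV_ASINH_USAGE_EXAMPLE guard are illustrative only; the intrinsics
   used (svcntd, svwhilelt_b64, svld1, svst1) are standard ACLE SVE calls
   from <arm_sve.h>, and the build is assumed to have SVE enabled.  */
#ifdef SV_ASINH_USAGE_EXAMPLE
# include <arm_sve.h>
# include <stddef.h>

static void
asinh_array (double *dst, const double *src, size_t n)
{
  /* Process svcntd() doubles per iteration; the predicate masks the tail.  */
  for (size_t i = 0; i < n; i += svcntd ())
    {
      svbool_t pg = svwhilelt_b64 ((uint64_t) i, (uint64_t) n);
      svfloat64_t vx = svld1 (pg, src + i);
      svfloat64_t vy = SV_NAME_D1 (asinh) (vx, pg);
      svst1 (pg, dst + i, vy);
    }
}
#endif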