xref: /freebsd/contrib/arm-optimized-routines/pl/math/sv_asinh_3u0.c (revision 96190b4fef3b4a0cc3ca0606b0c4e3e69a5e6717)
/*
 * Double-precision SVE asinh(x) function.
 *
 * Copyright (c) 2022-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */
7 
#include "sv_math.h"
#include "poly_sve_f64.h"
#include "pl_sig.h"
#include "pl_test.h"

/* Classification bounds, compared against the top 12 bits of |x|
   (biased exponent, sign already cleared).  */
#define OneTop sv_u64 (0x3ff)	 /* top12(asuint64(1.0f)).  */
#define HugeBound sv_u64 (0x5fe) /* top12(asuint64(0x1p511)).  */
#define TinyBound (0x3e5)	 /* top12(asuint64(0x1p-26)).  NOTE(review):
				    unused in this file — verify before
				    removing.  */
#define SignMask (0x8000000000000000)

/* Constants & data for log.  */
#define A(i) __v_log_data.poly[i]
#define Ln2 (0x1.62e42fefa39efp-1)
#define N (1 << V_LOG_TABLE_BITS)
#define OFF (0x3fe6900900000000)
23 
24 static svfloat64_t NOINLINE
25 special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
26 {
27   return sv_call_f64 (asinh, x, y, special);
28 }
29 
static inline svfloat64_t
__sv_log_inline (svfloat64_t x, const svbool_t pg)
{
  /* Double-precision SVE log, copied from pl/math/sv_log_2u5.c with some
     cosmetic modification and special-cases removed. See that file for details
     of the algorithm used.  */
  svuint64_t ix = svreinterpret_u64 (x);
  /* Offset the bit pattern so exponent/mantissa split lands the significand
     in the interval the table was built for.  */
  svuint64_t tmp = svsub_x (pg, ix, OFF);
  /* Table index from the top V_LOG_TABLE_BITS of the mantissa, scaled by 2
     since each table entry is a {invc, logc} pair of doubles.  */
  svuint64_t i
      = svand_x (pg, svlsr_x (pg, tmp, (51 - V_LOG_TABLE_BITS)), (N - 1) << 1);
  /* k = exponent relative to OFF; arithmetic shift preserves its sign.  */
  svint64_t k = svasr_x (pg, svreinterpret_s64 (tmp), 52);
  /* z = x scaled into the table's interval: strip the exponent bits that
     tmp carries, keeping the significand.  */
  svuint64_t iz = svsub_x (pg, ix, svand_x (pg, tmp, 0xfffULL << 52));
  svfloat64_t z = svreinterpret_f64 (iz);
  /* Per-lane gather of 1/c and log(c) for each lane's subinterval.  */
  svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i);
  svfloat64_t logc = svld1_gather_index (pg, &__v_log_data.table[0].logc, i);
  /* r = z/c - 1, in a single fma for accuracy.  */
  svfloat64_t r = svmla_x (pg, sv_f64 (-1.0), invc, z);
  svfloat64_t kd = svcvt_f64_x (pg, k);
  /* hi = log(c) + r + k*ln2 — the leading terms of log(x).  */
  svfloat64_t hi = svmla_x (pg, svadd_x (pg, logc, r), kd, Ln2);
  /* Degree-4 polynomial correction in r, evaluated pairwise in r and r2;
     the fma ordering here is part of the error bound — do not reassociate.  */
  svfloat64_t r2 = svmul_x (pg, r, r);
  svfloat64_t y = svmla_x (pg, sv_f64 (A (2)), r, A (3));
  svfloat64_t p = svmla_x (pg, sv_f64 (A (0)), r, A (1));
  y = svmla_x (pg, y, r2, A (4));
  y = svmla_x (pg, p, r2, y);
  y = svmla_x (pg, hi, r2, y);
  return y;
}
56 
/* Double-precision implementation of SVE asinh(x).
   asinh is very sensitive around 1, so it is impractical to devise a single
   low-cost algorithm which is sufficiently accurate on a wide range of input.
   Instead we use two different algorithms:
   asinh(x) = sign(x) * log(|x| + sqrt(x^2 + 1)      if |x| >= 1
	    = sign(x) * (|x| + |x|^3 * P(x^2))       otherwise
   where log(x) is an optimized log approximation, and P(x) is a polynomial
   shared with the scalar routine. The greatest observed error 2.51 ULP, in
   |x| >= 1:
   _ZGVsMxv_asinh(0x1.170469d024505p+0) got 0x1.e3181c43b0f36p-1
				       want 0x1.e3181c43b0f39p-1.  */
svfloat64_t SV_NAME_D1 (asinh) (svfloat64_t x, const svbool_t pg)
{
  svuint64_t ix = svreinterpret_u64 (x);
  /* Split |x| and the sign bit; sign is reapplied at the end, so both
     algorithm branches only ever see non-negative input.  */
  svuint64_t iax = svbic_x (pg, ix, SignMask);
  svuint64_t sign = svand_x (pg, ix, SignMask);
  svfloat64_t ax = svreinterpret_f64 (iax);
  /* Biased exponent of |x| — cheap per-lane magnitude classifier.  */
  svuint64_t top12 = svlsr_x (pg, iax, 52);

  svbool_t ge1 = svcmpge (pg, top12, OneTop);
  /* |x| >= 0x1p511 (which also catches inf/nan, top12 = 0x7ff): x^2 would
     overflow, so route these lanes to the scalar fallback.  */
  svbool_t special = svcmpge (pg, top12, HugeBound);

  /* Option 1: |x| >= 1.
     Compute asinh(x) according by asinh(x) = log(x + sqrt(x^2 + 1)).  */
  svfloat64_t option_1 = sv_f64 (0);
  if (likely (svptest_any (pg, ge1)))
    {
      svfloat64_t axax = svmul_x (pg, ax, ax);
      option_1 = __sv_log_inline (
	  svadd_x (pg, ax, svsqrt_x (pg, svadd_x (pg, axax, 1))), pg);
    }

  /* Option 2: |x| < 1.
     Compute asinh(x) using a polynomial.
     The largest observed error in this region is 1.51 ULPs:
     _ZGVsMxv_asinh(0x1.fe12bf8c616a2p-1) got 0x1.c1e649ee2681bp-1
					 want 0x1.c1e649ee2681dp-1.  */
  svfloat64_t option_2 = sv_f64 (0);
  if (likely (svptest_any (pg, svnot_z (pg, ge1))))
    {
      /* Powers x^2, x^4, x^8, x^16, x^32 feed the Estrin-scheme evaluation
	 of the degree-17 polynomial in x^2.  */
      svfloat64_t x2 = svmul_x (pg, ax, ax);
      svfloat64_t z2 = svmul_x (pg, x2, x2);
      svfloat64_t z4 = svmul_x (pg, z2, z2);
      svfloat64_t z8 = svmul_x (pg, z4, z4);
      svfloat64_t z16 = svmul_x (pg, z8, z8);
      svfloat64_t p
	  = sv_estrin_17_f64_x (pg, x2, z2, z4, z8, z16, __asinh_data.poly);
      /* asinh(x) ~ |x| + |x|^3 * P(x^2).  */
      option_2 = svmla_x (pg, ax, p, svmul_x (pg, x2, ax));
    }

  /* Choose the right option for each lane.  */
  svfloat64_t y = svsel (ge1, option_1, option_2);

  /* Apply sign of x to y.  */
  y = svreinterpret_f64 (sveor_x (pg, svreinterpret_u64 (y), sign));

  /* Special lanes get the scalar result; all others keep y.  */
  if (unlikely (svptest_any (pg, special)))
    return special_case (x, y, special);
  return y;
}
117 
/* Register the routine's signature and accuracy/consistency tests with the
   PL test harness.  */
PL_SIG (SV, D, 1, asinh, -10.0, 10.0)
PL_TEST_ULP (SV_NAME_D1 (asinh), 2.52)
/* Test vector asinh 3 times, with control lane < 1, > 1 and special.
   Ensures the svsel is choosing the right option in all cases.  */
#define SV_ASINH_INTERVAL(lo, hi, n)                                          \
  PL_TEST_SYM_INTERVAL_C (SV_NAME_D1 (asinh), lo, hi, n, 0.5)                 \
  PL_TEST_SYM_INTERVAL_C (SV_NAME_D1 (asinh), lo, hi, n, 2)                   \
  PL_TEST_SYM_INTERVAL_C (SV_NAME_D1 (asinh), lo, hi, n, 0x1p600)
SV_ASINH_INTERVAL (0, 0x1p-26, 50000)
SV_ASINH_INTERVAL (0x1p-26, 1, 50000)
SV_ASINH_INTERVAL (1, 0x1p511, 50000)
SV_ASINH_INTERVAL (0x1p511, inf, 40000)
130