/*
 * Double-precision vector tanh(x) function.
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "estrin.h"
#include "mathlib.h"
#include "pl_sig.h"
#include "pl_test.h"

#if V_SUPPORTED

#define AbsMask v_u64 (0x7fffffffffffffff)
#define InvLn2 v_f64 (0x1.71547652b82fep0)
#define MLn2hi v_f64 (-0x1.62e42fefa39efp-1)
#define MLn2lo v_f64 (-0x1.abc9e3b39803fp-56)
#define Shift v_f64 (0x1.8p52)
#define C(i) v_f64 (__expm1_poly[i])

#define BoringBound 0x403241bf835f9d5f /* asuint64 (0x1.241bf835f9d5fp+4).  */
#define TinyBound 0x3e40000000000000   /* asuint64 (0x1p-27).  */
#define One v_u64 (0x3ff0000000000000)

static inline v_f64_t
expm1_inline (v_f64_t x)
{
  /* Helper routine for calculating exp(x) - 1. Vector port of the helper from
     the scalar variant of tanh.  */

  /* Reduce argument: f in [-ln2/2, ln2/2], i is exact.  */
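  /* The fma with Shift rounds x / ln2 to the nearest integer j, so that
     x = j * ln2 + f. ln2 is subtracted in two parts (MLn2hi, MLn2lo) to keep
     f accurate, and exp(x) = 2^j * exp(f).  */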
  v_f64_t j = v_fma_f64 (InvLn2, x, Shift) - Shift;
  v_s64_t i = v_to_s64_f64 (j);
  v_f64_t f = v_fma_f64 (j, MLn2hi, x);
  f = v_fma_f64 (j, MLn2lo, f);

  /* Approximate expm1(f) using polynomial.  */
  v_f64_t f2 = f * f;
  v_f64_t f4 = f2 * f2;
  v_f64_t p = v_fma_f64 (f2, ESTRIN_10 (f, f2, f4, f4 * f4, C), f);

  /* t = 2 ^ i.  */
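  /* 2^i is constructed directly in the integer domain: i is shifted into the
     exponent field and added to the bit pattern of 1.0 (One), avoiding an
     int-to-float conversion.  */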
  v_f64_t t = v_as_f64_u64 (v_as_u64_s64 (i << 52) + One);
  /* expm1(x) = p * t + (t - 1).  */
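  /* Since exp(x) = t * exp(f) and p approximates exp(f) - 1, the result is
     recombined as t * p + (t - 1) with a single fma.  */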
  return v_fma_f64 (p, t, t - 1);
}

static NOINLINE v_f64_t
special_case (v_f64_t x, v_f64_t y, v_u64_t special)
{
  return v_call_f64 (tanh, x, y, special);
}

/* Vector approximation for double-precision tanh(x), using a simplified
   version of expm1. The greatest observed error is 2.75 ULP:
   __v_tanh(-0x1.c143c3a44e087p-3) got -0x1.ba31ba4691ab7p-3
				   want -0x1.ba31ba4691ab4p-3.  */
VPCS_ATTR v_f64_t V_NAME (tanh) (v_f64_t x)
{
  v_u64_t ix = v_as_u64_f64 (x);
  v_u64_t ia = ix & AbsMask;

  /* Trigger special-cases for tiny, boring and infinity/NaN.  */
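  /* The unsigned subtraction wraps around for ia < TinyBound, so a single
     compare catches tiny inputs as well as ia > BoringBound (which includes
     the encodings of infinity and NaN).  */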
  v_u64_t special = v_cond_u64 ((ia - TinyBound) > (BoringBound - TinyBound));
  v_f64_t u;

  /* To trigger fp exceptions correctly, set special lanes to a neutral value.
     They will be fixed up later by the special-case handler.  */
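  /* 1 is a safe neutral value: 2 * 1 lies well inside the range handled by
     expm1_inline, so the masked-off lanes cannot raise spurious exceptions
     before being recomputed by the scalar fallback.  */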
  if (unlikely (v_any_u64 (special)))
    u = v_sel_f64 (special, v_f64 (1), x) * 2;
  else
    u = x * 2;

  /* tanh(x) = (e^2x - 1) / (e^2x + 1).  */
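  /* With q = expm1(2x) = e^(2x) - 1, the denominator e^(2x) + 1 is q + 2,
     so tanh(x) = q / (q + 2).  */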
  v_f64_t q = expm1_inline (u);
  v_f64_t y = q / (q + 2);

  if (unlikely (v_any_u64 (special)))
    return special_case (x, y, special);
  return y;
}
VPCS_ALIAS

PL_SIG (V, D, 1, tanh, -10.0, 10.0)
PL_TEST_ULP (V_NAME (tanh), 2.26)
PL_TEST_EXPECT_FENV_ALWAYS (V_NAME (tanh))
PL_TEST_INTERVAL (V_NAME (tanh), 0, TinyBound, 1000)
PL_TEST_INTERVAL (V_NAME (tanh), -0, -TinyBound, 1000)
PL_TEST_INTERVAL (V_NAME (tanh), TinyBound, BoringBound, 100000)
PL_TEST_INTERVAL (V_NAME (tanh), -TinyBound, -BoringBound, 100000)
PL_TEST_INTERVAL (V_NAME (tanh), BoringBound, inf, 1000)
PL_TEST_INTERVAL (V_NAME (tanh), -BoringBound, -inf, 1000)
#endif