xref: /freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/tan.c (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
1 /*
2  * Double-precision vector tan(x) function.
3  *
4  * Copyright (c) 2023-2024, Arm Limited.
5  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6  */
7 
8 #include "v_math.h"
9 #include "v_poly_f64.h"
10 #include "test_sig.h"
11 #include "test_defs.h"
12 
/* Constant table for the vector tan kernel.  Accessed through ptr_barrier so
   the compiler cannot constant-fold through the loads.  */
static const struct data
{
  /* poly[0..8] hold C0..C8 of the odd minimax polynomial for tan(r):
     tan(r) ~= r + r^3 * (C0 + C1 r^2 + ... + C8 r^16).  */
  float64x2_t poly[9];
  /* pi/2 split into high and low parts for two-step extended-precision
     argument reduction; kept as a plain array so both halves can be loaded
     with one vld1q and consumed via lane-indexed FMS.  */
  double half_pi[2];
  float64x2_t two_over_pi, shift; /* 2/pi, and the 0x1.8p52 rounding shift.  */
#if !WANT_SIMD_EXCEPT
  float64x2_t range_val; /* 2^23: beyond this, reduction loses accuracy.  */
#endif
} data = {
  /* Coefficients generated using FPMinimax.  */
  .poly = { V2 (0x1.5555555555556p-2), V2 (0x1.1111111110a63p-3),
	    V2 (0x1.ba1ba1bb46414p-5), V2 (0x1.664f47e5b5445p-6),
	    V2 (0x1.226e5e5ecdfa3p-7), V2 (0x1.d6c7ddbf87047p-9),
	    V2 (0x1.7ea75d05b583ep-10), V2 (0x1.289f22964a03cp-11),
	    V2 (0x1.4e4fd14147622p-12) },
  .half_pi = { 0x1.921fb54442d18p0, 0x1.1a62633145c07p-54 },
  .two_over_pi = V2 (0x1.45f306dc9c883p-1),
  .shift = V2 (0x1.8p52),
#if !WANT_SIMD_EXCEPT
  .range_val = V2 (0x1p23),
#endif
};
35 
/* Bit patterns used for the WANT_SIMD_EXCEPT special-case filter: a single
   unsigned compare of (|x| - TinyBound) against Thresh catches |x| below
   2^-26 (underflow risk), |x| at/above 2^23 (reduction inaccurate), and
   Inf/NaN, all at once.  */
#define RangeVal 0x4160000000000000  /* asuint64(0x1p23).  */
#define TinyBound 0x3e50000000000000 /* asuint64(2^-26).  */
#define Thresh 0x310000000000000     /* RangeVal - TinyBound.  */
39 
/* Special cases (fall back to scalar calls).
   Applies the scalar tan to every lane of x (the all-ones mask selects all
   lanes); used when any lane is too large, too small, or Inf/NaN for the
   vector path.  */
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x)
{
  return v_call_f64 (tan, x, x, v_u64 (-1));
}
46 
/* Vector approximation for double-precision tan.
   Maximum measured error is 3.48 ULP:
   _ZGVnN2v_tan(0x1.4457047ef78d8p+20) got -0x1.f6ccd8ecf7dedp+37
				      want -0x1.f6ccd8ecf7deap+37.  */
float64x2_t VPCS_ATTR V_NAME_D1 (tan) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);
  /* Our argument reduction cannot calculate q with sufficient accuracy for
     very large inputs. Fall back to scalar routine for all lanes if any are
     too large, or Inf/NaN. If fenv exceptions are expected, also fall back for
     tiny input to avoid underflow.  */
#if WANT_SIMD_EXCEPT
  uint64x2_t iax = vreinterpretq_u64_f64 (vabsq_f64 (x));
  /* iax - tiny_bound > range_val - tiny_bound.
     Single unsigned compare catches tiny, huge and Inf/NaN lanes: subtracting
     TinyBound wraps tiny inputs around to huge unsigned values.  */
  uint64x2_t special
      = vcgtq_u64 (vsubq_u64 (iax, v_u64 (TinyBound)), v_u64 (Thresh));
  if (unlikely (v_any_u64 (special)))
    return special_case (x);
#endif

  /* q = nearest integer to 2 * x / pi, computed via the shift trick:
     adding then subtracting 0x1.8p52 rounds to integral in FP.  */
  float64x2_t q
      = vsubq_f64 (vfmaq_f64 (dat->shift, x, dat->two_over_pi), dat->shift);
  int64x2_t qi = vcvtq_s64_f64 (q);

  /* Use q to reduce x to r in [-pi/4, pi/4], by:
     r = x - q * pi/2, in extended precision.
     Two FMS steps against the hi/lo split of pi/2 keep the reduction
     accurate well beyond a single double multiply.  */
  float64x2_t r = x;
  float64x2_t half_pi = vld1q_f64 (dat->half_pi);
  r = vfmsq_laneq_f64 (r, q, half_pi, 0);
  r = vfmsq_laneq_f64 (r, q, half_pi, 1);
  /* Further reduce r to [-pi/8, pi/8], to be reconstructed using double angle
     formula.  */
  r = vmulq_n_f64 (r, 0.5);

  /* Approximate tan(r) using order 8 polynomial.
     tan(x) is odd, so polynomial has the form:
     tan(x) ~= x + C0 * x^3 + C1 * x^5 + C2 * x^7 + ...
     Hence we first approximate P(r) = C1 + C2 * r^2 + C3 * r^4 + ...
     Then compute the approximation by:
     tan(r) ~= r + r^3 * (C0 + r^2 * P(r)).  */
  float64x2_t r2 = vmulq_f64 (r, r), r4 = vmulq_f64 (r2, r2),
	      r8 = vmulq_f64 (r4, r4);
  /* Offset coefficients to evaluate from C1 onwards.  */
  float64x2_t p = v_estrin_7_f64 (r2, r4, r8, dat->poly + 1);
  p = vfmaq_f64 (dat->poly[0], p, r2);
  p = vfmaq_f64 (r, r2, vmulq_f64 (p, r));

  /* Recombination uses double-angle formula:
     tan(2x) = 2 * tan(x) / (1 - (tan(x))^2)
     and reciprocity around pi/2:
     tan(x) = 1 / (tan(pi/2 - x))
     to assemble result using change-of-sign and conditional selection of
     numerator/denominator, dependent on odd/even-ness of q (hence quadrant).
   */
  float64x2_t n = vfmaq_f64 (v_f64 (-1), p, p); /* p^2 - 1.  */
  float64x2_t d = vaddq_f64 (p, p);		/* 2p.  */

  /* no_recip is all-ones in lanes where q is odd (quadrant needs the
     reciprocal form).  */
  uint64x2_t no_recip = vtstq_u64 (vreinterpretq_u64_s64 (qi), v_u64 (1));

#if !WANT_SIMD_EXCEPT
  /* Without fenv concerns only |x| >= 2^23 (or NaN) needs the scalar
     fallback; the check is deferred here so the common path stays branch-free
     until the end.  */
  uint64x2_t special = vcageq_f64 (x, dat->range_val);
  if (unlikely (v_any_u64 (special)))
    return special_case (x);
#endif

  return vdivq_f64 (vbslq_f64 (no_recip, n, vnegq_f64 (d)),
		    vbslq_f64 (no_recip, d, n));
}
116 
/* Test-harness metadata: signature, ULP bound (threshold is value + 0.5, so
   2.99 admits the measured 3.48 ULP worst case), fenv policy, and intervals
   covering the tiny-input, main, and fallback ranges symmetrically about 0.  */
TEST_SIG (V, D, 1, tan, -3.1, 3.1)
TEST_ULP (V_NAME_D1 (tan), 2.99)
TEST_DISABLE_FENV_IF_NOT (V_NAME_D1 (tan), WANT_SIMD_EXCEPT)
TEST_SYM_INTERVAL (V_NAME_D1 (tan), 0, TinyBound, 5000)
TEST_SYM_INTERVAL (V_NAME_D1 (tan), TinyBound, RangeVal, 100000)
TEST_SYM_INTERVAL (V_NAME_D1 (tan), RangeVal, inf, 5000)
123