xref: /freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/sinpi.c (revision f3087bef11543b42e0d69b708f367097a4118d24)
1*f3087befSAndrew Turner /*
2*f3087befSAndrew Turner  * Double-precision vector sinpi function.
3*f3087befSAndrew Turner  *
4*f3087befSAndrew Turner  * Copyright (c) 2023-2024, Arm Limited.
5*f3087befSAndrew Turner  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6*f3087befSAndrew Turner  */
7*f3087befSAndrew Turner 
8*f3087befSAndrew Turner #include "mathlib.h"
9*f3087befSAndrew Turner #include "v_math.h"
10*f3087befSAndrew Turner #include "v_poly_f64.h"
11*f3087befSAndrew Turner #include "test_sig.h"
12*f3087befSAndrew Turner #include "test_defs.h"
13*f3087befSAndrew Turner 
static const struct data
{
  /* Coefficients of an odd polynomial: sinpi(x) ~= x * P(x^2) on the
     reduced interval [-1/2, 1/2].  poly[0] is ~pi, the leading term of
     the sin(pi*x) Taylor expansion.  */
  float64x2_t poly[10];
} data = {
  /* Polynomial coefficients generated using Remez algorithm,
     see sinpi.sollya for details.  */
  .poly = { V2 (0x1.921fb54442d184p1), V2 (-0x1.4abbce625be53p2),
	    V2 (0x1.466bc6775ab16p1), V2 (-0x1.32d2cce62dc33p-1),
	    V2 (0x1.507834891188ep-4), V2 (-0x1.e30750a28c88ep-8),
	    V2 (0x1.e8f48308acda4p-12), V2 (-0x1.6fc0032b3c29fp-16),
	    V2 (0x1.af86ae521260bp-21), V2 (-0x1.012a9870eeb7dp-25) },
};
26*f3087befSAndrew Turner 
#if WANT_SIMD_EXCEPT
# define TinyBound v_u64 (0x3bf0000000000000) /* asuint64(0x1p-64).  */
/* asuint64(0x1p64) - TinyBound.  Subtracting TinyBound from asuint64(|x|)
   lets one unsigned compare against Thresh catch everything outside
   [0x1p-64, 0x1p64): tiny inputs wrap around, and huge/inf/nan bit
   patterns exceed the threshold.  */
# define Thresh v_u64 (0x07f0000000000000)

static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
{
  /* Fall back to scalar code.  Apply the odd-interval sign flip to the
     vector result first, so lanes not flagged in cmp keep their completed
     fast-path value; v_call_f64 re-evaluates only the flagged lanes with
     arm_math_sinpi.  */
  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
  return v_call_f64 (arm_math_sinpi, x, y, cmp);
}
#endif
40*f3087befSAndrew Turner 
/* Approximation for vector double-precision sinpi(x).
   Maximum Error 3.05 ULP:
  _ZGVnN2v_sinpi(0x1.d32750db30b4ap-2) got 0x1.fb295878301c7p-1
				      want 0x1.fb295878301cap-1.  */
float64x2_t VPCS_ATTR V_NAME_D1 (sinpi) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* Flag lanes with |x| outside [0x1p-64, 0x1p64) — including inf/nan —
     using a single wrap-around unsigned compare on the bit pattern.  */
  uint64x2_t ir = vreinterpretq_u64_f64 (vabsq_f64 (x));
  uint64x2_t cmp = vcgeq_u64 (vsubq_u64 (ir, TinyBound), Thresh);

  /* When WANT_SIMD_EXCEPT = 1, special lanes should be set to 0
     to avoid them under/overflowing and throwing exceptions.  */
  float64x2_t r = v_zerofy_f64 (x, cmp);
#else
  float64x2_t r = x;
#endif

  /* If r is odd, the sign of the result should be inverted.  Round r to
     the nearest integer (ties away from zero) and move its low bit into
     the sign-bit position to form a per-lane XOR mask.  */
  uint64x2_t odd
      = vshlq_n_u64 (vreinterpretq_u64_s64 (vcvtaq_s64_f64 (r)), 63);

  /* r = x - rint(x). Range reduction to -1/2 .. 1/2.  */
  r = vsubq_f64 (r, vrndaq_f64 (r));

  /* y = sin(pi * r): odd polynomial r * P(r^2), with P evaluated by a
     pairwise Horner scheme in r2 and r4 to shorten the dependency chain.  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t r4 = vmulq_f64 (r2, r2);
  float64x2_t y = vmulq_f64 (v_pw_horner_9_f64 (r2, r4, d->poly), r);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp)))
    return special_case (x, y, odd, cmp);
#endif

  /* Flip the sign for arguments whose nearest integer was odd.  */
  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
}
79*f3087befSAndrew Turner 
#if WANT_TRIGPI_TESTS
/* NOTE(review): ULP bound 2.56 vs. the 3.05 quoted above — presumably the
   harness adds an implicit allowance; confirm against the test framework.  */
TEST_ULP (V_NAME_D1 (sinpi), 2.56)
TEST_DISABLE_FENV_IF_NOT (V_NAME_D1 (sinpi), WANT_SIMD_EXCEPT)
/* Symmetric intervals covering: below the tiny-input bound, the reduced
   range, large arguments, and the huge/special-case range.  */
TEST_SYM_INTERVAL (V_NAME_D1 (sinpi), 0, 0x1p-63, 5000)
TEST_SYM_INTERVAL (V_NAME_D1 (sinpi), 0x1p-63, 0.5, 10000)
TEST_SYM_INTERVAL (V_NAME_D1 (sinpi), 0.5, 0x1p51, 10000)
TEST_SYM_INTERVAL (V_NAME_D1 (sinpi), 0x1p51, inf, 10000)
#endif
88