/*
 * Double-precision SVE cos(x) function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "pl_sig.h"
#include "pl_test.h"

#if SV_SUPPORTED

#define InvPio2 (sv_f64 (0x1.45f306dc9c882p-1))
#define NegPio2_1 (sv_f64 (-0x1.921fb50000000p+0))
#define NegPio2_2 (sv_f64 (-0x1.110b460000000p-26))
#define NegPio2_3 (sv_f64 (-0x1.1a62633145c07p-54))
/* Original shift used in Neon cos, plus a contribution to set bit #0 of q
   as expected by the trigonometric instructions.  */
#define Shift (sv_f64 (0x1.8000000000001p52))
#define RangeVal (sv_f64 (0x1p23))
#define AbsMask (0x7fffffffffffffff)

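/* Scalar fallback: apply the scalar cos to the lanes selected by cmp
   (|x| >= RangeVal, inf or NaN), keeping y in the remaining lanes.  */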
static NOINLINE sv_f64_t
__sv_cos_specialcase (sv_f64_t x, sv_f64_t y, svbool_t cmp)
{
  return sv_call_f64 (cos, x, y, cmp);
}

/* A fast SVE implementation of cos based on trigonometric
   instructions (FTMAD, FTSSEL, FTSMUL).
   Maximum measured error: 2.108 ULPs.
   __sv_cos(0x1.9b0ba158c98f3p+7) got -0x1.fddd4c65c7f07p-3
				 want -0x1.fddd4c65c7f05p-3.  */
sv_f64_t
__sv_cos_x (sv_f64_t x, const svbool_t pg)
{
  sv_f64_t n, r, r2, y;
  svbool_t cmp;

  r = sv_as_f64_u64 (svand_n_u64_x (pg, sv_as_u64_f64 (x), AbsMask));
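  /* Lanes with |x| >= RangeVal (2^23), or with inf/NaN input, are flagged
     here and handled by the scalar fallback at the end.  */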
  cmp = svcmpge_u64 (pg, sv_as_u64_f64 (r), sv_as_u64_f64 (RangeVal));

  /* n = rint(|x|/(pi/2)).  */
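  /* Adding Shift (1.5 * 2^52 plus one ulp) pushes |x|/(pi/2) into the range
     where doubles are integral, so the low mantissa bits of q hold the
     rounded quotient; the extra ulp offsets bit #0 of q, which the
     trigonometric instructions below use to select the cosine series.
     Subtracting Shift back recovers n.  */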
  sv_f64_t q = sv_fma_f64_x (pg, InvPio2, r, Shift);
  n = svsub_f64_x (pg, q, Shift);

  /* r = |x| - n*(pi/2)  (range reduction into -pi/4 .. pi/4).  */
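  /* pi/2 is split into three parts (NegPio2_1/2/3) with trailing zero bits
     in the first two, so the leading products are exact and the subtraction
     loses little accuracy to cancellation (Cody-Waite style reduction).  */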
  r = sv_fma_f64_x (pg, NegPio2_1, n, r);
  r = sv_fma_f64_x (pg, NegPio2_2, n, r);
  r = sv_fma_f64_x (pg, NegPio2_3, n, r);

  /* cos(r) poly approx.  */
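  /* FTSMUL squares r and folds the quadrant information from bit #0 of q
     into the sign of the result; the FTMAD chain then evaluates the
     polynomial using the hardware-supplied sin/cos coefficient table,
     selected by that sign and the immediate index.  */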
  r2 = svtsmul_f64 (r, sv_as_u64_f64 (q));
  y = sv_f64 (0.0);
  y = svtmad_f64 (y, r2, 7);
  y = svtmad_f64 (y, r2, 6);
  y = svtmad_f64 (y, r2, 5);
  y = svtmad_f64 (y, r2, 4);
  y = svtmad_f64 (y, r2, 3);
  y = svtmad_f64 (y, r2, 2);
  y = svtmad_f64 (y, r2, 1);
  y = svtmad_f64 (y, r2, 0);

  /* Final multiplicative factor: 1.0 or r depending on bit #0 of q.  */
  sv_f64_t f = svtssel_f64 (r, sv_as_u64_f64 (q));
  /* Apply factor.  */
  y = svmul_f64_x (pg, f, y);

  /* No need to pass pg to specialcase here since cmp is a strict subset
     of pg, guaranteed by the cmpge above.  */
  if (unlikely (svptest_any (pg, cmp)))
    return __sv_cos_specialcase (x, y, cmp);
  return y;
}

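/* Export the function under its AArch64 vector ABI name (_ZGVsMxv_cos:
   SVE, masked, vector-length agnostic, one vector argument), so compilers
   can dispatch vectorised calls to cos here.  */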
PL_ALIAS (__sv_cos_x, _ZGVsMxv_cos)

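/* Metadata for the pl/math test and benchmark harness: function signature,
   maximum acceptable ULP error and test intervals.  */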
PL_SIG (SV, D, 1, cos, -3.1, 3.1)
PL_TEST_ULP (__sv_cos, 1.61)
PL_TEST_INTERVAL (__sv_cos, 0, 0xffff0000, 10000)
PL_TEST_INTERVAL (__sv_cos, 0x1p-4, 0x1p4, 500000)
#endif