1 /*
2 * Helper for Double-precision vector sincospi function.
3 *
4 * Copyright (c) 2024, Arm Limited.
5 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6 */
7 #include "v_math.h"
8 #include "v_poly_f64.h"
9
/* Shared data for the sinpi polynomial: both sinpi and cospi are evaluated
   from the same odd polynomial after their respective argument reductions.  */
static const struct v_sincospi_data
{
  float64x2_t poly[10], range_val;
} v_sincospi_data = {
  /* Polynomial coefficients generated using Remez algorithm,
     see sinpi.sollya for details.  */
  .poly = { V2 (0x1.921fb54442d184p1), V2 (-0x1.4abbce625be53p2),
            V2 (0x1.466bc6775ab16p1), V2 (-0x1.32d2cce62dc33p-1),
            V2 (0x1.507834891188ep-4), V2 (-0x1.e30750a28c88ep-8),
            V2 (0x1.e8f48308acda4p-12), V2 (-0x1.6fc0032b3c29fp-16),
            V2 (0x1.af86ae521260bp-21), V2 (-0x1.012a9870eeb7dp-25) },
  /* 2^63: at or above this the float64->int64 conversion used to extract
     the odd/even quadrant bit is not representable, so that bit is masked
     off (such x is an even integer in every lane anyway).  */
  .range_val = V2 (0x1p63),
};
23
24 /* Double-precision vector function allowing calculation of both sin and cos in
25 one function call, using separate argument reduction and shared low-order
26 polynomials.
27 Approximation for vector double-precision sincospi(x).
28 Maximum Error 3.09 ULP:
29 _ZGVnN2v_sincospi_sin(0x1.7a41deb4b21e1p+14) got 0x1.fd54d0b327cf1p-1
30 want 0x1.fd54d0b327cf4p-1
31 Maximum Error 3.16 ULP:
32 _ZGVnN2v_sincospi_cos(-0x1.11e3c7e284adep-5) got 0x1.fd2da484ff3ffp-1
33 want 0x1.fd2da484ff402p-1. */
/* r * P(r^2), the shared pairwise-Horner sinpi polynomial.  */
static inline float64x2_t
v_sincospi_poly (float64x2_t r, const float64x2_t *poly)
{
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t r4 = vmulq_f64 (r2, r2);
  return vmulq_f64 (v_pw_horner_9_f64 (r2, r4, poly), r);
}

static inline float64x2x2_t
v_sincospi_inline (float64x2_t x, const struct v_sincospi_data *d)
{
  /* Lanes where x >= 2^63: the int64 conversion below cannot represent x,
     so suppress the sign flip there.  */
  uint64x2_t oob = vcgeq_f64 (x, d->range_val);
  /* MSB-only mask, set in lanes whose nearest integer is odd: the result of
     sinpi must be negated and that of cospi restored in those lanes.  */
  uint64x2_t sign_flip = vshlq_n_u64 (
      vbicq_u64 (vreinterpretq_u64_s64 (vcvtaq_s64_f64 (x)), oob), 63);

  /* Reduce to r = x - rint(x) in [-1/2, 1/2].  */
  float64x2_t r_sin = vsubq_f64 (x, vrndaq_f64 (x));
  /* cospi(r) = sinpi(1/2 - |r|) on that interval.  */
  float64x2_t r_cos = vsubq_f64 (v_f64 (0.5), vabsq_f64 (r_sin));

  /* Same polynomial for both halves, applied to each reduced argument.  */
  float64x2_t ss = v_sincospi_poly (r_sin, d->poly);
  float64x2_t cc = v_sincospi_poly (r_cos, d->poly);

  /* XOR the quadrant sign into both results.  */
  float64x2x2_t ret;
  ret.val[0] = vreinterpretq_f64_u64 (
      veorq_u64 (vreinterpretq_u64_f64 (ss), sign_flip));
  ret.val[1] = vreinterpretq_f64_u64 (
      veorq_u64 (vreinterpretq_u64_f64 (cc), sign_flip));
  return ret;
}
65