/*
 * Double-precision vector sin function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

static const struct data
{
  float64x2_t poly[7];
  float64x2_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
} data = {
  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
            V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
            V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
            V2 (-0x1.9e9540300a1p-41) },

  .range_val = V2 (0x1p23),
  .inv_pi = V2 (0x1.45f306dc9c883p-2),
  .pi_1 = V2 (0x1.921fb54442d18p+1),
  .pi_2 = V2 (0x1.1a62633145c06p-53),
  .pi_3 = V2 (0x1.c1cd129024e09p-106),
  .shift = V2 (0x1.8p52),
};

#if WANT_SIMD_EXCEPT
# define TinyBound v_u64 (0x3000000000000000) /* asuint64 (0x1p-255).  */
# define Thresh v_u64 (0x1160000000000000)    /* RangeVal - TinyBound.  */
#endif

#define C(i) d->poly[i]

static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
{
  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
  return v_call_f64 (sin, x, y, cmp);
}

/* Vector (AdvSIMD) sin approximation.
   Maximum observed error in [-pi/2, pi/2], where argument is not reduced,
   is 2.87 ULP:
   _ZGVnN2v_sin (0x1.921d5c6a07142p+0) got 0x1.fffffffa7dc02p-1
				      want 0x1.fffffffa7dc05p-1
   Maximum observed error in the entire non-special domain ([-2^23, 2^23])
   is 3.22 ULP:
   _ZGVnN2v_sin (0x1.5702447b6f17bp+22) got 0x1.ffdcd125c84fbp-3
				       want 0x1.ffdcd125c84f8p-3.  */
float64x2_t VPCS_ATTR V_NAME_D1 (sin) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);
  float64x2_t n, r, r2, r3, r4, y, t1, t2, t3;
  uint64x2_t odd, cmp;

#if WANT_SIMD_EXCEPT
  /* Detect |x| <= TinyBound or |x| >= RangeVal.  If fenv exceptions are to
     be triggered correctly, set any special lanes to 1 (which is neutral
     w.r.t. fenv).  These lanes will be fixed by the special-case handler
     later.  */
  uint64x2_t ir = vreinterpretq_u64_f64 (vabsq_f64 (x));
  cmp = vcgeq_u64 (vsubq_u64 (ir, TinyBound), Thresh);
  r = vbslq_f64 (cmp, vreinterpretq_f64_u64 (cmp), x);
#else
  r = x;
  cmp = vcageq_f64 (x, d->range_val);
#endif

  /* n = rint(|x|/pi).  */
  n = vfmaq_f64 (d->shift, d->inv_pi, r);
  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
  n = vsubq_f64 (n, d->shift);

  /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2).  */
  r = vfmsq_f64 (r, d->pi_1, n);
  r = vfmsq_f64 (r, d->pi_2, n);
  r = vfmsq_f64 (r, d->pi_3, n);

  /* sin(r) poly approx.  */
  r2 = vmulq_f64 (r, r);
  r3 = vmulq_f64 (r2, r);
  r4 = vmulq_f64 (r2, r2);

  t1 = vfmaq_f64 (C (4), C (5), r2);
  t2 = vfmaq_f64 (C (2), C (3), r2);
  t3 = vfmaq_f64 (C (0), C (1), r2);

  y = vfmaq_f64 (t1, C (6), r4);
  y = vfmaq_f64 (t2, y, r4);
  y = vfmaq_f64 (t3, y, r4);
  y = vfmaq_f64 (r, y, r3);

  if (unlikely (v_any_u64 (cmp)))
    return special_case (x, y, odd, cmp);
  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
}
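
#if 0
/* Illustrative scalar model of the algorithm above -- a sketch for clarity,
   not part of the library, and the name sin_model is hypothetical.  It
   mirrors the structure of the vector code: n = rint(x/pi), a sign flip on
   odd quadrants, and the odd polynomial r + r^3*P(r^2) with the same
   coefficients.  The vector code computes rint via the shift constant
   0x1.8p52 (so n's low mantissa bits can be shifted into the sign position
   for the parity XOR) and reduces with a triple-word pi; this model uses
   rint() and a single-word pi, so it is simpler but less accurate.  */
#include <math.h>

static double
sin_model (double x)
{
  static const double poly[7]
      = { -0x1.555555555547bp-3,  0x1.1111111108a4dp-7,
	  -0x1.a01a019936f27p-13, 0x1.71de37a97d93ep-19,
	  -0x1.ae633919987c6p-26, 0x1.60e277ae07cecp-33,
	  -0x1.9e9540300a1p-41 };
  double n = rint (x * 0x1.45f306dc9c883p-2); /* n = rint(x/pi).  */
  double r = x - n * 0x1.921fb54442d18p+1;    /* r = x - n*pi (one word).  */
  double r2 = r * r;
  double p = poly[6];
  for (int i = 5; i >= 0; i--) /* Horner evaluation of P(r2).  */
    p = p * r2 + poly[i];
  double y = r + r * r2 * p;   /* sin(r) ~ r + r^3*P(r^2).  */
  return ((long long) n & 1) ? -y : y; /* Odd n flips the sign.  */
}
#endif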