/*
 * Single-precision vector sin function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

static const struct data
{
  float32x4_t poly[4];
  float32x4_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
} data = {
  /* 1.886 ulp error.  */
  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
	    V4 (0x1.5b2e76p-19f) },

  .pi_1 = V4 (0x1.921fb6p+1f),
  .pi_2 = V4 (-0x1.777a5cp-24f),
  .pi_3 = V4 (-0x1.ee59dap-49f),

  .inv_pi = V4 (0x1.45f306p-2f),
  .shift = V4 (0x1.8p+23f),
  .range_val = V4 (0x1p20f)
};

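/* Illustrative scalar sketch (not part of the routine): the coefficients
   above approximate sin(r) on the reduced interval as the odd polynomial

     sin(r) ~= r + r^3 * (C0 + C1*r^2 + C2*r^4 + C3*r^6),

   with C0..C3 = poly[0..3].  The unused helper below restates that
   evaluation in scalar code purely for documentation.  */
static inline float __attribute__ ((unused))
sinf_poly_sketch (float r)
{
  float r2 = r * r;
  float y = -0x1.9f42eap-13f + 0x1.5b2e76p-19f * r2; /* C2 + C3*r^2.  */
  y = 0x1.110df4p-7f + y * r2;  /* C1 + (C2 + C3*r^2)*r^2.  */
  y = -0x1.555548p-3f + y * r2; /* C0 + (...)*r^2.  */
  return r + (y * r2) * r;      /* r + y*r^3.  */
}
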
#if WANT_SIMD_EXCEPT
# define TinyBound v_u32 (0x21000000) /* asuint32(0x1p-61f).  */
# define Thresh v_u32 (0x28800000)    /* RangeVal - TinyBound.  */
#endif

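/* Why one unsigned compare covers both bounds above: asuint32 (0x1p-61f) is
   0x21000000 and asuint32 (0x1p20f) is 0x49800000, so Thresh is exactly
   their difference.  For 0x1p-61 <= |x| < 0x1p20 the subtraction
   ir - TinyBound stays below Thresh; tiny |x| wraps around to a huge
   unsigned value, and large |x|, infinity or NaN overshoots, so all of
   them are routed to the scalar fallback.  */
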
#define C(i) d->poly[i]

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
{
  /* Fall back to scalar code.  */
  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
  return v_call_f32 (sinf, x, y, cmp);
}

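/* v_call_f32 recomputes only the lanes flagged in cmp with scalar sinf and
   keeps the vector result for the rest, which is why special_case folds the
   odd-quadrant sign flip into y before making the call.  */
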
float32x4_t VPCS_ATTR V_NAME_F1 (sin) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  float32x4_t n, r, r2, y;
  uint32x4_t odd, cmp;

#if WANT_SIMD_EXCEPT
  uint32x4_t ir = vreinterpretq_u32_f32 (vabsq_f32 (x));
  cmp = vcgeq_u32 (vsubq_u32 (ir, TinyBound), Thresh);
  /* If fenv exceptions are to be triggered correctly, set any special lanes
     to the comparison mask itself: reinterpreted as float this is a quiet
     NaN, which is neutral w.r.t. fenv.  These lanes will be fixed by the
     special-case handler later.  */
  r = vbslq_f32 (cmp, vreinterpretq_f32_u32 (cmp), x);
#else
  r = x;
  cmp = vcageq_f32 (x, d->range_val);
#endif

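  /* Lanes flagged in cmp are fixed up by the scalar fallback at the end:
     the three-term reduction below only stays accurate for |x| below
     range_val, while scalar sinf performs full range reduction.  */
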
  /* n = rint(x/pi) */
  n = vfmaq_f32 (d->shift, d->inv_pi, r);
  odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
  n = vsubq_f32 (n, d->shift);

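  /* Above, adding the large constant shift (0x1.8p+23) forces x/pi to be
     rounded to the nearest integer in the low mantissa bits, so bit 0 of
     the biased sum is the parity of n and n - shift recovers rint(x/pi).
     Moving that parity bit to position 31 builds a sign mask, since
     sin(x) = (-1)^n * sin(x - n*pi).  */
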
  /* r = x - n*pi  (range reduction into -pi/2 .. pi/2) */
  r = vfmsq_f32 (r, d->pi_1, n);
  r = vfmsq_f32 (r, d->pi_2, n);
  r = vfmsq_f32 (r, d->pi_3, n);

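  /* The three subtractions above are a Cody-Waite style reduction: pi_1 is
     pi rounded to float, and pi_2 and pi_3 carry successive chunks of the
     remainder, so subtracting n*pi in stages with FMA keeps r accurate
     where a single n*pi product would round away the low-order bits.  */
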
  /* y = sin(r) */
  r2 = vmulq_f32 (r, r);
  y = vfmaq_f32 (C (2), C (3), r2);
  y = vfmaq_f32 (C (1), y, r2);
  y = vfmaq_f32 (C (0), y, r2);
  y = vfmaq_f32 (r, vmulq_f32 (y, r2), r);

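  /* The above is a Horner scheme in r2 = r^2 for the odd polynomial:
     y = C0 + C1*r^2 + C2*r^4 + C3*r^6, and the final FMA forms
     r + (y*r2)*r = r + y*r^3, adding the correction to r directly to
     preserve accuracy for small r.  */
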
  if (unlikely (v_any_u32 (cmp)))
    return special_case (x, y, odd, cmp);
  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
}
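
/* Usage sketch (illustrative only, unused by the build): under the vector
   function ABI used by this library, V_NAME_F1 (sin) is expected to expand
   to the exported symbol _ZGVnN4v_sinf; the helper below simply calls the
   routine defined above on four lanes at once.  */
static inline float32x4_t __attribute__ ((unused))
v_sinf_demo (void)
{
  float32x4_t vx = vdupq_n_f32 (0.5f); /* Four copies of 0.5.  */
  return V_NAME_F1 (sin) (vx);         /* Four sines in one call.  */
}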