/*
 * Single-precision vector cos function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

static const struct data
{
  float32x4_t poly[4];
  float32x4_t range_val, inv_pi, half_pi, shift, pi_1, pi_2, pi_3;
} data = {
  /* 1.886 ulp error.  */
  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
	    V4 (0x1.5b2e76p-19f) },

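  /* pi written as a sum of three floats: pi_1 is pi rounded to single
     precision, pi_2 and pi_3 are successive correction terms.  Subtracting
     n * pi in three steps keeps the range reduction accurate.  */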
  .pi_1 = V4 (0x1.921fb6p+1f),
  .pi_2 = V4 (-0x1.777a5cp-24f),
  .pi_3 = V4 (-0x1.ee59dap-49f),

  .inv_pi = V4 (0x1.45f306p-2f),
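  /* 1.5 * 2^23.  Adding this constant forces rounding to an integer in the
     low mantissa bits (assuming round-to-nearest); subtracting it again
     recovers rint() of the original value without a rounding instruction.  */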
  .shift = V4 (0x1.8p+23f),
  .half_pi = V4 (0x1.921fb6p0f),
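  /* Inputs with |x| >= 2^20 take the scalar fallback path.  */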
  .range_val = V4 (0x1p20f)
};

#define C(i) d->poly[i]

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
{
  /* Apply the quadrant sign flip to the fast-path result, then fall back to
     scalar cosf for the lanes flagged in cmp.  */
  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
  return v_call_f32 (cosf, x, y, cmp);
}

float32x4_t VPCS_ATTR V_NAME_F1 (cos) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  float32x4_t n, r, r2, r3, y;
  uint32x4_t odd, cmp;

#if WANT_SIMD_EXCEPT
  r = vabsq_f32 (x);
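  /* Compare the bit patterns of |x| as unsigned integers: for positive
     floats this orders the same way as the values, and it also flags NaN and
     infinity, without using a floating-point compare.  */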
  cmp = vcgeq_u32 (vreinterpretq_u32_f32 (r),
		   vreinterpretq_u32_f32 (d->range_val));
  if (unlikely (v_any_u32 (cmp)))
    /* If fenv exceptions are to be triggered correctly, set any special lanes
       to 1 (which is neutral w.r.t. fenv).  These lanes will be fixed by the
       special-case handler later.  */
    r = vbslq_f32 (cmp, v_f32 (1.0f), r);
#else
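  /* When fenv exceptions do not matter, a single compare of |x| against the
     range limit is enough; special lanes are patched after the polynomial.  */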
  cmp = vcageq_f32 (x, d->range_val);
  r = x;
#endif

  /* n = rint((|x|+pi/2)/pi) - 0.5.  */
  n = vfmaq_f32 (d->shift, d->inv_pi, vaddq_f32 (r, d->half_pi));
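  /* While the shift is still in n, the rounded quadrant integer sits in the
     low mantissa bits; moving bit 0 to the sign position gives a mask that
     negates sin(r) for odd quadrants.  */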
  odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
  n = vsubq_f32 (n, d->shift);
  n = vsubq_f32 (n, v_f32 (0.5f));

  /* r = |x| - n*pi  (range reduction into -pi/2 .. pi/2).  */
  r = vfmsq_f32 (r, d->pi_1, n);
  r = vfmsq_f32 (r, d->pi_2, n);
  r = vfmsq_f32 (r, d->pi_3, n);

  /* y = sin(r) ~ r + r^3 * (C0 + C1 r^2 + C2 r^4 + C3 r^6), with the inner
     polynomial evaluated by Horner's scheme in r^2.  */
  r2 = vmulq_f32 (r, r);
  r3 = vmulq_f32 (r2, r);
  y = vfmaq_f32 (C (2), C (3), r2);
  y = vfmaq_f32 (C (1), y, r2);
  y = vfmaq_f32 (C (0), y, r2);
  y = vfmaq_f32 (r, y, r3);

  if (unlikely (v_any_u32 (cmp)))
    return special_case (x, y, odd, cmp);
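  /* With m = n + 0.5 the quadrant integer, cos(x) = (-1)^m * sin(r); xoring
     the sign bit with odd applies that sign.  */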
  return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
}
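
/* Usage sketch (not part of the upstream source): in this library's
   v_math.h, V_NAME_F1 (cos) expands to the AArch64 vector-function-ABI name
   _ZGVnN4v_cosf, declared in mathlib.h, so a caller can evaluate four
   cosines at once:

     float32x4_t x = { 0.1f, 0.5f, 1.0f, 2.0f };
     float32x4_t y = _ZGVnN4v_cosf (x);
 */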