/*
 * Single-precision vector erfc(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  uint32x4_t offset, table_scale;
  float32x4_t max, shift;
  float coeffs[4];
  float32x4_t third, two_over_five, tenth;
#if WANT_SIMD_EXCEPT
  float32x4_t uflow_bound;
#endif

} data = {
  /* Set an offset so the range of the index used for lookup is 644, and it can
     be clamped using a saturating add.  */
  .offset = V4 (0xb7fffd7b),	       /* 0xffffffff - asuint(shift) - 644.  */
  .table_scale = V4 (0x28000000 << 1), /* asuint (2^-47) << 1.  */
  .max = V4 (10.0625f),		       /* 10 + 1/16 = 644/64.  */
  .shift = V4 (0x1p17f),
  /* Store 1/3, 2/3 and 2/15 in a single register for use with indexed muls and
     fmas.  */
  .coeffs = { 0x1.555556p-2f, 0x1.555556p-1f, 0x1.111112p-3f, 0 },
  .third = V4 (0x1.555556p-2f),
  .two_over_five = V4 (-0x1.99999ap-2f),
  .tenth = V4 (-0x1.99999ap-4f),
#if WANT_SIMD_EXCEPT
  .uflow_bound = V4 (0x1.2639cp+3f),
#endif
};

#define TinyBound 0x41000000 /* asuint (0x1p-62f) << 1.  */
#define Thres 0xbe000000     /* (asuint (inf) << 1) - TinyBound.  */
#define Off 0xfffffd7b	     /* 0xffffffff - 644.  */
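
/* Illustrative only: the bounds above are compared against asuint (x) << 1,
   which discards the sign bit so a single unsigned compare covers both signs.
   A scalar sketch of the check performed below, assuming the asuint bit-cast
   helper used elsewhere in this library:

     uint32_t ix = asuint (x);
     int is_tiny = (ix + ix) < TinyBound;   // |x| < 0x1p-62f
*/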

struct entry
{
  float32x4_t erfc;
  float32x4_t scale;
};

static inline struct entry
lookup (uint32x4_t i)
{
  struct entry e;
  float32x2_t t0
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 0) - Off].erfc);
  float32x2_t t1
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 1) - Off].erfc);
  float32x2_t t2
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 2) - Off].erfc);
  float32x2_t t3
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 3) - Off].erfc);
  float32x4_t e1 = vcombine_f32 (t0, t1);
  float32x4_t e2 = vcombine_f32 (t2, t3);
  e.erfc = vuzp1q_f32 (e1, e2);
  e.scale = vuzp2q_f32 (e1, e2);
  return e;
}
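
/* Each vld1_f32 above loads a contiguous { erfc, scale } pair from the table,
   giving e1 = { erfc0, scale0, erfc1, scale1 } and e2 = { erfc2, scale2,
   erfc3, scale3 }; vuzp1q_f32 gathers the even lanes into e.erfc and
   vuzp2q_f32 the odd lanes into e.scale.  */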

#if WANT_SIMD_EXCEPT
static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
{
  return v_call_f32 (erfcf, x, y, cmp);
}
#endif

/* Optimized single-precision vector erfcf(x).
   Approximation based on a series expansion near r, where r is x rounded
   to the nearest multiple of 1/64.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erfc(x) ~ erfc(r) - scale * d * poly(r, d), with

   poly(r, d) = 1 - r d + (2/3 r^2 - 1/3) d^2 - r (1/3 r^2 - 1/2) d^3
		+ (2/15 r^4 - 2/5 r^2 + 1/10) d^4

   Values of erfc(r) and scale are read from lookup tables. Stored values
   are scaled to avoid hitting the subnormal range.

   Note that for x < 0, erfc(x) = 2.0 - erfc(-x).
   Maximum error: 1.63 ULP (~1.0 ULP for x < 0.0).
   _ZGVnN4v_erfcf(0x1.1dbf7ap+3) got 0x1.f51212p-120
				 want 0x1.f51216p-120.  */
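
/* A minimal scalar sketch of the scheme above, for exposition only: the table
   lookup is elided (erfc_r and scale_r stand for the unscaled erfc(r) and
   2/sqrt(pi) * exp(-r^2)) and negative x is ignored:

     float a = fabsf (x);
     float z = a + 0x1p17f;	// rounds a to the nearest multiple of 1/64
     float r = z - 0x1p17f;
     float d = a - r, d2 = d * d, r2 = r * r;
     float poly = 1.0f - r * d + (2.0f / 3 * r2 - 1.0f / 3) * d2
		  - r * (1.0f / 3 * r2 - 0.5f) * d2 * d
		  + (2.0f / 15 * r2 * r2 - 2.0f / 5 * r2 + 0.1f) * d2 * d2;
     return erfc_r - scale_r * d * poly;
*/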
NOINLINE VPCS_ATTR float32x4_t V_NAME_F1 (erfc) (float32x4_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-62. Avoid fabs by left-shifting by 1.  */
  uint32x4_t ix = vreinterpretq_u32_f32 (x);
  uint32x4_t cmp = vcltq_u32 (vaddq_u32 (ix, ix), v_u32 (TinyBound));
  /* x >= ~9.19 (into subnormal case and uflow case). Comparison is done in
     the integer domain to avoid raising exceptions in the presence of
     NaNs.  */
  uint32x4_t uflow = vcgeq_s32 (vreinterpretq_s32_f32 (x),
				vreinterpretq_s32_f32 (dat->uflow_bound));
  cmp = vorrq_u32 (cmp, uflow);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 0 and retain a copy of x to allow
     the special-case handler to fix those lanes later. This is only necessary
     if fenv exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = v_zerofy_f32 (x, cmp);
#endif

  float32x4_t a = vabsq_f32 (x);
  a = vminq_f32 (a, dat->max);

  /* Look up erfc(r) and scale(r) in tables, e.g. erfc(r) = 1 and
     scale = 2/sqrt(pi) when x is reduced to r = 0.  */
  float32x4_t shift = dat->shift;
  float32x4_t z = vaddq_f32 (a, shift);

  /* Clamp index to a range of 644. A naive approach would use a subtract and
     min. Instead we offset the table address and the index, then use a
     saturating add.  */
  uint32x4_t i = vqaddq_u32 (vreinterpretq_u32_f32 (z), dat->offset);
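
  /* Worked example of the clamp, assuming round-to-nearest: a = 0 gives
     asuint (z) = 0x48000000, and 0x48000000 + offset = 0xfffffd7b = Off,
     i.e. table index 0 once lookup subtracts Off. a = 10.0625 gives
     asuint (z) = 0x48000284, which maps to index 644. Anything larger,
     e.g. a NaN that propagated through vminq_f32, makes the add saturate
     at 0xffffffff and also selects index 644.  */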

  struct entry e = lookup (i);

  /* erfc(x) ~ erfc(r) - scale * d * poly(r, d).  */
  float32x4_t r = vsubq_f32 (z, shift);
  float32x4_t d = vsubq_f32 (a, r);
  float32x4_t d2 = vmulq_f32 (d, d);
  float32x4_t r2 = vmulq_f32 (r, r);

  /* Evaluate d * poly as d - d^2 * (p1 + p2 d + p3 d^2 + p4 d^3), where
     p1 = r, p2 = 1/3 - 2/3 r^2, p3 = r (1/3 r^2 - 1/2) and
     p4 = -(2/15 r^4 - 2/5 r^2 + 1/10).  */
  float32x4_t p1 = r;
  float32x4_t coeffs = vld1q_f32 (dat->coeffs);
  /* p2 = 1/3 - r^2 * 2/3 (coeffs lane 1 holds 2/3).  */
  float32x4_t p2 = vfmsq_laneq_f32 (dat->third, r2, coeffs, 1);
  /* p3 = r * (r^2 * 1/3 - 1/2) (coeffs lane 0 holds 1/3).  */
  float32x4_t p3
      = vmulq_f32 (r, vfmaq_laneq_f32 (v_f32 (-0.5), r2, coeffs, 0));
  /* p4 = -1/10 - r^2 * (2/15 r^2 - 2/5) (coeffs lane 2 holds 2/15).  */
  float32x4_t p4 = vfmaq_laneq_f32 (dat->two_over_five, r2, coeffs, 2);
  p4 = vfmsq_f32 (dat->tenth, r2, p4);

  /* Horner in d for the inner polynomial, then fold d - d^2 * y = d * poly
     into erfc(r) - scale * d * poly.  */
  float32x4_t y = vfmaq_f32 (p3, d, p4);
  y = vfmaq_f32 (p2, d, y);
  y = vfmaq_f32 (p1, d, y);
  y = vfmsq_f32 (e.erfc, e.scale, vfmsq_f32 (d, d2, y));

  /* Offset equals 2.0f if the sign bit is set, else 0.0f.  */
  uint32x4_t sign = vshrq_n_u32 (vreinterpretq_u32_f32 (x), 31);
  float32x4_t off = vreinterpretq_f32_u32 (vshlq_n_u32 (sign, 30));
  /* Copy sign and scale back in a single fma. Since the bit patterns do not
     overlap, logical or and addition are equivalent here.  */
  float32x4_t fac = vreinterpretq_f32_u32 (
      vsraq_n_u32 (vshlq_n_u32 (sign, 31), dat->table_scale, 1));
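
  /* Concretely: for x >= 0, off = 0.0f and fac = 0x1p-47f, undoing the
     scaling of the stored table values; for x < 0, off = 2.0f and
     fac = -0x1p-47f, so the fma below yields 2 - erfc(-x).  */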

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u32 (cmp)))
    return special_case (xm, vfmaq_f32 (off, fac, y), cmp);
#endif

  return vfmaq_f32 (off, fac, y);
}

HALF_WIDTH_ALIAS_F1 (erfc)
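
/* Example call (illustrative; the exported symbol name is the one quoted in
   the worst-case comment above, and the prototype assumes the vector PCS):

     float32x4_t _ZGVnN4v_erfcf (float32x4_t);

     float32x4_t x = vdupq_n_f32 (1.0f);
     float32x4_t y = _ZGVnN4v_erfcf (x);   // each lane ~ erfc(1) ~ 0.1573f
*/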

TEST_SIG (V, F, 1, erfc, -4.0, 10.0)
TEST_DISABLE_FENV_IF_NOT (V_NAME_F1 (erfc), WANT_SIMD_EXCEPT)
TEST_ULP (V_NAME_F1 (erfc), 1.14)
TEST_SYM_INTERVAL (V_NAME_F1 (erfc), 0, 0x1p-26, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), 0x1p-26, 10.0625, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), -0x1p-26, -4.0, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), 10.0625, inf, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), -4.0, -inf, 40000)