/*
 * Double-precision vector erfc(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  uint64x2_t offset, table_scale;
  float64x2_t max, shift;
  float64x2_t p20, p40, p41, p51;
  double p42, p52;
  double qr5[2], qr6[2], qr7[2], qr8[2], qr9[2];
#if WANT_SIMD_EXCEPT
  float64x2_t uflow_bound;
#endif
} data = {
  /* Set an offset so that the index used for lookup spans [0, 3487], and can
     be clamped using a saturating add on the offset index.
     Index offset is 0xffffffffffffffff - asuint64(shift) - 3487.  */
  .offset = V2 (0xbd3ffffffffff260),
  .table_scale = V2 (0x37f0000000000000 << 1), /* asuint64 (2^-128) << 1.  */
  .max = V2 (0x1.b3ep+4),		       /* 3487/128.  */
  .shift = V2 (0x1p45),
  .p20 = V2 (0x1.5555555555555p-2),  /* 1/3, used to compute 2/3 and 1/6.  */
  .p40 = V2 (-0x1.999999999999ap-4), /* 1/10.  */
  .p41 = V2 (-0x1.999999999999ap-2), /* 2/5.  */
  .p42 = 0x1.1111111111111p-3,	     /* 2/15.  */
  .p51 = V2 (-0x1.c71c71c71c71cp-3), /* 2/9.  */
  .p52 = 0x1.6c16c16c16c17p-5,	     /* 2/45.  */
  /* Qi = (i+1) / i, Ri = -2 * i / ((i+1)*(i+2)), for i = 5, ..., 9.  */
  .qr5 = { 0x1.3333333333333p0, -0x1.e79e79e79e79ep-3 },
  .qr6 = { 0x1.2aaaaaaaaaaabp0, -0x1.b6db6db6db6dbp-3 },
  .qr7 = { 0x1.2492492492492p0, -0x1.8e38e38e38e39p-3 },
  .qr8 = { 0x1.2p0, -0x1.6c16c16c16c17p-3 },
  .qr9 = { 0x1.1c71c71c71c72p0, -0x1.4f2094f2094f2p-3 },
#if WANT_SIMD_EXCEPT
  .uflow_bound = V2 (0x1.a8b12fc6e4892p+4),
#endif
};

#define TinyBound 0x4000000000000000 /* asuint64 (0x1p-511) << 1.  */
#define Off 0xfffffffffffff260	     /* 0xffffffffffffffff - 3487.  */
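
/* With shift = 0x1p45, asuint64 (shift) = 0x42c0000000000000, so
   offset = 0xffffffffffffffff - 0x42c0000000000000 - 3487
	  = 0xbd3ffffffffff260,
   and asuint64 (shift) + offset = Off. Since Off + 3487 = 0xffffffffffffffff,
   the saturating add clamps any out-of-range index to the last table entry.  */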

struct entry
{
  float64x2_t erfc;
  float64x2_t scale;
};

static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
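  /* Each table entry stores the pair { erfc(r), scale(r) } contiguously, so a
     single vld1q_f64 per lane loads both values; vuzp1/vuzp2 then separate the
     erfc values and the scale values into distinct vectors.  */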
  float64x2_t e1
      = vld1q_f64 (&__v_erfc_data.tab[vgetq_lane_u64 (i, 0) - Off].erfc);
  float64x2_t e2
      = vld1q_f64 (&__v_erfc_data.tab[vgetq_lane_u64 (i, 1) - Off].erfc);
  e.erfc = vuzp1q_f64 (e1, e2);
  e.scale = vuzp2q_f64 (e1, e2);
  return e;
}

#if WANT_SIMD_EXCEPT
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t cmp)
{
  return v_call_f64 (erfc, x, y, cmp);
}
#endif

/* Optimized double-precision vector erfc(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.

   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erfc(x) ~ erfc(r) - scale * d * poly(r, d), with

   poly(r, d) = 1 - r d + (2/3 r^2 - 1/3) d^2 - r (1/3 r^2 - 1/2) d^3
		+ (2/15 r^4 - 2/5 r^2 + 1/10) d^4
		- r * (2/45 r^4 - 2/9 r^2 + 1/6) d^5
		+ p6(r) d^6 + ... + p10(r) d^10

   Polynomials p6(r) to p10(r) are computed using the recurrence relation

   2(i+1)p_i + 2r(i+2)p_{i+1} + (i+2)(i+3)p_{i+2} = 0,
   with p0 = 1, and p1(r) = -r.
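
   Dividing through by -(i+2)(i+3) gives the equivalent form used below,

   p_{i+2} = R_{i+1} (p_i + r Q_{i+1} p_{i+1}),

   with Q_i = (i+1) / i and R_i = -2 i / ((i+1)(i+2)).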

   Values of erfc(r) and scale are read from lookup tables. Stored values
   are scaled to avoid hitting the subnormal range.

   Note that for x < 0, erfc(x) = 2.0 - erfc(-x).

   Maximum measured error: 1.71 ULP
   V_NAME_D1 (erfc)(0x1.46cfe976733p+4) got 0x1.e15fcbea3e7afp-608
				       want 0x1.e15fcbea3e7adp-608.  */
VPCS_ATTR
float64x2_t V_NAME_D1 (erfc) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-511. Avoid fabs by left-shifting by 1 to discard the sign bit.  */
  uint64x2_t ix = vreinterpretq_u64_f64 (x);
  uint64x2_t cmp = vcltq_u64 (vaddq_u64 (ix, ix), v_u64 (TinyBound));
  /* x >= ~26.54, where the result is subnormal or underflows. Comparison is
     done in the integer domain to avoid raising exceptions in the presence of
     NaNs.  */
  uint64x2_t uflow = vcgeq_s64 (vreinterpretq_s64_f64 (x),
				vreinterpretq_s64_f64 (dat->uflow_bound));
  cmp = vorrq_u64 (cmp, uflow);
  float64x2_t xm = x;
  /* If any lanes are special, mask them with 0 and retain a copy of x to allow
     the special-case handler to fix them later. This is only necessary if fenv
     exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u64 (cmp)))
    x = v_zerofy_f64 (x, cmp);
#endif

  float64x2_t a = vabsq_f64 (x);
  a = vminq_f64 (a, dat->max);

  /* Lookup erfc(r) and scale(r) in tables, e.g. erfc(r) = 1 and
     scale = 2/sqrt(pi), when x is reduced to r = 0.  */
  float64x2_t shift = dat->shift;
  float64x2_t z = vaddq_f64 (a, shift);
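  /* Since shift = 0x1p45 and 0 <= a <= 27.25, the ulp of z is 2^-7 = 1/128:
     the addition above rounds a to the nearest multiple of 1/128 and leaves
     round (a * 128) in the low mantissa bits of z.  */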

  /* Clamp the index to [0, 3487]. A naive approach would use a subtract and
     min. Instead we offset the table address and the index, then use a
     saturating add.  */
  uint64x2_t i = vqaddq_u64 (vreinterpretq_u64_f64 (z), dat->offset);
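  /* asuint64 (z) = asuint64 (shift) + round (a * 128) and
     asuint64 (shift) + offset = Off, so i - Off is exactly the table index;
     the saturation clamps i at the last entry.  */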

  struct entry e = lookup (i);

  /* erfc(x) ~ erfc(r) - scale * d * poly(r, d).  */
  float64x2_t r = vsubq_f64 (z, shift);
  float64x2_t d = vsubq_f64 (a, r);
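  /* r is a rounded to the nearest multiple of 1/128, so |d| <= 1/256.  */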
  float64x2_t d2 = vmulq_f64 (d, d);
  float64x2_t r2 = vmulq_f64 (r, r);

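  /* p1 ... p10 below are the negated coefficients of poly (r, d): the final
     step computes d - d^2 * y = d * poly (r, d), which restores the signs.
     E.g. p2 = 1/3 - 2/3 r^2 = -(2/3 r^2 - 1/3).  */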
  float64x2_t p1 = r;
  float64x2_t p2 = vfmsq_f64 (dat->p20, r2, vaddq_f64 (dat->p20, dat->p20));
  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->p20));
  /* p42 and p52 are adjacent in the data struct, so a single load fills both
     lanes of p42_p52.  */
  float64x2_t p42_p52 = vld1q_f64 (&dat->p42);
  float64x2_t p4 = vfmaq_laneq_f64 (dat->p41, r2, p42_p52, 0);
  p4 = vfmsq_f64 (dat->p40, r2, p4);
  float64x2_t p5 = vfmaq_laneq_f64 (dat->p51, r2, p42_p52, 1);
  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->p20), r2, p5));
  /* Compute p_i using recurrence relation:
     p_{i+2} = (p_i + r * Q_{i+1} * p_{i+1}) * R_{i+1}.  */
  float64x2_t qr5 = vld1q_f64 (dat->qr5), qr6 = vld1q_f64 (dat->qr6),
	      qr7 = vld1q_f64 (dat->qr7), qr8 = vld1q_f64 (dat->qr8),
	      qr9 = vld1q_f64 (dat->qr9);
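  /* E.g. p6 = R5 (p4 + r Q5 p5), with Q5 = 6/5 = 0x1.3333333333333p0 and
     R5 = -2 * 5 / (6 * 7) = -5/21 = -0x1.e79e79e79e79ep-3, i.e. qr5.  */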
  float64x2_t p6 = vfmaq_f64 (p4, p5, vmulq_laneq_f64 (r, qr5, 0));
  p6 = vmulq_laneq_f64 (p6, qr5, 1);
  float64x2_t p7 = vfmaq_f64 (p5, p6, vmulq_laneq_f64 (r, qr6, 0));
  p7 = vmulq_laneq_f64 (p7, qr6, 1);
  float64x2_t p8 = vfmaq_f64 (p6, p7, vmulq_laneq_f64 (r, qr7, 0));
  p8 = vmulq_laneq_f64 (p8, qr7, 1);
  float64x2_t p9 = vfmaq_f64 (p7, p8, vmulq_laneq_f64 (r, qr8, 0));
  p9 = vmulq_laneq_f64 (p9, qr8, 1);
  float64x2_t p10 = vfmaq_f64 (p8, p9, vmulq_laneq_f64 (r, qr9, 0));
  p10 = vmulq_laneq_f64 (p10, qr9, 1);
  /* Compute polynomial in d using pairwise Horner scheme.  */
  float64x2_t p90 = vfmaq_f64 (p9, d, p10);
  float64x2_t p78 = vfmaq_f64 (p7, d, p8);
  float64x2_t p56 = vfmaq_f64 (p5, d, p6);
  float64x2_t p34 = vfmaq_f64 (p3, d, p4);
  float64x2_t p12 = vfmaq_f64 (p1, d, p2);
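  /* y = p12 + d2 (p34 + d2 (p56 + d2 (p78 + d2 * p90)))
       = p1 + p2 d + p3 d^2 + ... + p10 d^9.  */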
  float64x2_t y = vfmaq_f64 (p78, d2, p90);
  y = vfmaq_f64 (p56, d2, y);
  y = vfmaq_f64 (p34, d2, y);
  y = vfmaq_f64 (p12, d2, y);

  y = vfmsq_f64 (e.erfc, e.scale, vfmsq_f64 (d, d2, y));
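  /* The inner vfmsq computes d - d^2 * y = d * poly (r, d), so y is now
     erfc(r) - scale * d * poly (r, d) in the scaled table domain.  */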

  /* Offset equals 2.0 if the sign bit is set, else 0.0.  */
  uint64x2_t sign = vshrq_n_u64 (vreinterpretq_u64_f64 (x), 63);
  float64x2_t off = vreinterpretq_f64_u64 (vshlq_n_u64 (sign, 62));
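  /* When the sign bit is set, sign << 62 is 0x4000000000000000
     = asuint64 (2.0); otherwise it is 0.  */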
  /* Combine the sign and the scale-back factor so that a single fma both
     rescales y and applies the sign. Since the bit patterns do not overlap,
     logical OR and addition are equivalent here.  */
  float64x2_t fac = vreinterpretq_f64_u64 (
      vsraq_n_u64 (vshlq_n_u64 (sign, 63), dat->table_scale, 1));
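  /* table_scale is asuint64 (2^-128) << 1, so the shift-right-accumulate
     computes (sign << 63) + asuint64 (2^-128) = asuint64 (+/-2^-128), which
     applies the sign and undoes the 2^128 scaling of the table values.  */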

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp)))
    return special_case (xm, vfmaq_f64 (off, fac, y), cmp);
#endif

  return vfmaq_f64 (off, fac, y);
}
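
/* Illustrative usage sketch: with v_math.h included, the routine can be
   called directly on a vector of inputs, e.g.

     float64x2_t y = V_NAME_D1 (erfc) (v_f64 (0.5));

   which computes erfc(0.5) in both lanes.  */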

TEST_SIG (V, D, 1, erfc, -6.0, 28.0)
TEST_ULP (V_NAME_D1 (erfc), 1.21)
TEST_DISABLE_FENV_IF_NOT (V_NAME_D1 (erfc), WANT_SIMD_EXCEPT)
TEST_SYM_INTERVAL (V_NAME_D1 (erfc), 0, 0x1p-26, 40000)
TEST_INTERVAL (V_NAME_D1 (erfc), 0x1p-26, 28.0, 40000)
TEST_INTERVAL (V_NAME_D1 (erfc), -0x1p-26, -6.0, 40000)
TEST_INTERVAL (V_NAME_D1 (erfc), 28.0, inf, 40000)
TEST_INTERVAL (V_NAME_D1 (erfc), -6.0, -inf, 40000)