/*
 * Double-precision vector erf(x) function.
 *
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64x2_t third;
  float64x2_t tenth, two_over_five, two_over_fifteen;
  float64x2_t two_over_nine, two_over_fortyfive;
  float64x2_t max, shift;
#if WANT_SIMD_EXCEPT
  float64x2_t tiny_bound, huge_bound, scale_minus_one;
#endif
} data = {
  .third = V2 (0x1.5555555555556p-2), /* used to compute 2/3 and 1/6 too.  */
  .two_over_fifteen = V2 (0x1.1111111111111p-3), /* 2/15.  */
  .tenth = V2 (-0x1.999999999999ap-4), /* -1/10.  */
  .two_over_five = V2 (-0x1.999999999999ap-2), /* -2/5.  */
  .two_over_nine = V2 (-0x1.c71c71c71c71cp-3), /* -2/9.  */
  .two_over_fortyfive = V2 (0x1.6c16c16c16c17p-5), /* 2/45.  */
  .max = V2 (5.9921875), /* 6 - 1/128.  */
  .shift = V2 (0x1p45),
#if WANT_SIMD_EXCEPT
  .huge_bound = V2 (0x1p205),
  .tiny_bound = V2 (0x1p-226),
  .scale_minus_one = V2 (0x1.06eba8214db69p-3), /* 2/sqrt(pi) - 1.0.  */
#endif
};

#define AbsMask 0x7fffffffffffffff

struct entry
{
  float64x2_t erf;
  float64x2_t scale;
};

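/* Each table entry is the pair { erf(r), scale(r) } for r a multiple of
   1/128; the two loads below fetch one pair per lane and vuzp1/vuzp2
   de-interleave them into an erf vector and a scale vector.  */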
static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  float64x2_t e1 = vld1q_f64 ((float64_t *) (__erf_data.tab + i[0])),
	      e2 = vld1q_f64 ((float64_t *) (__erf_data.tab + i[1]));
  e.erf = vuzp1q_f64 (e1, e2);
  e.scale = vuzp2q_f64 (e1, e2);
  return e;
}

/* Double-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [
       + 1
       - r d
       + 1/3 (2 r^2 - 1) d^2
       - 1/6 (r (2 r^2 - 3)) d^3
       + 1/30 (4 r^4 - 12 r^2 + 3) d^4
       - 1/90 (r (4 r^4 - 20 r^2 + 15)) d^5
     ]

   A scalar sketch of this evaluation is given after the function below.

   Maximum measured error: 2.29 ULP
   V_NAME_D1 (erf)(-0x1.00003c924e5d1p-8) got -0x1.20dd59132ebadp-8
					 want -0x1.20dd59132ebafp-8.  */
float64x2_t VPCS_ATTR V_NAME_D1 (erf) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);

  float64x2_t a = vabsq_f64 (x);
  /* Complementary conditions, neither of which is true for NaN inputs, so
     they can be used in BSLs to return the expected results.  */
  uint64x2_t a_le_max = vcleq_f64 (a, dat->max);
  uint64x2_t a_gt_max = vcgtq_f64 (a, dat->max);

#if WANT_SIMD_EXCEPT
  /* |x| huge or tiny.  */
  uint64x2_t cmp1 = vcgtq_f64 (a, dat->huge_bound);
  uint64x2_t cmp2 = vcltq_f64 (a, dat->tiny_bound);
  uint64x2_t cmp = vorrq_u64 (cmp1, cmp2);
  /* If any lanes are special, mask them with 1 for small x or 8 for large
     values, so that the main path raises no spurious exceptions for them; the
     unmodified input is still available in x, which lets the special-case
     code at the end fix these lanes. This is only necessary if fenv
     exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u64 (cmp)))
    {
      a = vbslq_f64 (cmp1, v_f64 (8.0), a);
      a = vbslq_f64 (cmp2, v_f64 (1.0), a);
    }
#endif

  /* Set r to multiple of 1/128 nearest to |x|.  */
  float64x2_t shift = dat->shift;
  float64x2_t z = vaddq_f64 (a, shift);
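  /* For in-range |x| the addition lands z in [2^45, 2^46), where the spacing
     between consecutive doubles is exactly 1/128, so the rounding happens as
     part of the add; the low mantissa bits of z then hold 128 * r, which the
     integer subtraction below recovers as the table index.  */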

  /* Look up erf(r) and scale(r) in the table, without a shortcut for small
     values, but with saturated indices for large values and NaNs in order to
     avoid segfault.  */
  uint64x2_t i
      = vsubq_u64 (vreinterpretq_u64_f64 (z), vreinterpretq_u64_f64 (shift));
  i = vbslq_u64 (a_le_max, i, v_u64 (768));
  struct entry e = lookup (i);

  float64x2_t r = vsubq_f64 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * poly (r, d).  */
  float64x2_t d = vsubq_f64 (a, r);
  float64x2_t d2 = vmulq_f64 (d, d);
  float64x2_t r2 = vmulq_f64 (r, r);

  /* poly (d, r) = 1 - p1 d - p2 d^2 - ... - p5 d^5; d * poly is assembled
     below as d - d^2 * (p1 + p2 d + p3 d^2 + p4 d^3 + p5 d^4).  */
  float64x2_t p1 = r;
  float64x2_t p2
      = vfmsq_f64 (dat->third, r2, vaddq_f64 (dat->third, dat->third));
  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->third));
  float64x2_t p4 = vfmaq_f64 (dat->two_over_five, r2, dat->two_over_fifteen);
  p4 = vfmsq_f64 (dat->tenth, r2, p4);
  float64x2_t p5 = vfmaq_f64 (dat->two_over_nine, r2, dat->two_over_fortyfive);
  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->third), r2, p5));

  float64x2_t p34 = vfmaq_f64 (p3, d, p4);
  float64x2_t p12 = vfmaq_f64 (p1, d, p2);
  float64x2_t y = vfmaq_f64 (p34, d2, p5);
  y = vfmaq_f64 (p12, d2, y);

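  /* The inner vfms gives d - d^2 * (p1 + ... + p5 d^4) = d * poly; the outer
     vfma multiplies by scale(r) and adds the tabulated erf(r).  */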
  y = vfmaq_f64 (e.erf, e.scale, vfmsq_f64 (d, d2, y));

  /* For |x| > max (including inf), erf(|x|) rounds to 1. NaN lanes fail the
     comparison and keep the NaN already propagated into y.  */
  y = vbslq_f64 (a_gt_max, v_f64 (1.0), y);

  /* Copy sign.  */
  y = vbslq_f64 (v_u64 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp2)))
    {
      /* Neutralise huge values of x before fixing small values.  */
      x = vbslq_f64 (cmp1, v_f64 (1.0), x);
      /* Fix tiny values that trigger spurious underflow.  */
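      /* For these lanes erf(x) ~ 2/sqrt(pi) * x, computed here as
	 x + x * (2/sqrt(pi) - 1) so that the dominant term x stays exact.  */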
      return vbslq_f64 (cmp2, vfmaq_f64 (x, dat->scale_minus_one, x), y);
    }
#endif
  return y;
}

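/* Illustrative scalar sketch of the expansion documented above the function.
   It is not used by the routine; the helper name and the erf_r/scale_r
   parameters (the values the table lookup supplies for r = |x| rounded to the
   nearest 1/128) exist only for exposition.  */
static inline double
erf_expansion_sketch (double ax, double r, double erf_r, double scale_r)
{
  double d = ax - r;
  double d2 = d * d;
  double r2 = r * r;
  /* Same degree-5 polynomial in d as above, with coefficients expressed as
     polynomials in r and the signs of the expansion folded in.  */
  double p1 = r;
  double p2 = (1.0 - 2.0 * r2) / 3.0;
  double p3 = r * (2.0 * r2 - 3.0) / 6.0;
  double p4 = (-3.0 + 12.0 * r2 - 4.0 * r2 * r2) / 30.0;
  double p5 = r * (15.0 - 20.0 * r2 + 4.0 * r2 * r2) / 90.0;
  double poly_tail = p1 + d * p2 + d2 * (p3 + d * p4 + d2 * p5);
  /* erf(|x|) ~ erf(r) + scale * (d - d^2 * poly_tail).  */
  return erf_r + scale_r * (d - d2 * poly_tail);
}
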
PL_SIG (V, D, 1, erf, -6.0, 6.0)
PL_TEST_ULP (V_NAME_D1 (erf), 1.79)
PL_TEST_EXPECT_FENV (V_NAME_D1 (erf), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, 5.9921875, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (erf), 5.9921875, inf, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, inf, 40000)
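
/* A minimal usage sketch, not part of the routine: with the default AdvSIMD
   build, V_NAME_D1 (erf) resolves to the vector-ABI symbol (assumed here to
   be _ZGVnN2v_erf); the helper exists only to show a call.  */
static inline float64x2_t
v_erf_usage_sketch (void)
{
  double in[2] = { 0.5, -3.0 };
  /* Expected results: erf(0.5) ~ 0.52050, erf(-3) ~ -0.99998.  */
  return V_NAME_D1 (erf) (vld1q_f64 (in));
}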
159