xref: /freebsd/contrib/arm-optimized-routines/pl/math/v_erfinvf_5u.c (revision 5a02ffc32e777041dd2dad4e651ed2a0865a0a5d)
1*5a02ffc3SAndrew Turner /*
2*5a02ffc3SAndrew Turner  * Single-precision inverse error function (AdvSIMD variant).
3*5a02ffc3SAndrew Turner  *
4*5a02ffc3SAndrew Turner  * Copyright (c) 2023, Arm Limited.
5*5a02ffc3SAndrew Turner  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6*5a02ffc3SAndrew Turner  */
7*5a02ffc3SAndrew Turner #include "v_math.h"
8*5a02ffc3SAndrew Turner #include "pl_sig.h"
9*5a02ffc3SAndrew Turner #include "pl_test.h"
10*5a02ffc3SAndrew Turner #include "poly_advsimd_f32.h"
11*5a02ffc3SAndrew Turner #include "v_logf_inline.h"
12*5a02ffc3SAndrew Turner 
13*5a02ffc3SAndrew Turner const static struct data
14*5a02ffc3SAndrew Turner {
15*5a02ffc3SAndrew Turner   /*  We use P_N and Q_N to refer to arrays of coefficients, where P_N is the
16*5a02ffc3SAndrew Turner       coeffs of the numerator in table N of Blair et al, and Q_N is the coeffs
17*5a02ffc3SAndrew Turner       of the denominator. Coefficients are stored in various interleaved
18*5a02ffc3SAndrew Turner       formats to allow for table-based (vector-to-vector) lookup.
19*5a02ffc3SAndrew Turner 
20*5a02ffc3SAndrew Turner       Plo is first two coefficients of P_10 and P_29 interleaved.
21*5a02ffc3SAndrew Turner       PQ is third coeff of P_10 and first of Q_29 interleaved.
22*5a02ffc3SAndrew Turner       Qhi is second and third coeffs of Q_29 interleaved.
23*5a02ffc3SAndrew Turner       P29_3 is a homogenous vector with fourth coeff of P_29.
24*5a02ffc3SAndrew Turner 
25*5a02ffc3SAndrew Turner       P_10 and Q_10 are also stored in homogenous vectors to allow better
26*5a02ffc3SAndrew Turner       memory access when no lanes are in a tail region.  */
27*5a02ffc3SAndrew Turner   float32x4_t Plo, PQ, Qhi, P29_3, tailshift;
28*5a02ffc3SAndrew Turner   float32x4_t P_50[6], Q_50[2];
29*5a02ffc3SAndrew Turner   float32x4_t P_10[3], Q_10[3];
30*5a02ffc3SAndrew Turner   uint8x16_t idxhi, idxlo;
31*5a02ffc3SAndrew Turner   struct v_logf_data logf_tbl;
32*5a02ffc3SAndrew Turner } data = {
33*5a02ffc3SAndrew Turner   .idxlo = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
34*5a02ffc3SAndrew Turner   .idxhi = { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 },
35*5a02ffc3SAndrew Turner   .P29_3 = V4 (0x1.b13626p-2),
36*5a02ffc3SAndrew Turner   .tailshift = V4 (-0.87890625),
37*5a02ffc3SAndrew Turner   .Plo = { -0x1.a31268p+3, -0x1.fc0252p-4, 0x1.ac9048p+4, 0x1.119d44p+0 },
38*5a02ffc3SAndrew Turner   .PQ = { -0x1.293ff6p+3, -0x1.f59ee2p+0, -0x1.8265eep+3, -0x1.69952p-4 },
39*5a02ffc3SAndrew Turner   .Qhi = { 0x1.ef5eaep+4, 0x1.c7b7d2p-1, -0x1.12665p+4, -0x1.167d7p+1 },
40*5a02ffc3SAndrew Turner   .P_50 = { V4 (0x1.3d8948p-3), V4 (0x1.61f9eap+0), V4 (0x1.61c6bcp-1),
41*5a02ffc3SAndrew Turner 	    V4 (-0x1.20c9f2p+0), V4 (0x1.5c704cp-1), V4 (-0x1.50c6bep-3) },
42*5a02ffc3SAndrew Turner   .Q_50 = { V4 (0x1.3d7dacp-3), V4 (0x1.629e5p+0) },
43*5a02ffc3SAndrew Turner   .P_10 = { V4 (-0x1.a31268p+3), V4 (0x1.ac9048p+4), V4 (-0x1.293ff6p+3) },
44*5a02ffc3SAndrew Turner   .Q_10 = { V4 (-0x1.8265eep+3), V4 (0x1.ef5eaep+4), V4 (-0x1.12665p+4) },
45*5a02ffc3SAndrew Turner   .logf_tbl = V_LOGF_CONSTANTS
46*5a02ffc3SAndrew Turner };
47*5a02ffc3SAndrew Turner 
48*5a02ffc3SAndrew Turner static inline float32x4_t
special(float32x4_t x,const struct data * d)49*5a02ffc3SAndrew Turner special (float32x4_t x, const struct data *d)
50*5a02ffc3SAndrew Turner {
51*5a02ffc3SAndrew Turner   /* Note erfinvf(inf) should return NaN, and erfinvf(1) should return Inf.
52*5a02ffc3SAndrew Turner      By using log here, instead of log1p, we return finite values for both
53*5a02ffc3SAndrew Turner      these inputs, and values outside [-1, 1]. This is non-compliant, but is an
54*5a02ffc3SAndrew Turner      acceptable optimisation at Ofast. To get correct behaviour for all finite
55*5a02ffc3SAndrew Turner      values use the log1pf_inline helper on -abs(x) - note that erfinvf(inf)
56*5a02ffc3SAndrew Turner      will still be finite.  */
57*5a02ffc3SAndrew Turner   float32x4_t t = vdivq_f32 (
58*5a02ffc3SAndrew Turner       v_f32 (1), vsqrtq_f32 (vnegq_f32 (v_logf_inline (
59*5a02ffc3SAndrew Turner 		     vsubq_f32 (v_f32 (1), vabsq_f32 (x)), &d->logf_tbl))));
60*5a02ffc3SAndrew Turner   float32x4_t ts = vbslq_f32 (v_u32 (0x7fffffff), t, x);
61*5a02ffc3SAndrew Turner   float32x4_t q = vfmaq_f32 (d->Q_50[0], vaddq_f32 (t, d->Q_50[1]), t);
62*5a02ffc3SAndrew Turner   return vdivq_f32 (v_horner_5_f32 (t, d->P_50), vmulq_f32 (ts, q));
63*5a02ffc3SAndrew Turner }
64*5a02ffc3SAndrew Turner 
65*5a02ffc3SAndrew Turner static inline float32x4_t
notails(float32x4_t x,const struct data * d)66*5a02ffc3SAndrew Turner notails (float32x4_t x, const struct data *d)
67*5a02ffc3SAndrew Turner {
68*5a02ffc3SAndrew Turner   /* Shortcut when no input is in a tail region - no need to gather shift or
69*5a02ffc3SAndrew Turner      coefficients.  */
70*5a02ffc3SAndrew Turner   float32x4_t t = vfmaq_f32 (v_f32 (-0.5625), x, x);
71*5a02ffc3SAndrew Turner   float32x4_t q = vaddq_f32 (t, d->Q_10[2]);
72*5a02ffc3SAndrew Turner   q = vfmaq_f32 (d->Q_10[1], t, q);
73*5a02ffc3SAndrew Turner   q = vfmaq_f32 (d->Q_10[0], t, q);
74*5a02ffc3SAndrew Turner 
75*5a02ffc3SAndrew Turner   return vdivq_f32 (vmulq_f32 (x, v_horner_2_f32 (t, d->P_10)), q);
76*5a02ffc3SAndrew Turner }
77*5a02ffc3SAndrew Turner 
78*5a02ffc3SAndrew Turner static inline float32x4_t
lookup(float32x4_t tbl,uint8x16_t idx)79*5a02ffc3SAndrew Turner lookup (float32x4_t tbl, uint8x16_t idx)
80*5a02ffc3SAndrew Turner {
81*5a02ffc3SAndrew Turner   return vreinterpretq_f32_u8 (vqtbl1q_u8 (vreinterpretq_u8_f32 (tbl), idx));
82*5a02ffc3SAndrew Turner }
83*5a02ffc3SAndrew Turner 
/* Vector implementation of Blair et al's rational approximation to inverse
   error function in single-precision. Worst-case error is 4.98 ULP, in the
   tail region:
   _ZGVnN4v_erfinvf(0x1.f7dbeep-1) got 0x1.b4793p+0
				  want 0x1.b4793ap+0 .  */
float32x4_t VPCS_ATTR V_NAME_F1 (erfinv) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* Calculate inverse error using algorithm described in
     J. M. Blair, C. A. Edwards, and J. H. Johnson,
     "Rational Chebyshev approximations for the inverse of the error
      function", Math. Comp. 30, pp. 827--830 (1976).
     https://doi.org/10.1090/S0025-5718-1976-0421040-7.

    Algorithm has 3 intervals:
     - 'Normal' region [-0.75, 0.75]
     - Tail region [0.75, 0.9375] U [-0.9375, -0.75]
     - Extreme tail [-1, -0.9375] U [0.9375, 1]
     Normal and tail are both rational approximation of similar order on
     shifted input - these are typically performed in parallel using gather
     loads to obtain correct coefficients depending on interval.  */
  /* vcageq compares absolute values, so these masks are symmetric in x.  */
  uint32x4_t is_tail = vcageq_f32 (x, v_f32 (0.75));
  uint32x4_t extreme_tail = vcageq_f32 (x, v_f32 (0.9375));

  if (unlikely (!v_any_u32 (is_tail)))
    /* Shortcut for if all lanes are in [-0.75, 0.75] - can avoid having to
       gather coefficients. If input is uniform in [-1, 1] then likelihood of
       this is 0.75^4 ~= 0.31.  */
    return notails (x, d);

  /* Select requisite shift depending on interval: polynomial is evaluated on
     x * x - shift.
     Normal shift = 0.5625
     Tail shift   = 0.87890625.  */
  float32x4_t t
      = vfmaq_f32 (vbslq_f32 (is_tail, d->tailshift, v_f32 (-0.5625)), x, x);

  /* Calculate indexes for tbl: tbl is byte-wise, so:
     [0, 1, 2, 3, 4, 5, 6, ....] copies the vector
     Add 4 * i to a group of 4 lanes to copy 32-bit lane i. Each vector stores
     two pairs of coeffs, so we need two idx vectors - one for each pair.  */
  /* is_tail is all-ones per lane where set, so ANDing with 4 yields a
     per-byte offset of 4 exactly in tail lanes.  */
  uint8x16_t off = vandq_u8 (vreinterpretq_u8_u32 (is_tail), vdupq_n_u8 (4));
  uint8x16_t idx_lo = vaddq_u8 (d->idxlo, off);
  uint8x16_t idx_hi = vaddq_u8 (d->idxhi, off);

  /* Load the tables.  */
  float32x4_t p_lo = d->Plo;
  float32x4_t pq = d->PQ;
  float32x4_t qhi = d->Qhi;

  /* Do the lookup (and calculate p3 by masking non-tail lanes).  */
  /* P_29 has one more numerator coefficient than P_10; zeroing it in
     non-tail lanes lets one Horner ladder serve both intervals.  */
  float32x4_t p3 = vreinterpretq_f32_u32 (
      vandq_u32 (is_tail, vreinterpretq_u32_f32 (d->P29_3)));
  float32x4_t p0 = lookup (p_lo, idx_lo), p1 = lookup (p_lo, idx_hi),
	      p2 = lookup (pq, idx_lo), q0 = lookup (pq, idx_hi),
	      q1 = lookup (qhi, idx_lo), q2 = lookup (qhi, idx_hi);

  /* Numerator: Horner evaluation in t, then multiply by x for odd
     symmetry.  */
  float32x4_t p = vfmaq_f32 (p2, p3, t);
  p = vfmaq_f32 (p1, p, t);
  p = vfmaq_f32 (p0, p, t);
  p = vmulq_f32 (x, p);

  /* Denominator: q0 + (q1 + (q2 + t) * t) * t.  */
  float32x4_t q = vfmaq_f32 (q1, vaddq_f32 (q2, t), t);
  q = vfmaq_f32 (q0, q, t);

  if (unlikely (v_any_u32 (extreme_tail)))
    /* At least one lane is in the extreme tail - if input is uniform in
       [-1, 1] the likelihood of this is ~0.23.  */
    /* Blend the table-50 result into extreme-tail lanes only.  */
    return vbslq_f32 (extreme_tail, special (x, d), vdivq_f32 (p, q));

  return vdivq_f32 (p, q);
}
157*5a02ffc3SAndrew Turner 
/* Register signature and ULP/interval tests with the pl test harness.  */
PL_SIG (V, F, 1, erfinv, -0.99, 0.99)
PL_TEST_ULP (V_NAME_F1 (erfinv), 4.49)
/* Test with control lane in each interval.  */
PL_TEST_SYM_INTERVAL_C (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000, 0.5)
PL_TEST_SYM_INTERVAL_C (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000, 0.8)
PL_TEST_SYM_INTERVAL_C (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000, 0.95)
164