/* xref: /freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/expf_1u.c
   (revision f3087bef11543b42e0d69b708f367097a4118d24) */
/*
 * Single-precision vector e^x function.
 *
 * Copyright (c) 2019-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */
#include "v_math.h"
#include "test_defs.h"

/* Constants for the 1-ulp vector expf, gathered in one struct so a single
   pointer (obtained via ptr_barrier) reaches all of them.  */
static const struct data
{
  float32x4_t shift, inv_ln2;
  uint32x4_t exponent_bias;
  float32x4_t c1, c2, c3, c4;
  float32x4_t special_bound, scale_thresh;
  uint32x4_t special_offset, special_bias;
  /* ln2_hi, ln2_lo, c0 and nothing must stay adjacent and in this order:
     _ZGVnN4v_expf_1u loads them as one vector with vld1q_f32 (&d->ln2_hi)
     and addresses them by lane.  'nothing' only pads the load to 4 floats.  */
  float ln2_hi, ln2_lo, c0, nothing;
} data = {
  .ln2_hi = 0x1.62e4p-1f,    /* High part of ln2 (two-step reduction).  */
  .ln2_lo = 0x1.7f7d1cp-20f, /* Low (correction) part of ln2.  */
  /* NOTE(review): .shift is initialized but unreferenced in this file —
     rounding is done with vrndaq_f32 instead of the shift trick.  */
  .shift = V4 (0x1.8p23f),
  .inv_ln2 = V4 (0x1.715476p+0f), /* 1/ln2.  */
  .exponent_bias = V4 (0x3f800000), /* Bit pattern of 1.0f.  */
  .special_bound = V4 (126.0f),  /* |n| above this: 2^n not a normal float.  */
  .scale_thresh = V4 (192.0f),   /* |n| above this: result over/underflows.  */
  .special_offset = V4 (0x83000000),
  .special_bias = V4 (0x7f000000),
  /*  maxerr: 0.36565 +0.5 ulp.  */
  .c0 = 0x1.6a6000p-10f,
  .c1 = V4 (0x1.12718ep-7f),
  .c2 = V4 (0x1.555af0p-5f),
  .c3 = V4 (0x1.555430p-3f),
  .c4 = V4 (0x1.fffff4p-2f),
};
35 
/* Fallback for lanes where |n| > special_bound, i.e. 2^n cannot be
   represented directly as a single float.  p is the polynomial value,
   n the rounded exponent, e = n << 23 its raw exponent-field bits.
   Returns p * 2^n computed via a split scale so intermediate factors
   stay representable.  */
static float32x4_t VPCS_ATTR NOINLINE
specialcase (float32x4_t p, float32x4_t n, uint32x4_t e, const struct data *d)
{
  /* 2^n may overflow, break it up into s1*s2 with p*s1*s2 == p*2^n.
     b is special_offset on lanes where n <= 0 (underflow side), 0 otherwise;
     it shifts part of the exponent from s2 into s1.  */
  uint32x4_t b = vandq_u32 (vclezq_f32 (n), d->special_offset);
  float32x4_t s1 = vreinterpretq_f32_u32 (vaddq_u32 (b, d->special_bias));
  float32x4_t s2 = vreinterpretq_f32_u32 (vsubq_u32 (e, b));
  /* Lanes with |n| > scale_thresh: even the split scale cannot hold the
     result, so use s1*s1 which saturates in the correct direction.  */
  uint32x4_t cmp = vcagtq_f32 (n, d->scale_thresh);
  float32x4_t r1 = vmulq_f32 (s1, s1);
  float32x4_t r0 = vmulq_f32 (vmulq_f32 (p, s1), s2);
  /* Bitwise select: r1 where cmp is set, r0 elsewhere.  */
  return vreinterpretq_f32_u32 ((cmp & vreinterpretq_u32_f32 (r1))
				| (~cmp & vreinterpretq_u32_f32 (r0)));
}
49 
/* Vector (4x float) e^x with a 1-ulp error bound (see TEST_ULP below:
   0.4 + 0.5 rounding ulp).  */
float32x4_t VPCS_ATTR
_ZGVnN4v_expf_1u (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  /* Load { ln2_hi, ln2_lo, c0, pad } as one vector; the fma-by-lane
     intrinsics below pick out the individual constants.  */
  float32x4_t ln2_c0 = vld1q_f32 (&d->ln2_hi);

  /* exp(x) = 2^n * poly(r), with poly(r) in [1/sqrt(2),sqrt(2)]
     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
  float32x4_t z = vmulq_f32 (x, d->inv_ln2);
  /* n = round-to-nearest(x/ln2).  */
  float32x4_t n = vrndaq_f32 (z);
  /* r = x - n*ln2, subtracted in two steps (hi then lo part of ln2) to
     keep the reduction accurate.  */
  float32x4_t r = vfmsq_laneq_f32 (x, n, ln2_c0, 0);
  r = vfmsq_laneq_f32 (r, n, ln2_c0, 1);
  /* e = n << 23: n placed in the IEEE754 single exponent field.  */
  uint32x4_t e = vshlq_n_u32 (vreinterpretq_u32_s32 (vcvtaq_s32_f32 (z)), 23);
  /* scale = 2^n, formed by adding e to the bit pattern of 1.0f.  */
  float32x4_t scale = vreinterpretq_f32_u32 (e + d->exponent_bias);
  /* Lanes with |n| > 126 need the split-scale special case.  */
  uint32x4_t cmp = vcagtq_f32 (n, d->special_bound);
  /* Polynomial in Horner form; c0 is lane 2 of the ln2_c0 vector.  */
  float32x4_t p = vfmaq_laneq_f32 (d->c1, r, ln2_c0, 2);
  p = vfmaq_f32 (d->c2, p, r);
  p = vfmaq_f32 (d->c3, p, r);
  p = vfmaq_f32 (d->c4, p, r);
  p = vfmaq_f32 (v_f32 (1.0f), p, r);
  p = vfmaq_f32 (v_f32 (1.0f), p, r);
  if (unlikely (v_any_u32 (cmp)))
    return specialcase (p, n, e, d);
  return scale * p;
}
75 
/* Test-harness registration: accuracy target and input intervals for the
   routine above; fenv checking is disabled for this variant.  */
TEST_ULP (_ZGVnN4v_expf_1u, 0.4)
TEST_DISABLE_FENV (_ZGVnN4v_expf_1u)
TEST_INTERVAL (_ZGVnN4v_expf_1u, 0, 0xffff0000, 10000)
TEST_SYM_INTERVAL (_ZGVnN4v_expf_1u, 0x1p-14, 0x1p8, 500000)
80