/*
 * Single-precision vector log10 function.
 *
 * Copyright (c) 2020-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "poly_advsimd_f32.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  uint32x4_t min_norm;
  uint16x8_t special_bound;
  float32x4_t poly[8];
  float32x4_t inv_ln10, ln2;
  uint32x4_t off, mantissa_mask;
} data = {
  /* Use order 9 for log10(1+x), i.e. order 8 for log10(1+x)/x, with x in
     [-1/3, 1/3] (offset=2/3). Max. relative error: 0x1.068ee468p-25.  */
  .poly = { V4 (-0x1.bcb79cp-3f), V4 (0x1.2879c8p-3f), V4 (-0x1.bcd472p-4f),
	    V4 (0x1.6408f8p-4f), V4 (-0x1.246f8p-4f), V4 (0x1.f0e514p-5f),
	    V4 (-0x1.0fc92cp-4f), V4 (0x1.f5f76ap-5f) },
  .ln2 = V4 (0x1.62e43p-1f),
  .inv_ln10 = V4 (0x1.bcb7b2p-2f),
  .min_norm = V4 (0x00800000),
  .special_bound = V8 (0x7f00), /* asuint32(inf) - min_norm.  */
  .off = V4 (0x3f2aaaab),	/* 0.666667.  */
  .mantissa_mask = V4 (0x007fffff),
};
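/* Added commentary (not in the upstream source): the coefficients above are
   consumed as log10(1+r) ~= r / ln(10) + r^2 * poly(r), so poly approximates
   (log10(1+r) - r/ln(10)) / r^2; its leading coefficient -0x1.bcb79cp-3 is
   close to -1/(2*ln(10)), consistent with that reading.  */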

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, float32x4_t p, float32x4_t r2,
	      uint16x4_t cmp)
{
  /* Fall back to scalar code.  */
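  /* Added commentary: vfmaq_f32 (y, p, r2) completes the fast-path result for
     every lane; v_call_f32 then recomputes only the lanes flagged in the
     (widened) mask with scalar log10f.  */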
  return v_call_f32 (log10f, x, vfmaq_f32 (y, p, r2), vmovl_u16 (cmp));
}

/* Fast implementation of AdvSIMD log10f, using a similar approach to AdvSIMD
   logf: the same offset (i.e. 2/3) and an order-9 polynomial.
   Maximum error: 3.305 ULP (nearest rounding).
   _ZGVnN4v_log10f(0x1.555c16p+0) got 0x1.ffe2fap-4
				 want 0x1.ffe2f4p-4.  */
float32x4_t VPCS_ATTR V_NAME_F1 (log10) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  uint32x4_t u = vreinterpretq_u32_f32 (x);
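  /* Added commentary: u - min_norm wraps around for zero and subnormal inputs
     and is at least 0x7f000000 for negative, infinite and NaN inputs, so the
     top halfword of the difference is >= 0x7f00 exactly for the lanes that
     need the scalar fallback.  vsubhn_u32 keeps just those top halfwords.  */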
  uint16x4_t special = vcge_u16 (vsubhn_u32 (u, d->min_norm),
				 vget_low_u16 (d->special_bound));

  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
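  /* Added commentary: off is the bit pattern of 2/3, so subtracting it and
     arithmetic-shifting right by 23 reads n straight out of the exponent
     field, while masking the mantissa and adding off back reconstructs 1+r
     in [2/3, 4/3).  */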
  u = vsubq_u32 (u, d->off);
  float32x4_t n = vcvtq_f32_s32 (
      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* signextend.  */
  u = vaddq_u32 (vandq_u32 (u, d->mantissa_mask), d->off);
  float32x4_t r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));

  /* y = log10(1+r) + n * log10(2).  */
  float32x4_t r2 = vmulq_f32 (r, r);
  float32x4_t poly = v_pw_horner_7_f32 (r, r2, d->poly);
  /* y = (n * ln2 + r) / ln(10); the r2 * poly term is added at the return
     (or in special_case).  */
  float32x4_t y = vfmaq_f32 (r, d->ln2, n);
  y = vmulq_f32 (y, d->inv_ln10);

  if (unlikely (v_any_u16h (special)))
    return special_case (x, y, poly, r2, special);
  return vfmaq_f32 (y, poly, r2);
}

PL_SIG (V, F, 1, log10, 0.01, 11.1)
PL_TEST_ULP (V_NAME_F1 (log10), 2.81)
PL_TEST_EXPECT_FENV_ALWAYS (V_NAME_F1 (log10))
PL_TEST_INTERVAL (V_NAME_F1 (log10), -0.0, -inf, 100)
PL_TEST_INTERVAL (V_NAME_F1 (log10), 0, 0x1p-126, 100)
PL_TEST_INTERVAL (V_NAME_F1 (log10), 0x1p-126, 0x1p-23, 50000)
PL_TEST_INTERVAL (V_NAME_F1 (log10), 0x1p-23, 1.0, 50000)
PL_TEST_INTERVAL (V_NAME_F1 (log10), 1.0, 100, 50000)
PL_TEST_INTERVAL (V_NAME_F1 (log10), 100, inf, 50000)
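
/* Usage sketch (added commentary, not part of the upstream file): the routine
   is exported under the AdvSIMD vector-ABI name _ZGVnN4v_log10f (see the
   worst-case report above), taking and returning a float32x4_t.  The
   declaration and helper function below are illustrative assumptions, not
   part of this library:

     #include <arm_neon.h>
     #include <math.h>

     float32x4_t _ZGVnN4v_log10f (float32x4_t);

     void
     log10_array (float *restrict y, const float *restrict x, int n)
     {
       int i = 0;
       for (; i + 4 <= n; i += 4)
	 vst1q_f32 (y + i, _ZGVnN4v_log10f (vld1q_f32 (x + i)));
       for (; i < n; i++)	// scalar tail for the last 0-3 elements
	 y[i] = log10f (x[i]);
     }
*/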