/*
 * Double-precision vector log10(x) function.
 *
 * Copyright (c) 2022-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  uint64x2_t off, sign_exp_mask, offset_lower_bound;
  uint32x4_t special_bound;
  double invln10, log10_2;
  double c1, c3;
  float64x2_t c0, c2, c4;
} data = {
  /* Computed from log coefficients divided by log(10) then rounded to double
     precision.  */
  .c0 = V2 (-0x1.bcb7b1526e506p-3),
  .c1 = 0x1.287a7636be1d1p-3,
  .c2 = V2 (-0x1.bcb7b158af938p-4),
  .c3 = 0x1.63c78734e6d07p-4,
  .c4 = V2 (-0x1.287461742fee4p-4),
  .invln10 = 0x1.bcb7b1526e50ep-2,
  .log10_2 = 0x1.34413509f79ffp-2,
  .off = V2 (0x3fe6900900000000),
  .sign_exp_mask = V2 (0xfff0000000000000),
  /* Lower bound is 0x0010000000000000. For optimised register use, subnormals
     are detected after the offset has been subtracted, so the stored value is
     lower bound - offset (which wraps around).  */
  .offset_lower_bound = V2 (0x0010000000000000 - 0x3fe6900900000000),
  /* Top 32 bits of asuint64(inf) - 0x0010000000000000.  */
  .special_bound = V4 (0x7fe00000),
};

#define N (1 << V_LOG10_TABLE_BITS)
#define IndexMask (N - 1)

struct entry
{
  float64x2_t invc;
  float64x2_t log10c;
};

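/* Per-lane table lookup: the top V_LOG10_TABLE_BITS bits of the mantissa of
   the offset-adjusted input select a subinterval, whose entry holds 1/c and
   log10(c) for a point c near the subinterval's centre.  */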
static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  uint64_t i0
      = (vgetq_lane_u64 (i, 0) >> (52 - V_LOG10_TABLE_BITS)) & IndexMask;
  uint64_t i1
      = (vgetq_lane_u64 (i, 1) >> (52 - V_LOG10_TABLE_BITS)) & IndexMask;
  float64x2_t e0 = vld1q_f64 (&__v_log10_data.table[i0].invc);
  float64x2_t e1 = vld1q_f64 (&__v_log10_data.table[i1].invc);
  e.invc = vuzp1q_f64 (e0, e1);
  e.log10c = vuzp2q_f64 (e0, e1);
  return e;
}

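/* Special-case handler: recover x by adding the offset back, complete the
   fast-path result hi + y*r2, then re-evaluate the lanes flagged in 'special'
   (zero, negative, subnormal and non-finite inputs) with the scalar log10.  */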
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t hi, uint64x2_t u_off, float64x2_t y, float64x2_t r2,
	      uint32x2_t special, const struct data *d)
{
  float64x2_t x = vreinterpretq_f64_u64 (vaddq_u64 (u_off, d->off));
  return v_call_f64 (log10, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (special));
}

/* Fast implementation of double-precision vector log10;
   a slight modification of double-precision vector log.
   Max ULP error: < 2.5 ulp (nearest rounding).
   Maximum measured at 2.46 ulp for x in [0.96, 0.97]
   _ZGVnN2v_log10(0x1.13192407fcb46p+0) got 0x1.fff6be3cae4bbp-6
				       want 0x1.fff6be3cae4b9p-6.  */
float64x2_t VPCS_ATTR V_NAME_D1 (log10) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* To avoid having to mov x out of the way, keep u after offset has been
     applied, and recover x by adding the offset back in the special-case
     handler.  */
  uint64x2_t u = vreinterpretq_u64_f64 (x);
  uint64x2_t u_off = vsubq_u64 (u, d->off);

  /* x = 2^k z; where z is in range [OFF, 2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
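  /* k is the signed exponent/sign field of u_off.  Subtracting those bits
     from x rescales it so that z = x * 2^-k falls in [OFF, 2*OFF).  */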
  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (u_off), 52);
  uint64x2_t iz = vsubq_u64 (u, vandq_u64 (u_off, d->sign_exp_mask));
  float64x2_t z = vreinterpretq_f64_u64 (iz);

  struct entry e = lookup (u_off);

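  /* Flag special lanes using only the top halves of u_off: vsubhn narrows
     u_off - offset_lower_bound to its high 32 bits, which are >= special_bound
     exactly for zero, subnormal, negative and non-finite inputs.  */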
  uint32x2_t special = vcge_u32 (vsubhn_u64 (u_off, d->offset_lower_bound),
				 vget_low_u32 (d->special_bound));

  /* log10(x) = log1p(z/c-1)/log(10) + log10(c) + k*log10(2).  */
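  /* r = z * (1/c) - 1 is small because c is close to z, so the log1p term can
     be approximated by a short polynomial in r.  */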
  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
  float64x2_t kd = vcvtq_f64_s64 (k);

  /* hi = r / log(10) + log10(c) + k*log10(2).
     Constants in v_log10_data.c are computed (in extended precision) as
     e.log10c := e.logc * invln10.  */
  float64x2_t cte = vld1q_f64 (&d->invln10);
  float64x2_t hi = vfmaq_laneq_f64 (e.log10c, r, cte, 0);

  /* Add the exponent contribution: hi += k * log10(2).  */
  hi = vfmaq_laneq_f64 (hi, kd, cte, 1);

  /* y = r2*(c0 + r*c1 + r2*(c2 + r*c3 + r2*c4)) + hi.  */
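  /* c1/c3 (and invln10/log10_2 above) are stored as adjacent doubles so a
     single vector load exposes both as lanes for the lane-indexed fmas.  */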
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t odd_coeffs = vld1q_f64 (&d->c1);
  float64x2_t y = vfmaq_laneq_f64 (d->c2, r, odd_coeffs, 1);
  float64x2_t p = vfmaq_laneq_f64 (d->c0, r, odd_coeffs, 0);
  y = vfmaq_f64 (y, d->c4, r2);
  y = vfmaq_f64 (p, y, r2);

  if (unlikely (v_any_u32h (special)))
    return special_case (hi, u_off, y, r2, special, d);
  return vfmaq_f64 (hi, y, r2);
}

TEST_SIG (V, D, 1, log10, 0.01, 11.1)
TEST_ULP (V_NAME_D1 (log10), 1.97)
TEST_INTERVAL (V_NAME_D1 (log10), -0.0, -inf, 1000)
TEST_INTERVAL (V_NAME_D1 (log10), 0, 0x1p-149, 1000)
TEST_INTERVAL (V_NAME_D1 (log10), 0x1p-149, 0x1p-126, 4000)
TEST_INTERVAL (V_NAME_D1 (log10), 0x1p-126, 0x1p-23, 50000)
TEST_INTERVAL (V_NAME_D1 (log10), 0x1p-23, 1.0, 50000)
TEST_INTERVAL (V_NAME_D1 (log10), 1.0, 100, 50000)
TEST_INTERVAL (V_NAME_D1 (log10), 100, inf, 50000)
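
/* Usage sketch (not part of the library): _ZGVnN2v_log10, referenced in the
   error comment above, is the AArch64 vector-ABI name this routine is exposed
   under.  The prototype and attribute below are assumptions for illustration
   only; linking against the built library is required.

     #include <arm_neon.h>

     __attribute__ ((aarch64_vector_pcs)) float64x2_t
     _ZGVnN2v_log10 (float64x2_t);

     double
     log10_lane0 (double a, double b)
     {
       float64x2_t x = vsetq_lane_f64 (b, vdupq_n_f64 (a), 1);
       float64x2_t y = _ZGVnN2v_log10 (x);
       return vgetq_lane_f64 (y, 0);
     }
*/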