/*
 * Double-precision vector log2 function.
 *
 * Copyright (c) 2022-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  uint64x2_t off, sign_exp_mask, offset_lower_bound;
  uint32x4_t special_bound;
  float64x2_t c0, c2;
  double c1, c3, invln2, c4;
} data = {
  /* Each coefficient was generated to approximate log(r) for |r| < 0x1.fp-9
     and N = 128, then scaled by log2(e) in extended precision and rounded
     back to double precision, since log2(x) = log2(e) * log(x).  */
  .c0 = V2 (-0x1.71547652b8300p-1),
  .c1 = 0x1.ec709dc340953p-2,
  .c2 = V2 (-0x1.71547651c8f35p-2),
  .c3 = 0x1.2777ebe12dda5p-2,
  .c4 = -0x1.ec738d616fe26p-3,
  .invln2 = 0x1.71547652b82fep0,
  .off = V2 (0x3fe6900900000000),
  .sign_exp_mask = V2 (0xfff0000000000000),
  /* The lower bound of the non-special range is 0x0010000000000000 (the
     smallest positive normal).  For optimised register use, subnormals are
     detected after the offset has been subtracted, so the bound is stored
     as lower bound - offset (which wraps around).  */
  .offset_lower_bound = V2 (0x0010000000000000 - 0x3fe6900900000000),
  .special_bound = V4 (0x7fe00000), /* Top word of asuint64(inf)
				       - asuint64(0x1p-1022).  */
};
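
/* Editorial note on the wrap-around bound above: modulo 2^64,
   0x0010000000000000 - 0x3fe6900900000000 = 0xc0296ff700000000.  Since
   u_off - offset_lower_bound == asuint64 (x) - 0x0010000000000000 mod 2^64,
   a single unsigned comparison in the routine below flags every lane with
   x < 0x1p-1022 (zero, negative, subnormal) or x >= inf (inf, NaN).  */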

#define N (1 << V_LOG2_TABLE_BITS)
#define IndexMask (N - 1)

struct entry
{
  float64x2_t invc;
  float64x2_t log2c;
};

/* Look up invc and log2c for the subinterval of each lane: two pair-loads
   from the table, deinterleaved into per-field vectors.  */
static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  uint64_t i0
      = (vgetq_lane_u64 (i, 0) >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
  uint64_t i1
      = (vgetq_lane_u64 (i, 1) >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
  float64x2_t e0 = vld1q_f64 (&__v_log2_data.table[i0].invc);
  float64x2_t e1 = vld1q_f64 (&__v_log2_data.table[i1].invc);
  e.invc = vuzp1q_f64 (e0, e1);
  e.log2c = vuzp2q_f64 (e0, e1);
  return e;
}
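
/* A scalar sketch of the per-lane index computation above (an editorial
   illustration, kept out of the build; asuint64 is open-coded via memcpy so
   the sketch stands alone).  */
#if 0
static inline uint64_t
log2_table_index (double x)
{
  uint64_t u;
  __builtin_memcpy (&u, &x, sizeof u);	   /* asuint64 (x).  */
  uint64_t u_off = u - 0x3fe6900900000000; /* Subtract the offset.  */
  /* Bits [52 - V_LOG2_TABLE_BITS, 51] of u_off pick one of the N
     subintervals of [Off, 2*Off).  */
  return (u_off >> (52 - V_LOG2_TABLE_BITS)) & IndexMask;
}
#endif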

/* Fall back to scalar log2 for lanes flagged as special, recovering x by
   adding the offset back in.  */
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t hi, uint64x2_t u_off, float64x2_t y, float64x2_t r2,
	      uint32x2_t special, const struct data *d)
{
  float64x2_t x = vreinterpretq_f64_u64 (vaddq_u64 (u_off, d->off));
  return v_call_f64 (log2, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (special));
}

/* Double-precision vector log2 routine. Implements the same algorithm as
   vector log10, with coefficients and table entries scaled in extended
   precision. The maximum observed error is 2.58 ULP:
   _ZGVnN2v_log2(0x1.0b556b093869bp+0) got 0x1.fffb34198d9dap-5
				      want 0x1.fffb34198d9ddp-5.  */
float64x2_t VPCS_ATTR V_NAME_D1 (log2) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* To avoid having to mov x out of the way, keep u after the offset has
     been applied, and recover x by adding the offset back in the
     special-case handler.  */
  uint64x2_t u = vreinterpretq_u64_f64 (x);
  uint64x2_t u_off = vsubq_u64 (u, d->off);

  /* x = 2^k z; where z is in range [Off, 2*Off) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (u_off), 52);
  uint64x2_t iz = vsubq_u64 (u, vandq_u64 (u_off, d->sign_exp_mask));
  float64x2_t z = vreinterpretq_f64_u64 (iz);

  struct entry e = lookup (u_off);

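  /* The special-input test compares only the top 32 bits of each lane:
     vsubhn_u64 narrows u_off - offset_lower_bound to its high word, which
     is then checked against special_bound.  */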
  uint32x2_t special = vcge_u32 (vsubhn_u64 (u_off, d->offset_lower_bound),
				 vget_low_u32 (d->special_bound));

  /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k.  */
  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
  float64x2_t kd = vcvtq_f64_s64 (k);

  float64x2_t invln2_and_c4 = vld1q_f64 (&d->invln2);
  float64x2_t hi
      = vfmaq_laneq_f64 (vaddq_f64 (e.log2c, kd), r, invln2_and_c4, 0);
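
  /* At this point hi = kd + log2(c) + r / ln(2) holds the leading terms;
     the polynomial below supplies the correction of order r^2.  */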

  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t odd_coeffs = vld1q_f64 (&d->c1);
  float64x2_t y = vfmaq_laneq_f64 (d->c2, r, odd_coeffs, 1);
  float64x2_t p = vfmaq_laneq_f64 (d->c0, r, odd_coeffs, 0);
  y = vfmaq_laneq_f64 (y, r2, invln2_and_c4, 1);
  y = vfmaq_f64 (p, r2, y);
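
  /* Pairwise evaluation: y = (c0 + c1 r) + r^2 (c2 + c3 r + c4 r^2), so the
     final result below is hi + y * r^2.  */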

  if (unlikely (v_any_u32h (special)))
    return special_case (hi, u_off, y, r2, special, d);
  return vfmaq_f64 (hi, y, r2);
}
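
/* A minimal usage sketch (editorial, kept out of the build).  It assumes
   V_NAME_D1 (log2) expands to the vector-ABI symbol _ZGVnN2v_log2, as it
   does in this AdvSIMD build; expected output is 3.0 and -1.0.  */
#if 0
#include <arm_neon.h>
#include <stdio.h>

float64x2_t _ZGVnN2v_log2 (float64x2_t);

int
main (void)
{
  double in[2] = { 8.0, 0.5 };
  float64x2_t y = _ZGVnN2v_log2 (vld1q_f64 (in));
  printf ("%f %f\n", vgetq_lane_f64 (y, 0), vgetq_lane_f64 (y, 1));
  return 0;
}
#endif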

TEST_SIG (V, D, 1, log2, 0.01, 11.1)
TEST_ULP (V_NAME_D1 (log2), 2.09)
TEST_INTERVAL (V_NAME_D1 (log2), -0.0, -0x1p126, 100)
TEST_INTERVAL (V_NAME_D1 (log2), 0x1p-149, 0x1p-126, 4000)
TEST_INTERVAL (V_NAME_D1 (log2), 0x1p-126, 0x1p-23, 50000)
TEST_INTERVAL (V_NAME_D1 (log2), 0x1p-23, 1.0, 50000)
TEST_INTERVAL (V_NAME_D1 (log2), 1.0, 100, 50000)
TEST_INTERVAL (V_NAME_D1 (log2), 100, inf, 50000)