/*
 * Double-precision vector log2 function.
 *
 * Copyright (c) 2022-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "include/mathlib.h"
#include "pl_sig.h"
#include "pl_test.h"

#if V_SUPPORTED

#define InvLn2 v_f64 (0x1.71547652b82fep0)
#define N (1 << V_LOG2_TABLE_BITS)
#define OFF v_u64 (0x3fe6900900000000)
#define P(i) v_f64 (__v_log2_data.poly[i])

struct entry
{
  v_f64_t invc;
  v_f64_t log2c;
};

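/* Load the table entry (invc ~= 1/c, log2c ~= log2(c)) selected for each
   lane; the scalar build indexes the table directly, the vector build
   gathers one entry per lane.  */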
static inline struct entry
lookup (v_u64_t i)
{
  struct entry e;
#ifdef SCALAR
  e.invc = __v_log2_data.tab[i].invc;
  e.log2c = __v_log2_data.tab[i].log2c;
#else
  e.invc[0] = __v_log2_data.tab[i[0]].invc;
  e.log2c[0] = __v_log2_data.tab[i[0]].log2c;
  e.invc[1] = __v_log2_data.tab[i[1]].invc;
  e.log2c[1] = __v_log2_data.tab[i[1]].log2c;
#endif
  return e;
}

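/* Defer lanes flagged in cmp to the scalar log2 routine.  */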
VPCS_ATTR
NOINLINE static v_f64_t
specialcase (v_f64_t x, v_f64_t y, v_u64_t cmp)
{
  return v_call_f64 (log2, x, y, cmp);
}

/* Double-precision vector log2 routine. Implements the same algorithm as vector
   log10, with coefficients and table entries scaled in extended precision.
   The maximum observed error is 2.58 ULP:
   __v_log2(0x1.0b556b093869bp+0) got 0x1.fffb34198d9dap-5
				 want 0x1.fffb34198d9ddp-5.  */
VPCS_ATTR
v_f64_t V_NAME (log2) (v_f64_t x)
{
  v_u64_t ix = v_as_u64_f64 (x);
  v_u64_t top = ix >> 48;
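  /* Flag lanes where x is zero, subnormal, negative, infinite or NaN; these
     take the scalar fallback at the end.  */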
  v_u64_t special
    = v_cond_u64 (top - v_u64 (0x0010) >= v_u64 (0x7ff0 - 0x0010));

  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
  v_u64_t tmp = ix - OFF;
  v_u64_t i = (tmp >> (52 - V_LOG2_TABLE_BITS)) % N;
  v_s64_t k = v_as_s64_u64 (tmp) >> 52; /* arithmetic shift.  */
  v_u64_t iz = ix - (tmp & v_u64 (0xfffULL << 52));
  v_f64_t z = v_as_f64_u64 (iz);
  struct entry e = lookup (i);

  /* log2(x) = log1p(z/c-1)/log(2) + log2(c) + k.  */

  v_f64_t r = v_fma_f64 (z, e.invc, v_f64 (-1.0));
  v_f64_t kd = v_to_f64_s64 (k);
  v_f64_t w = v_fma_f64 (r, InvLn2, e.log2c);

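  /* Evaluate the degree-4 polynomial P in r with a pairwise Horner scheme,
     so that log2(1 + r) ~= r/ln(2) + r^2 * P(r); the linear term is already
     accounted for in w.  */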
  v_f64_t r2 = r * r;
  v_f64_t p_23 = v_fma_f64 (P (3), r, P (2));
  v_f64_t p_01 = v_fma_f64 (P (1), r, P (0));
  v_f64_t y = v_fma_f64 (P (4), r2, p_23);
  y = v_fma_f64 (r2, y, p_01);
  y = v_fma_f64 (r2, y, kd + w);

  if (unlikely (v_any_u64 (special)))
    return specialcase (x, y, special);
  return y;
}
VPCS_ALIAS

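/* PL test metadata: routine signature, maximum acceptable ULP error and the
   input intervals to test.  */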
PL_SIG (V, D, 1, log2, 0.01, 11.1)
PL_TEST_ULP (V_NAME (log2), 2.09)
PL_TEST_EXPECT_FENV_ALWAYS (V_NAME (log2))
PL_TEST_INTERVAL (V_NAME (log2), -0.0, -0x1p126, 100)
PL_TEST_INTERVAL (V_NAME (log2), 0x1p-149, 0x1p-126, 4000)
PL_TEST_INTERVAL (V_NAME (log2), 0x1p-126, 0x1p-23, 50000)
PL_TEST_INTERVAL (V_NAME (log2), 0x1p-23, 1.0, 50000)
PL_TEST_INTERVAL (V_NAME (log2), 1.0, 100, 50000)
PL_TEST_INTERVAL (V_NAME (log2), 100, inf, 50000)
#endif