/*
 * Single-precision vector log function.
 *
 * Copyright (c) 2019-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float poly_0135[4];
  float poly_246[3];
  float ln2;
  uint32_t off, lower;
} data = {
  .poly_0135 = {
    /* Coefficients copied from the AdvSIMD routine in math/, then rearranged so
       that coeffs 0, 1, 3 and 5 can be loaded as a single quad-word, hence used
       with the _lane variant of the MLA intrinsic.  */
    -0x1.3e737cp-3f, 0x1.5a9aa2p-3f, 0x1.961348p-3f, 0x1.555d7cp-2f
  },
  .poly_246 = { -0x1.4f9934p-3f, -0x1.00187cp-2f, -0x1.ffffc8p-2f },
  .ln2 = 0x1.62e43p-1f,
  /* Offset is asuint32 (2/3), so that the reduced argument below satisfies
     2/3 < 1+r < 4/3.  */
  .off = 0x3f2aaaab,
  /* Lower bound is the smallest positive normal float 0x00800000. For
     optimised register use, subnormals are detected after the offset has been
     subtracted, so the lower bound is 0x00800000 - offset (which wraps
     around).  */
  .lower = 0x00800000 - 0x3f2aaaab
};

#define Thresh (0x7f000000) /* asuint32(inf) - 0x00800000.  */
#define Mask (0x007fffff) /* Mantissa bits.  */

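/* Scalar fallback: lanes flagged in cmp are recomputed with scalar logf (the
   offset is added back to u_off to recover the original input); all other
   lanes keep the already-computed vector result p + r2 * y.  */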
static svfloat32_t NOINLINE
special_case (svuint32_t u_off, svfloat32_t p, svfloat32_t r2, svfloat32_t y,
	      svbool_t cmp)
{
  return sv_call_f32 (
      logf, svreinterpret_f32 (svadd_x (svptrue_b32 (), u_off, data.off)),
      svmla_x (svptrue_b32 (), p, r2, y), cmp);
}

/* Optimised implementation of SVE logf, using the same algorithm and
   polynomial as the AdvSIMD routine. Maximum error is 3.34 ULPs:
   SV_NAME_F1 (log)(0x1.557298p+0) got 0x1.26edecp-2
				  want 0x1.26ede6p-2.  */
svfloat32_t SV_NAME_F1 (log) (svfloat32_t x, const svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

  svuint32_t u_off = svreinterpret_u32 (x);

  u_off = svsub_x (pg, u_off, d->off);
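  /* With off and lower already subtracted, a single unsigned compare against
     Thresh flags every lane whose input is zero, negative, subnormal, infinite
     or NaN.  */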
  svbool_t cmp = svcmpge (pg, svsub_x (pg, u_off, d->lower), Thresh);

  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
  svfloat32_t n = svcvt_f32_x (
      pg, svasr_x (pg, svreinterpret_s32 (u_off), 23)); /* Sign-extend.  */

  svuint32_t u = svand_x (pg, u_off, Mask);
  u = svadd_x (pg, u, d->off);
  svfloat32_t r = svsub_x (pg, svreinterpret_f32 (u), 1.0f);
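  /* Worked example (illustrative): for x = 2.0f, asuint32 (x) = 0x40000000, so
     u_off = 0x00d55555, n = asint32 (u_off) >> 23 = 1, and
     u = (u_off & Mask) + off = 0x3f800000, hence r = 1.0f - 1.0f = 0 and the
     result reduces to n * ln2 = ln(2).  */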

  /* y = log(1+r) + n*ln2.  */
  svfloat32_t r2 = svmul_x (svptrue_b32 (), r, r);
  /* n*ln2 + r + r2*(P6 + r*P5 + r2*(P4 + r*P3 + r2*(P2 + r*P1 + r2*P0))).  */
  svfloat32_t p_0135 = svld1rq (svptrue_b32 (), &d->poly_0135[0]);
  svfloat32_t p = svmla_lane (sv_f32 (d->poly_246[0]), r, p_0135, 1);
  svfloat32_t q = svmla_lane (sv_f32 (d->poly_246[1]), r, p_0135, 2);
  svfloat32_t y = svmla_lane (sv_f32 (d->poly_246[2]), r, p_0135, 3);
  p = svmla_lane (p, r2, p_0135, 0);
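  /* At this point p = P2 + r*P1 + r2*P0, q = P4 + r*P3 and y = P6 + r*P5;
     two more FMAs below fold them into the nested form above.  */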

  q = svmla_x (pg, q, r2, p);
  y = svmla_x (pg, y, r2, q);
  p = svmla_x (pg, r, n, d->ln2);
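  /* p is reused here to hold n*ln2 + r; its previous value has already been
     folded into y via q.  */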

  if (unlikely (svptest_any (pg, cmp)))
    return special_case (u_off, p, r2, y, cmp);
  return svmla_x (pg, p, r2, y);
}
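
/* Usage sketch (illustrative): with the SVE ACLE available, the routine
   operates on whole vectors, e.g.

     svbool_t pg = svptrue_b32 ();
     svfloat32_t y = SV_NAME_F1 (log) (svdup_f32 (2.0f), pg);

   leaves an approximation of ln(2) in every active lane of y.  */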

TEST_SIG (SV, F, 1, log, 0.01, 11.1)
TEST_ULP (SV_NAME_F1 (log), 2.85)
TEST_DISABLE_FENV (SV_NAME_F1 (log))
TEST_INTERVAL (SV_NAME_F1 (log), -0.0, -inf, 100)
TEST_INTERVAL (SV_NAME_F1 (log), 0, 0x1p-126, 100)
TEST_INTERVAL (SV_NAME_F1 (log), 0x1p-126, 0x1p-23, 50000)
TEST_INTERVAL (SV_NAME_F1 (log), 0x1p-23, 1.0, 50000)
TEST_INTERVAL (SV_NAME_F1 (log), 1.0, 100, 50000)
TEST_INTERVAL (SV_NAME_F1 (log), 100, inf, 50000)
CLOSE_SVE_ATTR