/*
 * Double-precision SVE 10^x function.
 *
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "pl_sig.h"
#include "pl_test.h"
#include "poly_sve_f64.h"

#define SpecialBound 307.0 /* floor (log10 (2^1023)).  */

static const struct data
{
  double poly[5];
  double shift, log10_2, log2_10_hi, log2_10_lo, scale_thres, special_bound;
} data = {
  /* Coefficients generated using Remez algorithm.
     rel error: 0x1.9fcb9b3p-60
     abs error: 0x1.a20d9598p-60 in [ -log10(2)/128, log10(2)/128 ]
     max ulp err 0.52 +0.5.  */
  .poly = { 0x1.26bb1bbb55516p1, 0x1.53524c73cd32ap1, 0x1.0470591daeafbp1,
            0x1.2bd77b1361ef6p0, 0x1.142b5d54e9621p-1 },
  /* 1.5*2^46+1023. This value is further explained below.  */
  .shift = 0x1.800000000ffc0p+46,
  .log10_2 = 0x1.a934f0979a371p1,     /* 1/log10(2).  */
  .log2_10_hi = 0x1.34413509f79ffp-2, /* log10(2).  */
  .log2_10_lo = -0x1.9dc1da994fd21p-59,
  .scale_thres = 1280.0,
  .special_bound = SpecialBound,
};
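
/* How the shift constant works: the ulp of 1.5*2^46 is 2^-6, so z = shift +
   x/log10(2) rounds x/log10(2) to a multiple of 1/64 and keeps it in the low
   mantissa bits of z, while the +1023 term pre-biases the exponent.  The low
   17 bits of asuint (z) therefore hold the FEXPA table index in bits [5:0]
   and the biased exponent of the scale in bits [16:6], which is the input
   layout the FEXPA instruction expects.  */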

#define SpecialOffset 0x6000000000000000 /* 0x1p513.  */
/* SpecialBias1 - SpecialBias2 = asuint(1.0).  */
#define SpecialBias1 0x7000000000000000 /* 0x1p769.  */
#define SpecialBias2 0x3010000000000000 /* 0x1p-254.  */
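
/* Since SpecialBias1 - SpecialBias2 == asuint (1.0), the rescaling in
   special_case preserves the product: with s1 = asdouble (SpecialBias1 - b)
   and s2 = asdouble (asuint (s) - SpecialBias2 + b), s1 * s2 == s for either
   value of b.  With b == 0 (n > 0), s1 == 0x1p769 and s2 == s * 0x1p-769, so
   the intermediate s2 + s2*y cannot overflow; with b == SpecialOffset
   (n <= 0), s1 == 0x1p-767 and s2 == s * 0x1p767, keeping the intermediate
   away from underflow.  */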

/* Update of both special and non-special cases, if any special case is
   detected.  */
static inline svfloat64_t
special_case (svbool_t pg, svfloat64_t s, svfloat64_t y, svfloat64_t n,
              const struct data *d)
{
  /* s=2^n may overflow, break it up into s=s1*s2,
     such that exp = s + s*y can be computed as s1*(s2+s2*y)
     and s1*s1 overflows only if n>0.  */

  /* If n<=0 then set b to SpecialOffset (0x6000000000000000), 0 otherwise.  */
  svbool_t p_sign = svcmple (pg, n, 0.0); /* n <= 0.  */
  svuint64_t b = svdup_u64_z (p_sign, SpecialOffset);

  /* Set s1 to generate overflow depending on sign of exponent n.  */
  svfloat64_t s1 = svreinterpret_f64 (svsubr_x (pg, b, SpecialBias1));
  /* Offset s to avoid overflow in final result if n is below threshold.  */
  svfloat64_t s2 = svreinterpret_f64 (
      svadd_x (pg, svsub_x (pg, svreinterpret_u64 (s), SpecialBias2), b));

  /* |n| > 1280 => 2^(n) overflows.  */
  svbool_t p_cmp = svacgt (pg, n, d->scale_thres);

  svfloat64_t r1 = svmul_x (pg, s1, s1);
  svfloat64_t r2 = svmla_x (pg, s2, s2, y);
  svfloat64_t r0 = svmul_x (pg, r2, s1);

  return svsel (p_cmp, r1, r0);
}
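
/* Example of the threshold branch above: for x = 400, n is about 1328.8 > 1280,
   so b == 0 and r1 = s1*s1 = (0x1p769)^2 overflows to +inf, the correct result
   for 10^400; for x = -400, b == SpecialOffset and r1 = (0x1p-767)^2
   underflows to +0.  */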

/* Fast vector implementation of exp10 using FEXPA instruction.
   Maximum measured error is 1.02 ulp.
   SV_NAME_D1 (exp10)(-0x1.2862fec805e58p+2) got 0x1.885a89551d782p-16
                                             want 0x1.885a89551d781p-16.  */
svfloat64_t SV_NAME_D1 (exp10) (svfloat64_t x, svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);
  svbool_t no_big_scale = svacle (pg, x, d->special_bound);
  svbool_t special = svnot_z (pg, no_big_scale);

  /* n = round(x/(log10(2)/N)).  */
  svfloat64_t shift = sv_f64 (d->shift);
  svfloat64_t z = svmla_x (pg, shift, x, d->log10_2);
  svfloat64_t n = svsub_x (pg, z, shift);

  /* r = x - n*log10(2)/N.  */
  svfloat64_t log2_10 = svld1rq (svptrue_b64 (), &d->log2_10_hi);
  svfloat64_t r = x;
  r = svmls_lane (r, n, log2_10, 0);
  r = svmls_lane (r, n, log2_10, 1);
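  /* log10(2) is stored in two pieces (log2_10_hi, log2_10_lo) and subtracted
     in two steps above, a Cody-Waite style reduction that limits the rounding
     error in r despite the cancellation in x - n*log10(2)/N.  */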

  /* scale = 2^(n/N), computed using FEXPA. FEXPA does not propagate NaNs, so
     for consistent NaN handling we have to manually propagate them. This
     comes at significant performance cost.  */
  svuint64_t u = svreinterpret_u64 (z);
  svfloat64_t scale = svexpa (u);

  /* Approximate exp10(r) using polynomial.  */
  svfloat64_t r2 = svmul_x (pg, r, r);
  svfloat64_t y = svmla_x (pg, svmul_x (pg, r, d->poly[0]), r2,
                           sv_pairwise_poly_3_f64_x (pg, r, r2, d->poly + 1));
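  /* y approximates 10^r - 1 on the reduced interval: poly[0] is ln(10), the
     first-order coefficient of 10^r = 1 + r*ln(10) + ..., so the result can
     be assembled below as scale + scale*y = 2^n * 10^r.  */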

  /* Assemble result as exp10(x) = 2^n * exp10(r).  If |x| > SpecialBound
     multiplication may overflow, so use special case routine.  */
  if (unlikely (svptest_any (pg, special)))
    {
      /* FEXPA zeroes the sign bit, however the sign is meaningful to the
         special case function so needs to be copied.
         e = sign bit of u << 46.  */
      svuint64_t e = svand_x (pg, svlsl_x (pg, u, 46), 0x8000000000000000);
      /* Copy sign to scale.  */
      scale = svreinterpret_f64 (svadd_x (pg, e, svreinterpret_u64 (scale)));
      return special_case (pg, scale, y, n, d);
    }

  /* No special case.  */
  return svmla_x (pg, scale, scale, y);
}

PL_SIG (SV, D, 1, exp10, -9.9, 9.9)
PL_TEST_ULP (SV_NAME_D1 (exp10), 0.52)
PL_TEST_SYM_INTERVAL (SV_NAME_D1 (exp10), 0, 307, 10000)
PL_TEST_SYM_INTERVAL (SV_NAME_D1 (exp10), 307, inf, 1000)
123