/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013 Steven G. Kargl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Optimized by Bruce D. Evans.
 */
30b83ccea3SSteve Kargl
/**
 * Compute the exponential of x for Intel 80-bit format.  This is based on:
 *
 *   PTP Tang, "Table-driven implementation of the exponential function
 *   in IEEE floating-point arithmetic," ACM Trans. Math. Soft., 15,
 *   144-157 (1989).
 *
 * where the 32 table entries have been expanded to INTERVALS (see below).
 */
40b83ccea3SSteve Kargl
41b83ccea3SSteve Kargl #include <float.h>
42b83ccea3SSteve Kargl
43b83ccea3SSteve Kargl #ifdef __i386__
44b83ccea3SSteve Kargl #include <ieeefp.h>
45b83ccea3SSteve Kargl #endif
46b83ccea3SSteve Kargl
47b83ccea3SSteve Kargl #include "fpmath.h"
48f7cfe68fSSteve Kargl #include "math.h"
49f7cfe68fSSteve Kargl #include "math_private.h"
505f63fbd6SSteve Kargl #include "k_expl.h"
51b83ccea3SSteve Kargl
/* XXX Prevent compilers from erroneously constant folding these: */
static const volatile long double
huge = 0x1p10000L,	/* squared to force overflow to +Inf with flags set */
tiny = 0x1p-10000L;	/* squared to force underflow to +0 with flags set */

/* Exact power of 2 for the second step of two-step subnormal scaling. */
static const long double
twom10000 = 0x1p-10000L;

static const union IEEEl2bits
/* log(2**16384 - 0.5) rounded towards zero: */
/* log(2**16384 - 0.5 + 1) rounded towards zero for expm1l() is the same: */
o_thresholdu = LD80C(0xb17217f7d1cf79ab, 13, 11356.5234062941439488L),
#define o_threshold	(o_thresholdu.e)
/* log(2**(-16381-64-1)) rounded towards zero: */
u_thresholdu = LD80C(0xb21dfe7f09e2baa9, 13, -11399.4985314888605581L);
#define u_threshold	(u_thresholdu.e)
68b83ccea3SSteve Kargl
/*
 * expl(x): return e**x in Intel 80-bit extended precision.
 *
 * Exceptional arguments (NaN, +-Inf, overflow/underflow thresholds, and
 * |x| so small that exp(x) rounds to 1) are handled up front; otherwise
 * the kernel __k_expl() reduces x and returns exp of the reduced
 * argument as a hi+lo pair plus an integer scale exponent k, and the
 * result is (hi + lo) * 2**k.
 */
long double
expl(long double x)
{
	union IEEEl2bits u;
	long double hi, lo, t, twopk;
	int k;
	uint16_t hx, ix;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;	/* sign bit and biased exponent */
	ix = hx & 0x7fff;	/* biased exponent with sign masked off */
	if (ix >= BIAS + 13) {		/* |x| >= 8192 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000) /* x is -Inf, -NaN or unsupported */
				RETURNF(-1 / x); /* -1/-Inf = +0; NaN propagates */
			RETURNF(x + x);	/* x is +Inf, +NaN or unsupported */
		}
		/* Result would overflow (o_threshold ~= log(LDBL_MAX)): */
		if (x > o_threshold)
			RETURNF(huge * huge);
		/* Result would underflow below the smallest subnormal: */
		if (x < u_threshold)
			RETURNF(tiny * tiny);
	} else if (ix < BIAS - 75) {	/* |x| < 0x1p-75 (includes pseudos) */
		RETURNF(1 + x);		/* 1 with inexact iff x != 0 */
	}

	ENTERI();

	/* Kernel: exp(x) = (hi + lo) * 2**k. */
	twopk = 1;
	__k_expl(x, &hi, &lo, &k);
	t = SUM2P(hi, lo);

	/* Scale by 2**k. */
	if (k >= LDBL_MIN_EXP) {
		/* 2**LDBL_MAX_EXP is not representable; split the scaling. */
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L);
		SET_LDBL_EXPSIGN(twopk, BIAS + k);
		RETURNI(t * twopk);
	} else {
		/*
		 * Subnormal result: scale in two steps, by 2**(k+10000)
		 * and then by 2**-10000, so the intermediate product is
		 * normal and only the final multiplication denormalizes
		 * (giving correct gradual underflow and flags).
		 */
		SET_LDBL_EXPSIGN(twopk, BIAS + k + 10000);
		RETURNI(t * twopk * twom10000);
	}
}
1123ffff4baSSteve Kargl
/**
 * Compute expm1l(x) for Intel 80-bit format.  This is based on:
 *
 *   PTP Tang, "Table-driven implementation of the Expm1 function
 *   in IEEE floating-point arithmetic," ACM Trans. Math. Soft., 18,
 *   211-222 (1992).
 */
1203ffff4baSSteve Kargl
/*
 * Our T1 and T2 are chosen to be approximately the points where method
 * A and method B have the same accuracy.  Tang's T1 and T2 are the
 * points where method A's accuracy changes by a full bit.  For Tang,
 * this drop in accuracy makes method A immediately less accurate than
 * method B, but our larger INTERVALS makes method A 2 bits more
 * accurate so it remains the most accurate method significantly
 * closer to the origin despite losing the full bit in our extended
 * range for it.
 */
/* Endpoints of the interval where the power series (method A) is used: */
static const double
T1 = -0.1659,			/* ~-30.625/128 * log(2) */
T2 = 0.1659;			/* ~30.625/128 * log(2) */

/*
 * Polynomial coefficients for method A (values are close to the Taylor
 * coefficients 1/k! of exp, tweaked for minimax accuracy).
 *
 * Domain [-0.1659, 0.1659], range ~[-2.6155e-22, 2.5507e-23]:
 * |(exp(x)-1-x-x**2/2)/x - p(x)| < 2**-71.6
 *
 * XXX the coeffs aren't very carefully rounded, and I get 2.8 more bits,
 * but unlike for ld128 we can't drop any terms.
 */
static const union IEEEl2bits
B3 = LD80C(0xaaaaaaaaaaaaaaab, -3, 1.66666666666666666671e-1L),	/* ~1/3! */
B4 = LD80C(0xaaaaaaaaaaaaaaac, -5, 4.16666666666666666712e-2L);	/* ~1/4! */

static const double
B5 = 8.3333333333333245e-3,	/* 0x1.111111111110cp-7; ~1/5! */
B6 = 1.3888888888888861e-3,	/* 0x1.6c16c16c16c0ap-10; ~1/6! */
B7 = 1.9841269841532042e-4,	/* 0x1.a01a01a0319f9p-13; ~1/7! */
B8 = 2.4801587302069236e-5,	/* 0x1.a01a01a03cbbcp-16; ~1/8! */
B9 = 2.7557316558468562e-6,	/* 0x1.71de37fd33d67p-19; ~1/9! */
B10 = 2.7557315829785151e-7,	/* 0x1.27e4f91418144p-22; ~1/10! */
B11 = 2.5063168199779829e-8,	/* 0x1.ae94fabdc6b27p-26; ~1/11! */
B12 = 2.0887164654459567e-9;	/* 0x1.1f122d6413fe1p-29; ~1/12! */
1553ffff4baSSteve Kargl
/*
 * expm1l(x): return e**x - 1, accurate even where exp(x) is close to 1.
 *
 * Method A (|x| between T1 and T2): evaluate x + x**2/2 + x*p(x)
 * directly, with x split into hi+lo parts so x**2/2 is computed
 * without extra rounding.  Method B (elsewhere): table-driven
 * reduction as in expl(), with the final -1 folded into the sum at a
 * point chosen by the size of the scale exponent k to avoid
 * cancellation error.
 */
long double
expm1l(long double x)
{
	union IEEEl2bits u, v;
	long double fn, hx2_hi, hx2_lo, q, r, r1, r2, t, twomk, twopk, x_hi;
	long double x_lo, x2, z;
	long double x4;
	int k, n, n2;
	uint16_t hx, ix;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;	/* sign bit and biased exponent */
	ix = hx & 0x7fff;	/* biased exponent with sign masked off */
	if (ix >= BIAS + 6) {		/* |x| >= 64 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000) /* x is -Inf, -NaN or unsupported */
				RETURNF(-1 / x - 1); /* expm1(-Inf) = -1 */
			RETURNF(x + x);	/* x is +Inf, +NaN or unsupported */
		}
		/* Result would overflow (same o_threshold as expl()): */
		if (x > o_threshold)
			RETURNF(huge * huge);
		/*
		 * expm1l() never underflows, but it must avoid
		 * unrepresentable large negative exponents.  We used a
		 * much smaller threshold for large |x| above than in
		 * expl() so as to handle not so large negative exponents
		 * in the same way as large ones here.
		 */
		if (hx & 0x8000)	/* x <= -64 */
			RETURNF(tiny - 1); /* good for x < -65ln2 - eps */
	}

	ENTERI();

	if (T1 < x && x < T2) {
		/* Method A: power series about 0. */
		if (ix < BIAS - 74) {	/* |x| < 0x1p-74 (includes pseudos) */
			/* x (rounded) with inexact if x != 0: */
			RETURNI(x == 0 ? x :
			    (0x1p100 * x + fabsl(x)) * 0x1p-100);
		}

		/* q = terms of degree 3 and up, i.e. x**3*(B3 + x*B4 + ...). */
		x2 = x * x;
		x4 = x2 * x2;
		q = x4 * (x2 * (x4 *
		    /*
		     * XXX the number of terms is no longer good for
		     * pairwise grouping of all except B3, and the
		     * grouping is no longer from highest down.
		     */
		    (x2 * B12 + (x * B11 + B10)) +
		    (x2 * (x * B9 + B8) + (x * B7 + B6))) +
		    (x * B5 + B4.e)) + x2 * x * B3.e;

		/*
		 * Split x at float precision so x_hi*x_hi (<= 48
		 * significant bits) is exact, and compute x**2/2 as the
		 * exact hi part plus a correction term.
		 */
		x_hi = (float)x;
		x_lo = x - x_hi;
		hx2_hi = x_hi * x_hi / 2;
		hx2_lo = x_lo * (x + x_hi) / 2;
		/* Sum x + x**2/2 + q, grouped by magnitude of the terms. */
		if (ix >= BIAS - 7)
			RETURNI((hx2_hi + x_hi) + (hx2_lo + x_lo + q));
		else
			RETURNI(x + (hx2_lo + q + hx2_hi));
	}

	/* Method B.  Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */
	fn = rnintl(x * INV_L);		/* nearest multiple of L = ln2/INTERVALS */
	n = irint(fn);
	n2 = (unsigned)n % INTERVALS;	/* table index */
	k = n >> LOG2_INTERVALS;	/* power-of-2 scale exponent */
	r1 = x - fn * L1;		/* L split as L1 + L2 for extra precision */
	r2 = fn * -L2;
	r = r1 + r2;

	/* Prepare scale factor: twopk = 2**k, built via the exponent field. */
	v.e = 1;
	v.xbits.expsign = BIAS + k;
	twopk = v.e;

	/*
	 * Evaluate lower terms of
	 * expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2).
	 */
	z = r * r;
	q = r2 + z * (A2 + r * A3) + z * z * (A4 + r * A5) + z * z * z * A6;

	t = (long double)tbl[n2].lo + tbl[n2].hi;

	if (k == 0) {
		/* Fold the final -1 into the leading table term. */
		t = SUM2P(tbl[n2].hi - 1, tbl[n2].lo * (r1 + 1) + t * q +
		    tbl[n2].hi * r1);
		RETURNI(t);
	}
	if (k == -1) {
		/* Fold the -1 in as -2 before the final halving. */
		t = SUM2P(tbl[n2].hi - 2, tbl[n2].lo * (r1 + 1) + t * q +
		    tbl[n2].hi * r1);
		RETURNI(t / 2);
	}
	if (k < -7) {
		/* 2**k * exp(...) is small relative to 1; subtract 1 last. */
		t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1));
		RETURNI(t * twopk - 1);
	}
	if (k > 2 * LDBL_MANT_DIG - 1) {
		/* 2**k * exp(...) dominates; subtract 1 after scaling. */
		t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1));
		if (k == LDBL_MAX_EXP)	/* 2**k not representable; split it */
			RETURNI(t * 2 * 0x1p16383L - 1);
		RETURNI(t * twopk - 1);
	}

	/* Moderate k: fold the -1 in as -2**-k before scaling by 2**k. */
	v.xbits.expsign = BIAS - k;
	twomk = v.e;

	if (k > LDBL_MANT_DIG - 1)
		/* twomk is below the hi term's precision; fold it into lo. */
		t = SUM2P(tbl[n2].hi, tbl[n2].lo - twomk + t * (q + r1));
	else
		t = SUM2P(tbl[n2].hi - twomk, tbl[n2].lo + t * (q + r1));
	RETURNI(t * twopk);
}
273