/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013 Steven G. Kargl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Optimized by Bruce D. Evans.
 */

/*
 * ld128 version of s_expl.c.  See ../ld80/s_expl.c for most comments.
 */

#include <float.h>

#include "fpmath.h"
#include "math.h"
#include "math_private.h"
#include "k_expl.h"

/* XXX Prevent compilers from erroneously constant folding these: */
static const volatile long double
huge = 0x1p10000L,
tiny = 0x1p-10000L;

static const long double
twom10000 = 0x1p-10000L;

static const long double
/* log(2**16384 - 0.5) rounded towards zero: */
/* log(2**16384 - 0.5 + 1) rounded towards zero for expm1l() is the same: */
o_threshold =  11356.523406294143949491931077970763428L,
/* log(2**(-16381-113-1)) rounded towards zero: */
u_threshold = -11433.462743336297878837243843452621503L;
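
/*
 * o_threshold ~= 16384 * ln(2) is (to within rounding) the overflow
 * threshold, and u_threshold ~= -16495 * ln(2) is the log of half of the
 * smallest ld128 subnormal (0x1p-16494), below which exp(x) rounds to zero
 * in the default rounding mode.  Outside [u_threshold, o_threshold] the
 * results are produced as huge * huge or tiny * tiny so that the expected
 * overflow/underflow and inexact exceptions are raised.
 */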

long double
expl(long double x)
{
	union IEEEl2bits u;
	long double hi, lo, t, twopk;
	int k;
	uint16_t hx, ix;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 13) {		/* |x| >= 8192 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000)	/* x is -Inf or -NaN */
				RETURNF(-1 / x);
			RETURNF(x + x);	/* x is +Inf or +NaN */
		}
		if (x > o_threshold)
			RETURNF(huge * huge);
		if (x < u_threshold)
			RETURNF(tiny * tiny);
	} else if (ix < BIAS - 114) {	/* |x| < 0x1p-114 */
		RETURNF(1 + x);		/* 1 with inexact iff x != 0 */
	}

	ENTERI();

	twopk = 1;
	__k_expl(x, &hi, &lo, &k);
	t = SUM2P(hi, lo);

	/* Scale by 2**k. */
	/*
	 * XXX sparc64 multiplication was so slow that scalbnl() is faster,
	 * but performance on aarch64 and riscv hasn't yet been quantified.
	 */
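	/*
	 * For k >= LDBL_MIN_EXP, 2**k is a normal, so twopk's exponent can
	 * be set directly (with the k == LDBL_MAX_EXP overflow case split
	 * out).  For smaller k the result is subnormal, so scale in two
	 * steps: first by 2**(k + 10000) (a normal), then by twom10000 =
	 * 2**-10000, so that the denormalization loss occurs only in the
	 * final multiplication.
	 */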
	if (k >= LDBL_MIN_EXP) {
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L);
		SET_LDBL_EXPSIGN(twopk, BIAS + k);
		RETURNI(t * twopk);
	} else {
		SET_LDBL_EXPSIGN(twopk, BIAS + k + 10000);
		RETURNI(t * twopk * twom10000);
	}
}

/*
 * Our T1 and T2 are chosen to be approximately the points where method
 * A and method B have the same accuracy.  Tang's T1 and T2 are the
 * points where method A's accuracy changes by a full bit.  For Tang,
 * this drop in accuracy makes method A immediately less accurate than
 * method B, but our larger INTERVALS makes method A 2 bits more
 * accurate so it remains the most accurate method significantly
 * closer to the origin despite losing the full bit in our extended
 * range for it.
 *
 * Split the interval [T1, T2] into two intervals [T1, T3] and [T3, T2].
 * Setting T3 to 0 would require the |x| < 0x1p-113 condition to appear
 * in both subintervals, so set T3 = 2**-5, which places the condition
 * into the [T1, T3] interval.
 *
 * XXX we now do this more to (partially) balance the number of terms
 * in the C and D polys than to avoid checking the condition in both
 * intervals.
 *
 * XXX these micro-optimizations are excessive.
 */
static const double
T1 = -0.1659,				/* ~-30.625/128 * log(2) */
T2 =  0.1659,				/* ~30.625/128 * log(2) */
T3 =  0.03125;

/*
 * Domain [-0.1659, 0.03125], range ~[2.9134e-44, 1.8404e-37]:
 * |(exp(x)-1-x-x**2/2)/x - p(x)| < 2**-122.03
 *
 * XXX none of the long double C or D coeffs except C10 is correctly printed.
 * If you re-print their values in %.35Le format, the result is always
 * different.  For example, the last 2 digits in C3 should be 59, not 67.
 * 67 is apparently from rounding an extra-precision value to 36 decimal
 * places.
 */
static const long double
C3  = 1.66666666666666666666666666666666667e-1L,
C4  = 4.16666666666666666666666666666666645e-2L,
C5  = 8.33333333333333333333333333333371638e-3L,
C6  = 1.38888888888888888888888888891188658e-3L,
C7  = 1.98412698412698412698412697235950394e-4L,
C8  = 2.48015873015873015873015112487849040e-5L,
C9  = 2.75573192239858906525606685484412005e-6L,
C10 = 2.75573192239858906612966093057020362e-7L,
C11 = 2.50521083854417203619031960151253944e-8L,
C12 = 2.08767569878679576457272282566520649e-9L,
C13 = 1.60590438367252471783548748824255707e-10L;

/*
 * XXX this has 1 more coeff than needed.
 * XXX can start the double coeffs but not the double mults at C10.
 * With my coeffs (C10-C17 double; s = best_s):
 * Domain [-0.1659, 0.03125], range ~[-1.1976e-37, 1.1976e-37]:
 * |(exp(x)-1-x-x**2/2)/x - p(x)| ~< 2**-122.65
 */
static const double
C14 = 1.1470745580491932e-11,		/* 0x1.93974a81dae30p-37 */
C15 = 7.6471620181090468e-13,		/* 0x1.ae7f3820adab1p-41 */
C16 = 4.7793721460260450e-14,		/* 0x1.ae7cd18a18eacp-45 */
C17 = 2.8074757356658877e-15,		/* 0x1.949992a1937d9p-49 */
C18 = 1.4760610323699476e-16;		/* 0x1.545b43aabfbcdp-53 */
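
/*
 * C14-C18 (and D14-D17 below) are only doubles and are combined with dx,
 * a double copy of x, in the polynomials in expm1l().  The terms they
 * produce are small enough that evaluating that tail in double precision
 * contributes rounding error below the polynomial approximation error
 * quoted above, while keeping those multiplications cheaper than full
 * long double ones.
 */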

/*
 * Domain [0.03125, 0.1659], range ~[-2.7676e-37, -1.0367e-38]:
 * |(exp(x)-1-x-x**2/2)/x - p(x)| < 2**-121.44
 */
static const long double
D3  = 1.66666666666666666666666666666682245e-1L,
D4  = 4.16666666666666666666666666634228324e-2L,
D5  = 8.33333333333333333333333364022244481e-3L,
D6  = 1.38888888888888888888887138722762072e-3L,
D7  = 1.98412698412698412699085805424661471e-4L,
D8  = 2.48015873015873015687993712101479612e-5L,
D9  = 2.75573192239858944101036288338208042e-6L,
D10 = 2.75573192239853161148064676533754048e-7L,
D11 = 2.50521083855084570046480450935267433e-8L,
D12 = 2.08767569819738524488686318024854942e-9L,
D13 = 1.60590442297008495301927448122499313e-10L;

/*
 * XXX this has 1 more coeff than needed.
 * XXX can start the double coeffs but not the double mults at D11.
 * With my coeffs (D11-D16 double):
 * Domain [0.03125, 0.1659], range ~[-1.1980e-37, 1.1980e-37]:
 * |(exp(x)-1-x-x**2/2)/x - p(x)| ~< 2**-122.65
 */
static const double
D14 = 1.1470726176204336e-11,		/* 0x1.93971dc395d9ep-37 */
D15 = 7.6478532249581686e-13,		/* 0x1.ae892e3d16fcep-41 */
D16 = 4.7628892832607741e-14,		/* 0x1.ad00dfe41feccp-45 */
D17 = 3.0524857220358650e-15;		/* 0x1.d7e8d886df921p-49 */

long double
expm1l(long double x)
{
	union IEEEl2bits u, v;
	long double hx2_hi, hx2_lo, q, r, r1, t, twomk, twopk, x_hi;
	long double x_lo, x2;
	double dr, dx, fn, r2;
	int k, n, n2;
	uint16_t hx, ix;

	/* Filter out exceptional cases. */
	u.e = x;
	hx = u.xbits.expsign;
	ix = hx & 0x7fff;
	if (ix >= BIAS + 7) {		/* |x| >= 128 or x is NaN */
		if (ix == BIAS + LDBL_MAX_EXP) {
			if (hx & 0x8000)	/* x is -Inf or -NaN */
				RETURNF(-1 / x - 1);
			RETURNF(x + x);	/* x is +Inf or +NaN */
		}
		if (x > o_threshold)
			RETURNF(huge * huge);
		/*
		 * expm1l() never underflows, but it must avoid
		 * unrepresentable large negative exponents.  We used a
		 * much smaller threshold for large |x| above than in
		 * expl() so as to handle not so large negative exponents
		 * in the same way as large ones here.
		 */
		if (hx & 0x8000)		/* x <= -128 */
			RETURNF(tiny - 1);	/* good for x < -114ln2 - eps */
	}

	ENTERI();

	if (T1 < x && x < T2) {
		x2 = x * x;
		dx = x;

		if (x < T3) {
			if (ix < BIAS - 113) {	/* |x| < 0x1p-113 */
				/* x (rounded) with inexact if x != 0: */
				RETURNI(x == 0 ? x :
				    (0x1p200 * x + fabsl(x)) * 0x1p-200);
			}
			q = x * x2 * C3 + x2 * x2 * (C4 + x * (C5 + x * (C6 +
			    x * (C7 + x * (C8 + x * (C9 + x * (C10 +
			    x * (C11 + x * (C12 + x * (C13 +
			    dx * (C14 + dx * (C15 + dx * (C16 +
			    dx * (C17 + dx * C18))))))))))))));
		} else {
			q = x * x2 * D3 + x2 * x2 * (D4 + x * (D5 + x * (D6 +
			    x * (D7 + x * (D8 + x * (D9 + x * (D10 +
			    x * (D11 + x * (D12 + x * (D13 +
			    dx * (D14 + dx * (D15 + dx * (D16 +
			    dx * D17)))))))))))));
		}

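		/*
		 * Split x into a 24-bit head x_hi and a tail x_lo so that
		 * x_hi * x_hi is exact in 113-bit precision; hx2_hi + hx2_lo
		 * then represents x*x/2 essentially exactly.  Keeping x and
		 * x*x/2 as hi+lo pairs preserves the accuracy of the leading
		 * terms of expm1(x) = x + x*x/2 + q near the origin.
		 */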
		x_hi = (float)x;
		x_lo = x - x_hi;
		hx2_hi = x_hi * x_hi / 2;
		hx2_lo = x_lo * (x + x_hi) / 2;
		if (ix >= BIAS - 7)
			RETURNI((hx2_hi + x_hi) + (hx2_lo + x_lo + q));
		else
			RETURNI(x + (hx2_lo + q + hx2_hi));
	}

	/* Reduce x to (k*ln2 + endpoint[n2] + r1 + r2). */
	fn = rnint((double)x * INV_L);
	n = irint(fn);
	n2 = (unsigned)n % INTERVALS;
	k = n >> LOG2_INTERVALS;
	r1 = x - fn * L1;
	r2 = fn * -L2;
	r = r1 + r2;
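
	/*
	 * n = k * INTERVALS + n2, with L1 + L2 ~= log(2) / INTERVALS and
	 * INV_L ~= INTERVALS / log(2) from k_expl.h, so
	 * x ~= k * log(2) + endpoint[n2] + (r1 + r2), with |r1 + r2| no
	 * larger than about half an interval.  L1 carries the high bits of
	 * log(2) / INTERVALS so that fn * L1, and hence r1, is exact; r2
	 * holds the tail, and r1 + r2 gives the reduced argument to well
	 * beyond long double precision.
	 */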

	/* Prepare scale factor. */
	v.e = 1;
	v.xbits.expsign = BIAS + k;
	twopk = v.e;

	/*
	 * Evaluate lower terms of
	 * expl(endpoint[n2] + r1 + r2) = tbl[n2] * expl(r1 + r2).
	 */
	dr = r;
	q = r2 + r * r * (A2 + r * (A3 + r * (A4 + r * (A5 + r * (A6 +
	    dr * (A7 + dr * (A8 + dr * (A9 + dr * A10))))))));

	t = tbl[n2].lo + tbl[n2].hi;

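	/*
	 * The rest reconstructs expm1(x) = 2**k * tbl[n2] * expl(r1 + r2) - 1.
	 * The subtraction of 1 is the delicate part: for k near 0 it is
	 * folded into tbl[n2].hi (as -1, or as -2 before the final halving
	 * for k == -1) where it can cancel; for k < -7 or very large k the
	 * 1 is subtracted after scaling, since it then dominates or is
	 * negligible; for intermediate k, twomk = 2**-k is subtracted from
	 * the table value before scaling so the -1 is accounted for without
	 * extra rounding.
	 */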
	if (k == 0) {
		t = SUM2P(tbl[n2].hi - 1, tbl[n2].lo * (r1 + 1) + t * q +
		    tbl[n2].hi * r1);
		RETURNI(t);
	}
	if (k == -1) {
		t = SUM2P(tbl[n2].hi - 2, tbl[n2].lo * (r1 + 1) + t * q +
		    tbl[n2].hi * r1);
		RETURNI(t / 2);
	}
	if (k < -7) {
		t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1));
		RETURNI(t * twopk - 1);
	}
	if (k > 2 * LDBL_MANT_DIG - 1) {
		t = SUM2P(tbl[n2].hi, tbl[n2].lo + t * (q + r1));
		if (k == LDBL_MAX_EXP)
			RETURNI(t * 2 * 0x1p16383L - 1);
		RETURNI(t * twopk - 1);
	}

	v.xbits.expsign = BIAS - k;
	twomk = v.e;

	if (k > LDBL_MANT_DIG - 1)
		t = SUM2P(tbl[n2].hi, tbl[n2].lo - twomk + t * (q + r1));
	else
		t = SUM2P(tbl[n2].hi - twomk, tbl[n2].lo + t * (q + r1));
	RETURNI(t * twopk);
}