1 /*
2 * Helper for double-precision routines which calculate exp(x) - 1 and do not
3 * need special-case handling
4 *
5 * Copyright (c) 2022-2024, Arm Limited.
6 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
7 */
8
9 #ifndef MATH_V_EXPM1_INLINE_H
10 #define MATH_V_EXPM1_INLINE_H
11
12 #include "v_math.h"
13
struct v_expm1_data
{
  /* Even-order polynomial coefficients, pre-broadcast to full vectors so
     they can be used directly as FMA accumulators.  */
  float64x2_t c2, c4, c6, c8;
  float64x2_t invln2;        /* 1/ln2, for computing the reduction index.  */
  int64x2_t exponent_bias;   /* IEEE-754 double bias pattern, 1023 << 52.  */
  /* Remaining coefficients kept as scalars: expm1_inline loads the
     adjacent pairs (c1,c3), (c5,c7) and (c9,c10) with vld1q_f64 and
     indexes them as lanes, so this declaration order must not change.  */
  double c1, c3, c5, c7, c9, c10;
  /* ln2 split into high/low parts for extended-precision reduction;
     loaded as one vector and consumed lane-by-lane.  */
  double ln2[2];
};
22
/* Coefficients generated using fpminimax, with degree=12, on the reduced
   interval [-log(2)/2, log(2)/2].  Layout must match struct v_expm1_data:
   c2/c4/c6/c8 and invln2/exponent_bias are broadcast vectors (V2), the
   rest are scalars read as adjacent lane pairs.  */
#define V_EXPM1_DATA                                                          \
  {                                                                           \
    .c1 = 0x1.5555555555559p-3, .c2 = V2 (0x1.555555555554bp-5),              \
    .c3 = 0x1.111111110f663p-7, .c4 = V2 (0x1.6c16c16c1b5f3p-10),             \
    .c5 = 0x1.a01a01affa35dp-13, .c6 = V2 (0x1.a01a018b4ecbbp-16),            \
    .c7 = 0x1.71ddf82db5bb4p-19, .c8 = V2 (0x1.27e517fc0d54bp-22),            \
    .c9 = 0x1.af5eedae67435p-26, .c10 = 0x1.1f143d060a28ap-29,                \
    .ln2 = { 0x1.62e42fefa39efp-1, 0x1.abc9e3b39803fp-56 },                   \
    .invln2 = V2 (0x1.71547652b82fep0),                                       \
    .exponent_bias = V2 (0x3ff0000000000000),                                 \
  }
35
36 static inline float64x2_t
expm1_inline(float64x2_t x,const struct v_expm1_data * d)37 expm1_inline (float64x2_t x, const struct v_expm1_data *d)
38 {
39 /* Helper routine for calculating exp(x) - 1. */
40
41 float64x2_t ln2 = vld1q_f64 (&d->ln2[0]);
42
43 /* Reduce argument to smaller range:
44 Let i = round(x / ln2)
45 and f = x - i * ln2, then f is in [-ln2/2, ln2/2].
46 exp(x) - 1 = 2^i * (expm1(f) + 1) - 1
47 where 2^i is exact because i is an integer. */
48 float64x2_t n = vrndaq_f64 (vmulq_f64 (x, d->invln2));
49 int64x2_t i = vcvtq_s64_f64 (n);
50 float64x2_t f = vfmsq_laneq_f64 (x, n, ln2, 0);
51 f = vfmsq_laneq_f64 (f, n, ln2, 1);
52
53 /* Approximate expm1(f) using polynomial.
54 Taylor expansion for expm1(x) has the form:
55 x + ax^2 + bx^3 + cx^4 ....
56 So we calculate the polynomial P(f) = a + bf + cf^2 + ...
57 and assemble the approximation expm1(f) ~= f + f^2 * P(f). */
58 float64x2_t f2 = vmulq_f64 (f, f);
59 float64x2_t f4 = vmulq_f64 (f2, f2);
60 float64x2_t lane_consts_13 = vld1q_f64 (&d->c1);
61 float64x2_t lane_consts_57 = vld1q_f64 (&d->c5);
62 float64x2_t lane_consts_910 = vld1q_f64 (&d->c9);
63 float64x2_t p01 = vfmaq_laneq_f64 (v_f64 (0.5), f, lane_consts_13, 0);
64 float64x2_t p23 = vfmaq_laneq_f64 (d->c2, f, lane_consts_13, 1);
65 float64x2_t p45 = vfmaq_laneq_f64 (d->c4, f, lane_consts_57, 0);
66 float64x2_t p67 = vfmaq_laneq_f64 (d->c6, f, lane_consts_57, 1);
67 float64x2_t p03 = vfmaq_f64 (p01, f2, p23);
68 float64x2_t p47 = vfmaq_f64 (p45, f2, p67);
69 float64x2_t p89 = vfmaq_laneq_f64 (d->c8, f, lane_consts_910, 0);
70 float64x2_t p = vfmaq_laneq_f64 (p89, f2, lane_consts_910, 1);
71 p = vfmaq_f64 (p47, f4, p);
72 p = vfmaq_f64 (p03, f4, p);
73
74 p = vfmaq_f64 (f, f2, p);
75
76 /* Assemble the result.
77 expm1(x) ~= 2^i * (p + 1) - 1
78 Let t = 2^i. */
79 int64x2_t u = vaddq_s64 (vshlq_n_s64 (i, 52), d->exponent_bias);
80 float64x2_t t = vreinterpretq_f64_s64 (u);
81
82 /* expm1(x) ~= p * t + (t - 1). */
83 return vfmaq_f64 (vsubq_f64 (t, v_f64 (1.0)), p, t);
84 }
85
86 #endif // MATH_V_EXPM1_INLINE_H
87