
Searched refs:inv_ln2 (Results 1 – 14 of 14) sorted by relevance

/freebsd/contrib/arm-optimized-routines/math/aarch64/sve/
sv_expm1f_inline.h
    19: float c0, inv_ln2, c1, c3, special_bound;  (member)
    25: .c0 = 0x1.fffffep-2, .c1 = 0x1.5554aep-3, .inv_ln2 = 0x1.715476p+0f, \
    44: svfloat32_t j = svmul_x (svptrue_b32 (), x, d->inv_ln2);  (in expm1f_inline())
sv_expf_inline.h
    18: float c1, c3, inv_ln2;  (member)
    29: .c3 = 0x1.573e2ep-5f, .c4 = 0x1.0e4020p-7f, .inv_ln2 = 0x1.715476p+0f, \
    45: svfloat32_t z = svmad_x (pg, sv_f32 (d->inv_ln2), x, d->shift);  (in expf_inline())
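
Both inline helpers above match on the same constant: inv_ln2 = 0x1.715476p+0f is 1/ln(2) rounded to single precision, used to rewrite exp(x) as 2^n * exp(r) with n = round(x / ln2) and a small remainder r. A minimal scalar sketch of that reduction follows, with a plain Taylor polynomial standing in for the library's tuned c0..c4 coefficients; the function name and ln2 hi/lo split values are illustrative, not taken from these files:

    #include <math.h>

    /* Illustrative scalar version of the reduction above; the vector code
       uses tuned minimax coefficients instead of these Taylor terms.
       exp(x) = 2^n * exp(r), n = round(x / ln2), r = x - n*ln2.  */
    static float
    expf_sketch (float x)
    {
      const float inv_ln2 = 0x1.715476p+0f;  /* 1/ln(2), as in the structs above.  */
      const float ln2_hi = 0x1.62e4p-1f;     /* ln(2) split into hi + lo parts  */
      const float ln2_lo = 0x1.7f7d1cp-20f;  /* so r = x - n*ln2 stays accurate.  */
      float n = roundf (x * inv_ln2);
      float r = (x - n * ln2_hi) - n * ln2_lo;  /* |r| <= ln2/2  */
      /* Degree-4 Taylor polynomial for exp(r) in Horner form.  */
      float p = 1.0f
                + r * (1.0f + r * (0x1p-1f + r * (0x1.555556p-3f + r * 0x1.555556p-5f)));
      return ldexpf (p, (int) n);  /* scale by 2^n  */
    }

The vector routines replace roundf and ldexpf with a single rounding instruction, or with the shift-constant trick sketched after the sinh.c entry below, plus bit manipulation of the exponent field.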
expm1f.c
    20: float c0, inv_ln2, c1, c3, special_bound;  (member)
    25: .c4 = 0x1.6b55a2p-10, .inv_ln2 = 0x1.715476p+0f,
    60: svfloat32_t j = svmul_x (svptrue_b32 (), x, d->inv_ln2);  (in SV_NAME_F1())
expm1.c
    19: double shift, inv_ln2, special_bound;  (member)
    30: .inv_ln2 = 0x1.71547652b82fep0,
    59: svfloat64_t n = svsub_x (pg, svmla_x (pg, shift, x, d->inv_ln2), shift);  (in SV_NAME_D1())
tanh.c
    16: float64_t inv_ln2, ln2_hi, ln2_lo, shift;  (member)
    26: .inv_ln2 = 0x1.71547652b82fep0,
    44: = svsub_x (pg, svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2), d->shift);  (in expm1_inline())
sinh.c
    16: float64_t inv_ln2, m_ln2_hi, m_ln2_lo, shift;  (member)
    28: .inv_ln2 = 0x1.71547652b82fep0,
    49: = svsub_x (pg, svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2), d->shift);  (in expm1_inline())
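
expm1.c, tanh.c, and sinh.c round without a separate round instruction: fusing x * inv_ln2 into an add of a large shift constant pushes the fraction bits out of the significand, and subtracting shift back leaves x * inv_ln2 rounded to the nearest integer. A scalar sketch, assuming the conventional shift value 0x1.8p52 (the actual initializer is cut off in the listing above):

    /* Sketch of the rounding idiom in expm1.c/tanh.c/sinh.c above:
       n = svsub (svmla (shift, x, inv_ln2), shift).  Assumes the usual
       shift constant 1.5 * 2^52, whose ulp is exactly 1.0, and the
       default round-to-nearest FP mode; valid while |x * inv_ln2| < 2^51.  */
    static double
    round_via_shift (double x, double inv_ln2)
    {
      const double shift = 0x1.8p52;   /* assumed value, 1.5 * 2^52  */
      double j = x * inv_ln2 + shift;  /* low bits now hold round(x*inv_ln2)  */
      return j - shift;                /* back to a plain double, rounded  */
    }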
cosh.c
    15: float64_t inv_ln2, ln2_hi, ln2_lo, shift, thres;  (member)
    21: .inv_ln2 = 0x1.71547652b82fep8, /* N/ln2. */
    47: svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);  (in exp_inline())
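
cosh.c stores a scaled constant: 0x1.71547652b82fep8 is 2^8/ln(2), so n = round(x * N/ln2) with N = 256 splits into an exponent part and an index into an N-entry table of 2^(i/N) values, shrinking the polynomial's interval to ln2/(2N). A sketch of that split; the table semantics and the shift value are assumptions here, only the inv_ln2 constant comes from the snippet:

    #include <stdint.h>

    /* Sketch of the split implied by inv_ln2 = 2^8/ln2 in cosh.c above:
       2^(k/N) = 2^(k >> 8) * T[k & (N-1)] for a hypothetical 256-entry
       table T of 2^(i/N) values.  */
    enum { EXP_TABLE_BITS = 8, EXP_N = 1 << EXP_TABLE_BITS };

    static void
    split_exp_index (double x, int64_t *expo, unsigned *idx)
    {
      const double inv_ln2N = 0x1.71547652b82fep8;  /* N/ln2, from the listing.  */
      const double shift = 0x1.8p52;                /* assumed, as sketched above  */
      double z = x * inv_ln2N + shift;
      int64_t k = (int64_t) (z - shift);            /* round(x * N/ln2)  */
      *expo = k >> EXP_TABLE_BITS;                  /* contributes 2^expo  */
      *idx = (unsigned) (k & (EXP_N - 1));          /* selects T[idx] ~ 2^(idx/N)  */
    }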
exp.c
    16: double ln2_hi, ln2_lo, inv_ln2, shift, thres;  (member)
    26: .inv_ln2 = 0x1.71547652b82fep+0,
    95: svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);  (in SV_NAME_D1())
/freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/
v_expf_inline.h
    17: float32x4_t inv_ln2, c1, c3, c4;  (member)
    28: .inv_ln2 = V4 (0x1.715476p+0f), .exponent_bias = V4 (0x3f800000), \
    42: float32x4_t n = vrndaq_f32 (vmulq_f32 (ax, d->inv_ln2));  (in v_expf_inline())
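
v_expf_inline.h pairs inv_ln2 with exponent_bias = 0x3f800000, the bit pattern of 1.0f: after n = vrndaq_f32(x * inv_ln2), shifting the integer n left by 23 and adding the bias yields the bits of 2^n directly, with no ldexp call. A NEON sketch of just that scale-factor construction; the function name is made up, the intrinsics are standard <arm_neon.h>:

    #include <arm_neon.h>

    /* Sketch of the 2^n construction behind the v_expf_inline.h snippet:
       n = round(x * inv_ln2), then n is shifted into the float exponent
       field on top of the bits of 1.0f.  Valid for -126 < n < 128.  */
    static float32x4_t
    v_pow2_sketch (float32x4_t x)
    {
      const float32x4_t inv_ln2 = vdupq_n_f32 (0x1.715476p+0f);
      const uint32x4_t exponent_bias = vdupq_n_u32 (0x3f800000);
      float32x4_t n = vrndaq_f32 (vmulq_f32 (x, inv_ln2));  /* round to nearest  */
      int32x4_t ni = vcvtq_s32_f32 (n);
      uint32x4_t e = vaddq_u32 (vreinterpretq_u32_s32 (vshlq_n_s32 (ni, 23)),
                                exponent_bias);
      return vreinterpretq_f32_u32 (e);  /* 2^n; the real routine multiplies
                                            this by a polynomial in r  */
    }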
cosh.c
    15: float64x2_t inv_ln2;  (member)
    23: .inv_ln2 = V2 (0x1.71547652b82fep8), /* N/ln2. */
    48: float64x2_t z = vfmaq_f64 (d->shift, x, d->inv_ln2);  (in exp_inline())
expf_1u.c
    12: float32x4_t shift, inv_ln2;  (member)
    22: .inv_ln2 = V4 (0x1.715476p+0f),
    58: float32x4_t z = vmulq_f32 (x, d->inv_ln2);  (in _ZGVnN4v_expf_1u())
exp.c
    19: float64x2_t inv_ln2, ln2_hi, ln2_lo, shift;  (member)
    33: .inv_ln2 = V2 (0x1.71547652b82fep7), /* N/ln2. */
    98: z = vfmaq_f64 (data.shift, x, data.inv_ln2);  (in V_NAME_D1())
expf.c
    13: float32x4_t c1, c3, c4, inv_ln2;  (member)
    26: .inv_ln2 = V4 (0x1.715476p+0f),
    97: float32x4_t n = vrndaq_f32 (vmulq_f32 (x, d->inv_ln2));  (in V_NAME_F1())
v_expm1f_inline.h
    18: float c1, c3, inv_ln2, c4;  (member)
    28: .exponent_bias = V4 (0x3f800000), .inv_ln2 = 0x1.715476p+0f, \