/freebsd/contrib/arm-optimized-routines/pl/math/

sv_expm1f_inline.h
    19:  float32_t c0, c1, c3, inv_ln2, shift;  (struct member)
    28:  .shift = 0x1.8p23f, .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f, \
    47:  svfloat32_t j = svmla_x (pg, sv_f32 (d->shift), x, d->inv_ln2);  (in expm1f_inline())

sv_expf_2u.c
    15:  float inv_ln2, ln2_hi, ln2_lo, shift, thres;  (struct member)
    21:  .inv_ln2 = 0x1.715476p+0f,
    53:  svfloat32_t invln2_and_ln2 = svld1rq (svptrue_b32 (), &d->inv_ln2);  (in SV_NAME_F1())

sv_expm1_2u5.c
    19:  double shift, inv_ln2, special_bound;  (struct member)
    30:  .inv_ln2 = 0x1.71547652b82fep0,
    59:  svfloat64_t n = svsub_x (pg, svmla_x (pg, shift, x, d->inv_ln2), shift);  (in SV_NAME_D1())

sv_expm1f_1u6.c
    20:  float c0, c1, c3, inv_ln2, special_bound, shift;  (struct member)
    28:  .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f,
    63:  svfloat32_t j = svmla_x (pg, sv_f32 (d->shift), x, d->inv_ln2);  (in SV_NAME_F1())

v_cosh_2u.c
    15:  float64x2_t inv_ln2, ln2, shift, thres;  (struct member)
    21:  .inv_ln2 = V2 (0x1.71547652b82fep8), /* N/ln2. */
    46:  float64x2_t z = vfmaq_f64 (d->shift, x, d->inv_ln2);  (in exp_inline())

sv_tanh_3u.c
    16:  float64_t inv_ln2, ln2_hi, ln2_lo, shift;  (struct member)
    26:  .inv_ln2 = 0x1.71547652b82fep0,
    44:  = svsub_x (pg, svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2), d->shift);  (in expm1_inline())

v_tanh_3u.c
    16:  float64x2_t inv_ln2, ln2_hi, ln2_lo, shift;  (struct member)
    27:  .inv_ln2 = V2 (0x1.71547652b82fep0),
    45:  float64x2_t j = vsubq_f64 (vfmaq_f64 (d->shift, d->inv_ln2, x), d->shift);  (in expm1_inline())

sv_cosh_2u.c
    15:  float64_t inv_ln2, ln2_hi, ln2_lo, shift, thres;  (struct member)
    21:  .inv_ln2 = 0x1.71547652b82fep8, /* N/ln2. */
    45:  svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);  (in exp_inline())

sv_sinh_3u.c
    16:  float64_t inv_ln2, m_ln2_hi, m_ln2_lo, shift;  (struct member)
    28:  .inv_ln2 = 0x1.71547652b82fep0,
    49:  = svsub_x (pg, svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2), d->shift);  (in expm1_inline())

sv_exp_1u5.c
    15:  double ln2_hi, ln2_lo, inv_ln2, shift, thres;  (struct member)
    23:  .inv_ln2 = 0x1.71547652b82fep+0,
    93:  svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);  (in SV_NAME_D1())

v_sinh_3u.c
    16:  float64x2_t inv_ln2, m_ln2, shift;  (struct member)
    32:  .inv_ln2 = V2 (0x1.71547652b82fep0),
    58:  float64x2_t j = vsubq_f64 (vfmaq_f64 (d->shift, d->inv_ln2, x), d->shift);  (in expm1_inline())

sv_expf_inline.h
    19:  float inv_ln2, ln2_hi, ln2_lo, shift;  (struct member)
    29:  .inv_ln2 = 0x1.715476p+0f, .ln2_hi = 0x1.62e4p-1f, \
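
The pl/math hits above share one idiom: an FMA of the form shift + x*inv_ln2, followed (in the lines not shown or in the svsub_x hits) by subtracting shift again. Adding 0x1.8p23f pushes x/ln2 into a binade whose ulp is 1, so the FMA's single rounding rounds to the nearest integer, and the ln2_hi/ln2_lo pair then removes j*ln2 from x with extra precision. Below is a minimal scalar sketch of the unscaled float variant (as in sv_expm1f_inline.h and sv_expf_inline.h); the macro names and the reduce() helper are illustrative, not the library's, and LN2_LO is derived from LN2_HI here rather than copied from the sources.

#include <math.h>
#include <stdio.h>

/* Constants as they appear in the hits above; names are illustrative.  */
#define SHIFT   0x1.8p23f       /* 1.5 * 2^23 */
#define INV_LN2 0x1.715476p+0f  /* 1/ln2 rounded to float */
#define LN2_HI  0x1.62e4p-1f    /* short ln2, so j * LN2_HI is exact for small j */
/* Low part derived from the high part; the sources store their own value.  */
#define LN2_LO  ((float) (0x1.62e42fefa39efp-1 - LN2_HI))

/* Write x as j*ln2 + r with j integral and |r| roughly <= ln2/2.  */
static float
reduce (float x, float *r)
{
  /* Adding 1.5*2^23 moves x/ln2 into [2^23, 2^24), where the FMA's single
     rounding is a round-to-nearest-integer; subtracting the shift again
     recovers j = round (x/ln2) exactly.  */
  float j = fmaf (x, INV_LN2, SHIFT) - SHIFT;
  /* Remove j*ln2 using the two-term ln2 so the reduction error stays tiny.  */
  *r = fmaf (j, -LN2_HI, x);
  *r = fmaf (j, -LN2_LO, *r);
  return j;
}

int
main (void)
{
  float r, j = reduce (3.5f, &r);
  printf ("j = %g  r = %a  check = %a\n", j, r, 3.5 - j * 0x1.62e42fefa39efp-1);
  return 0;
}

Keeping inv_ln2, ln2_hi, ln2_lo and shift adjacent in the data struct appears to be a deliberate layout choice: in sv_expf_2u.c (line 53) a single svld1rq from &d->inv_ln2 pulls all four constants into one quadword for the whole reduction.
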
/freebsd/contrib/arm-optimized-routines/math/aarch64/

v_exp.c
    17:  float64x2_t inv_ln2, ln2_hi, ln2_lo, shift;  (struct member)
    31:  .inv_ln2 = V2 (0x1.71547652b82fep7), /* N/ln2. */
    96:  z = vfmaq_f64 (data.shift, x, data.inv_ln2);  (in V_NAME_D1())

v_expf.c
    14:  float32x4_t shift, inv_ln2, ln2_hi, ln2_lo;  (struct member)
    24:  .inv_ln2 = V4 (0x1.715476p+0f),
    96:  z = vfmaq_f32 (d->shift, x, d->inv_ln2);  (in V_NAME_F1())
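
v_expf.c uses the same unscaled 0x1.715476p+0f reduction sketched above. In v_exp.c the constant is scaled: 0x1.71547652b82fep7 is 2^7/ln2, i.e. N/ln2 with N = 128 (the cosh hits above use 2^8/ln2, N = 256), so the shifted FMA yields n = round (x*N/ln2), whose low bits select a 2^(i/N) table entry while the remaining bits give the binary exponent. The scalar sketch below covers just that reduction; it assumes the usual 0x1.8p52 shift (the double analogue of 0x1.8p23f, not visible in these hits), the names and the value of x are illustrative, and the real table/polynomial step is omitted.

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative names.  N = 128 matches inv_ln2 = 0x1.71547652b82fep7
   (= 2^7/ln2) in v_exp.c; SHIFT = 0x1.8p52 is assumed.  */
#define N       128
#define SHIFT   0x1.8p52
#define INV_LN2 0x1.71547652b82fep7 /* N/ln2 */

int
main (void)
{
  double x = -4.2;
  double ln2 = 0x1.62e42fefa39efp-1;

  /* Adding SHIFT moves x*N/ln2 into [2^52, 2^53), where the FMA's single
     rounding rounds it to the nearest integer.  */
  double z = fma (x, INV_LN2, SHIFT);
  double n = z - SHIFT; /* n = round (x*N/ln2), an exact integer */

  /* The low mantissa bits of z already hold n, so the table index n mod N
     can be read from the bit pattern with no float-to-int conversion.  */
  uint64_t u;
  memcpy (&u, &z, sizeof u);
  uint64_t idx = u % N;                          /* index into a 2^(i/N) table */
  int64_t k = ((int64_t) n - (int64_t) idx) / N; /* exponent, floor (n/N) */

  /* x = n*ln2/N + r, so exp (x) = 2^k * 2^(idx/N) * exp (r); the table and
     polynomial that evaluate the last two factors are omitted here.  */
  double r = x - n * (ln2 / N);
  double rebuilt = ldexp (exp2 ((double) idx / N) * exp (r), (int) k);
  printf ("n = %.0f  idx = %llu  k = %lld\n", n,
          (unsigned long long) idx, (long long) k);
  printf ("exp (x) = %a\nrebuilt = %a\n", exp (x), rebuilt);
  return 0;
}

That is presumably why v_exp.c keeps the shifted value z from line 96 rather than only n: one FMA result feeds both the reduced argument and the index/exponent bits.
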