/freebsd/contrib/arm-optimized-routines/math/

math_config.h
    418  double invc, logc;  [member]
    431  double invc, logc;  [member]
    448  double invc, logc;  [member]
    494  struct {double invc, logc;} tab[1 << LOG_TABLE_BITS];  [member]
    509  struct {double invc, logc;} tab[1 << LOG2_TABLE_BITS];  [member]
    523  struct {double invc, pad, logc, logctail;} tab[1 << POW_LOG_TABLE_BITS];  [member]
    563  double invc, logc;  [member]
    574  double invc[V_POWF_LOG2_N];  [member]
    593  double invc[1 << V_POW_LOG_TABLE_BITS];  [member]
    605  double invc, log2c;  [member]
    [all …]

log2f.c
    31  double_t z, r, r2, p, y, y0, invc, logc;  [in log2f(), local]
    63  invc = T[i].invc;  [in log2f()]
    68  r = z * invc - 1;  [in log2f()]

logf.c
    32  double_t z, r, r2, y, y0, invc, logc;  [in logf(), local]
    63  invc = T[i].invc;  [in logf()]
    68  r = z * invc - 1;  [in logf()]

log10f.c
    41  double_t z, r, r2, y, y0, invc, logc;  [in log10f(), local]
    72  invc = T[i].invc;  [in log10f()]
    77  r = z * invc - 1;  [in log10f()]

log2.c
    35  double_t z, r, r2, r4, y, invc, logc, kd, hi, lo, t1, t2, t3, p;  [in log2(), local]
    98  invc = T[i].invc;  [in log2()]
    107  r = fma (z, invc, -1.0);  [in log2()]
    113  r = (z - T2[i].chi - T2[i].clo) * invc;  [in log2()]

log.c
    35  double_t w, z, r, r2, r3, y, invc, logc, kd, hi, lo;  [in log(), local]
    122  invc = T[i].invc;  [in log()]
    130  r = fma (z, invc, -1.0);  [in log()]
    133  r = (z - T2[i].chi - T2[i].clo) * invc;  [in log()]

powf.c
    34  double_t z, r, r2, r4, p, q, y, y0, invc, logc;  [in log2_inline(), local]
    46  invc = T[i].invc;  [in log2_inline()]
    51  r = z * invc - 1;  [in log2_inline()]

pow.c
    41  double_t z, r, y, invc, logc, logctail, kd, hi, t1, t2, lo, lo1, lo2, p;  [in log_inline(), local]
    56  invc = T[i].invc;  [in log_inline()]
    63  r = fma (z, invc, -1.0);  [in log_inline()]
    68  double_t rhi = zhi * invc - 1.0;  [in log_inline()]
    69  double_t rlo = zlo * invc;  [in log_inline()]
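
All of the scalar routines listed above share one reduction: x is split as 2^k * z with z in [1, 2), the top mantissa bits of z index a table of precomputed {invc, logc} pairs (the tab members declared in math_config.h), and r = z * invc - 1 is small enough for a short polynomial. The sketch below shows only that shape; N, the bin midpoints, the runtime-built table and the Taylor polynomial are stand-ins for illustration, not the library's precomputed coefficients, and special cases (x <= 0, subnormals, NaN, Inf) are ignored.

/* A minimal sketch of the shared reduction, assuming a hypothetical
   16-entry table built at runtime.  The real routines use precomputed
   invc/logc pairs and tuned polynomials; error handling is omitted.  */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N 16                            /* stand-in for 1 << LOG_TABLE_BITS */
static struct { double invc, logc; } tab[N];

static void
init_tab (void)
{
  for (int i = 0; i < N; i++)
    {
      double c = 1.0 + (i + 0.5) / N;   /* midpoint of the i-th bin of [1,2) */
      tab[i].invc = 1.0 / c;
      tab[i].logc = log (c);
    }
}

static double
log_sketch (double x)                   /* assumes x is normal, finite, > 0 */
{
  uint64_t ix, iz;
  double z, invc, logc, r, r2, p;
  memcpy (&ix, &x, sizeof ix);
  int k = (int) (ix >> 52) - 1023;      /* x = 2^k * z with z in [1, 2) */
  iz = (ix & 0xfffffffffffffULL) | 0x3ff0000000000000ULL;
  memcpy (&z, &iz, sizeof z);
  int i = (iz >> 48) & (N - 1);         /* top 4 mantissa bits pick the bin */
  invc = tab[i].invc;
  logc = tab[i].logc;
  r = fma (z, invc, -1.0);              /* small: |r| <= roughly 1/(2*N) */
  r2 = r * r;                           /* log(1+r) via a short polynomial */
  p = r - 0.5 * r2 + r2 * r / 3 - r2 * r2 / 4;
  return k * 0x1.62e42fefa39efp-1 + logc + p;  /* k*ln2 + log(c) + log(z/c) */
}

int
main (void)
{
  init_tab ();
  printf ("%.17g vs libm %.17g\n", log_sketch (10.0), log (10.0));
  return 0;
}

Multiplying by a tabulated invc instead of dividing by c keeps division out of the hot path. The double-precision routines form r with fma (z, invc, -1.0) to limit rounding error, while the float routines (logf, log2f, log10f) can use the plain expression r = z * invc - 1 because they already compute in double.
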
/freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/

v_log_inline.h
    51  float64x2_t invc;  [member]
    62  float64x2_t e0 = vld1q_f64 (&__v_log_data.table[i0].invc);  [in log_lookup()]
    63  float64x2_t e1 = vld1q_f64 (&__v_log_data.table[i1].invc);  [in log_lookup()]
    64  e.invc = vuzp1q_f64 (e0, e1);  [in log_lookup()]
    89  r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  [in v_log_inline()]

log.c
    40  float64x2_t invc;  [member]
    51  float64x2_t e0 = vld1q_f64 (&__v_log_data.table[i0].invc);  [in lookup()]
    52  float64x2_t e1 = vld1q_f64 (&__v_log_data.table[i1].invc);  [in lookup()]
    53  e.invc = vuzp1q_f64 (e0, e1);  [in lookup()]
    93  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  [in V_NAME_D1()]

log2.c
    42  float64x2_t invc;  [member]
    54  float64x2_t e0 = vld1q_f64 (&__v_log2_data.table[i0].invc);  [in lookup()]
    55  float64x2_t e1 = vld1q_f64 (&__v_log2_data.table[i1].invc);  [in lookup()]
    56  e.invc = vuzp1q_f64 (e0, e1);  [in lookup()]
    97  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  [in V_NAME_D1()]

log10.c
    43  float64x2_t invc;  [member]
    55  float64x2_t e0 = vld1q_f64 (&__v_log10_data.table[i0].invc);  [in lookup()]
    56  float64x2_t e1 = vld1q_f64 (&__v_log10_data.table[i1].invc);  [in lookup()]
    57  e.invc = vuzp1q_f64 (e0, e1);  [in lookup()]
    99  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  [in V_NAME_D1()]

asinh.c
    79  float64x2_t invc;  [member]
    90  float64x2_t e0 = vld1q_f64 (&__v_log_data.table[i0].invc);  [in lookup()]
    91  float64x2_t e1 = vld1q_f64 (&__v_log_data.table[i1].invc);  [in lookup()]
    92  e.invc = vuzp1q_f64 (e0, e1);  [in lookup()]
    111  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  [in log_inline()]

powf.c
    32  double invc, logc;  [member]
    99  float64x2_t invc, float64x2_t logc, float64x2_t y)  [in ylogx_core(), argument]
    103  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), iz, invc);  [in ylogx_core()]
    119  &d->log2_tab[(i >> (23 - V_POWF_LOG2_TABLE_BITS)) & Log2IdxMask].invc);  [in log2_lookup()]

finite_pow.h
    61  double invc = __v_pow_log_data.invc[i];  [in log_inline(), local]
    67  double r = fma (z, invc, -1.0);  [in log_inline()]

pow.c
    99  float64x2_t invc = v_masked_lookup_f64 (__v_pow_log_data.invc, tmp);  [in v_log_inline(), local]
    104  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);  [in v_log_inline()]
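
The AdvSIMD variants vectorise the same reduction two lanes at a time. Each lane's index selects a {invc, logc} pair, one vld1q_f64 per lane loads the whole pair, vuzp1q_f64/vuzp2q_f64 de-interleave the pairs into an invc vector and a logc vector, and a single vfmaq_f64 forms r = z*invc - 1 for both lanes. A minimal sketch of that lookup-and-reduce step follows; the runtime-built table is a hypothetical stand-in for __v_log_data.table, and vdupq_n_f64 replaces the library's v_f64 helper.

/* Sketch of the two-lane lookup-and-reduce step, with a hypothetical table
   built at runtime; only the ACLE intrinsics are the real interface.  */
#include <arm_neon.h>
#include <math.h>

#define N 8                               /* hypothetical table size */
static struct { double invc, logc; } table[N];

static void
init_table (void)
{
  for (int j = 0; j < N; j++)
    {
      double c = 1.0 + (j + 0.5) / N;     /* illustrative bin midpoints */
      table[j].invc = 1.0 / c;
      table[j].logc = log (c);
    }
}

struct entry { float64x2_t invc, logc; };

static inline struct entry
lookup (uint64x2_t i)                     /* i holds one table index per lane */
{
  struct entry e;
  float64x2_t e0 = vld1q_f64 (&table[vgetq_lane_u64 (i, 0)].invc);
  float64x2_t e1 = vld1q_f64 (&table[vgetq_lane_u64 (i, 1)].invc);
  e.invc = vuzp1q_f64 (e0, e1);           /* even elements: the two invc values */
  e.logc = vuzp2q_f64 (e0, e1);           /* odd elements: the two logc values */
  return e;
}

static inline float64x2_t
reduce (float64x2_t z, struct entry e)
{
  /* r = z*invc - 1 in both lanes, as in vfmaq_f64 (v_f64 (-1.0), z, e.invc) */
  return vfmaq_f64 (vdupq_n_f64 (-1.0), z, e.invc);
}

Loading the adjacent pair and unzipping afterwards costs two 128-bit loads plus two unzips, rather than four scalar loads and lane inserts.
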
/freebsd/contrib/arm-optimized-routines/math/aarch64/experimental/

log10_2u.c
    44  double_t w, z, r, r2, r3, y, invc, logc, kd, hi, lo;  [in log10(), local]
    102  invc = T[i].invc;  [in log10()]
    110  r = fma (z, invc, -1.0);  [in log10()]
    113  r = (z - T2[i].chi - T2[i].clo) * invc;  [in log10()]
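
The experimental log10 reuses the reduction from log.c unchanged; only the final combination differs, since every base can be reached from the same decomposition. In outline (hedged: the implementation may fold the 1/ln(10) factor into its table and polynomial coefficients rather than applying it at the end):

    x = 2^k * z,   z = c * (1 + r),   r = z*invc - 1   (invc = 1/c, c the table point nearest z)
    log(x)   = k*ln(2) + logc + log(1 + r)
    log10(x) = log(x) / ln(10)
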
/freebsd/contrib/arm-optimized-routines/math/aarch64/sve/

sv_log_inline.h
    66  svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i);  [in sv_log_inline(), local]
    70  svfloat64_t r = svmad_x (pg, invc, z, -1);  [in sv_log_inline()]

log10.c
    65  svfloat64_t invc = svld1_gather_index (pg, &__v_log10_data.table[0].invc, i);  [in SV_NAME_D1(), local]
    72  svfloat64_t r = svmad_x (pg, invc, z, -1.0);  [in SV_NAME_D1()]

log2.c
    64  svfloat64_t invc = svld1_gather_index (pg, &__v_log2_data.table[0].invc, i);  [in SV_NAME_D1(), local]
    71  svfloat64_t r = svmad_x (pg, invc, z, -1.0);  [in SV_NAME_D1()]

log.c
    63  svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i);  [in SV_NAME_D1(), local]
    70  svfloat64_t r = svmad_x (pg, invc, z, -1);  [in SV_NAME_D1()]

asinh.c
    76  svfloat64_t invc = svld1_gather_index (pg, &__v_log_data.table[0].invc, i);  [in __sv_log_inline(), local]
    82  svfloat64_t r = svmla_x (pg, sv_f64 (-1.0), invc, z);  [in __sv_log_inline()]

powf.c
    14  #define Tinvc __v_powf_data.invc
    168  svfloat64_t invc = svld1_gather_index (pg, Tinvc, i);  [in sv_powf_core_ext(), local]
    172  svfloat64_t r = svmla_x (pg, sv_f64 (-1.0), z, invc);  [in sv_powf_core_ext()]
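
The SVE variants replace the per-lane Neon loads with a single predicated gather: svld1_gather_index pulls one invc per active lane, and r = z*invc - 1 is formed with one predicated multiply-add. A minimal sketch, using a hypothetical flat double array in place of the library's table of {invc, logc} pairs:

/* Sketch of the SVE gather-and-reduce step.  tab_invc and its size are
   hypothetical; only the ACLE intrinsics reflect the real interface.  */
#include <arm_sve.h>
#include <math.h>

#define N 16
static double tab_invc[N];          /* stand-in for __v_log_data.table[].invc */

static void
init_tab (void)
{
  for (int j = 0; j < N; j++)
    tab_invc[j] = 1.0 / (1.0 + (j + 0.5) / N);   /* illustrative values */
}

static inline svfloat64_t
reduce (svbool_t pg, svfloat64_t z, svuint64_t i)   /* i: per-lane indices */
{
  /* gather one invc per active lane; the index is scaled by sizeof(double) */
  svfloat64_t invc = svld1_gather_index (pg, tab_invc, i);
  /* r = invc*z - 1, matching svmad_x (pg, invc, z, -1) in the sources */
  return svmad_x (pg, invc, z, svdup_n_f64 (-1.0));
}

svmad_x (pg, a, b, c) computes a*b + c while svmla_x (pg, a, b, c) computes a + b*c, so the svmad and svmla spellings in the files above produce the same r.
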
/freebsd/contrib/arm-optimized-routines/math/aarch64/

v_powf_data.c
    11  .invc = { 0x1.6489890582816p+0,

v_pow_log_data.c
    43  .invc = { 0x1.6a00000000000p+0, 0x1.6800000000000p+0, 0x1.6600000000000p+0,
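
The data files above hold the precomputed invc tables consumed by the vector routines. The v_pow_log_data.c entries visibly carry only a few significant mantissa bits (note the trailing zeros), i.e. 1/c is stored in a coarsely rounded form, presumably with the paired logc computed from that rounded value. A purely hypothetical generator for a table of that shape (not the tooling the library actually uses, and with an assumed z range around [1/sqrt(2), sqrt(2)); the first entry, 0x1.6ap+0, does correspond to c near 1/sqrt(2)):

/* Hypothetical generator for a table of this shape; the library's real
   tables are produced offline.  Ranges and rounding here are assumptions. */
#include <math.h>
#include <stdio.h>

#define BITS 7                      /* stand-in for V_POW_LOG_TABLE_BITS */
#define N (1 << BITS)

int
main (void)
{
  for (int i = 0; i < N; i++)
    {
      /* midpoint of the i-th subinterval of [1/sqrt(2), sqrt(2)) */
      double c = exp2 ((i + 0.5) / N - 0.5);
      /* round 1/c to a multiple of 2^-8, giving entries with trailing zeros */
      double invc = ldexp (nearbyint (ldexp (1.0 / c, 8)), -8);
      printf ("  %a, /* invc */  %a, /* logc = -log(invc) */\n",
              invc, -log (invc));
    }
  return 0;
}
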
|