/freebsd/contrib/arm-optimized-routines/math/aarch64/experimental/advsimd/
erfinv_25u.c
    74  v_log_inline (vsubq_f64 (v_f64 (1), vabsq_f64 (x)), &d->log_tbl));  in special()
    75  t = vdivq_f64 (v_f64 (1), vsqrtq_f64 (t));  in special()
    93  float64x2_t t = vfmaq_f64 (v_f64 (-0.5625), x, x);  in notails()
   121  uint64x2_t is_tail = vcagtq_f64 (x, v_f64 (0.75));  in V_NAME_D1()
   128  uint64x2_t extreme_tail = vcagtq_f64 (x, v_f64 (0.9375));  in V_NAME_D1()
   133  float64x2_t t = vbslq_f64 (is_tail, d->tailshift, v_f64 (-0.5625));  in V_NAME_D1()
   139  vandq_u64 (is_tail, vreinterpretq_u64_f64 (v_f64 (1))));  in V_NAME_D1()
/freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/
acos.c
    82  uint64x2_t a_le_half = vcleq_f64 (ax, v_f64 (0.5));  in V_NAME_D1()
    88  vfmaq_f64 (v_f64 (0.5), v_f64 (-0.5), ax));  in V_NAME_D1()
   108  float64x2_t mul = vbslq_f64 (a_le_half, v_f64 (-1.0), v_f64 (2.0));  in V_NAME_D1()
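A pattern that recurs in nearly every file in this listing shows up in acos.c above (lines 82 and 108; also erfinv_25u.c 121/133, erf.c 96/97, asin.c 77, cbrt.c 69): an AdvSIMD comparison such as vcleq_f64, or an absolute-value compare like vcagtq_f64/vcaltq_f64, yields an all-ones or all-zeros mask per 64-bit lane, and vbslq_f64 then bit-selects between two candidate results, replacing a scalar branch. A minimal self-contained sketch of the idiom using only arm_neon.h intrinsics; vdupq_n_f64 stands in for the library's v_f64 helper, and the two candidates are placeholders rather than the real polynomial paths:

#include <arm_neon.h>

/* Branch-free per-lane select: keep one value where |x| <= 0.5,
   the other elsewhere.  vcleq_f64 sets every bit of a lane when the
   comparison holds, so vbslq_f64 can use it directly as a bitwise
   select mask.  */
static inline float64x2_t
select_by_half (float64x2_t x)
{
  float64x2_t ax = vabsq_f64 (x);
  uint64x2_t a_le_half = vcleq_f64 (ax, vdupq_n_f64 (0.5));
  float64x2_t if_small = vdupq_n_f64 (-1.0); /* placeholder candidate */
  float64x2_t if_large = vdupq_n_f64 (2.0);  /* placeholder candidate */
  return vbslq_f64 (a_le_half, if_small, if_large);
}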
erf.c
    96  a = vbslq_f64 (cmp1, v_f64 (8.0), a);  in V_NAME_D1()
    97  a = vbslq_f64 (cmp2, v_f64 (1.0), a);  in V_NAME_D1()
   127  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->third));  in V_NAME_D1()
   133  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->third), r2, p5));  in V_NAME_D1()
   143  y = vbslq_f64 (a_gt_max, v_f64 (1.0), y);  in V_NAME_D1()
   152  x = vbslq_f64 (cmp1, v_f64 (1.0), x);  in V_NAME_D1()
asinh.c
   111  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  in log_inline()
   144  uint64x2_t gt1 = vcgeq_f64 (ax, v_f64 (1));  in V_NAME_D1()
   159  float64x2_t option_1 = v_f64 (0);  in V_NAME_D1()
   168  vaddq_f64 (xm, vsqrtq_f64 (vfmaq_f64 (v_f64 (1), xm, xm))), d);  in V_NAME_D1()
   179  float64x2_t option_2 = v_f64 (0);  in V_NAME_D1()
cos.c
    53  r = vbslq_f64 (cmp, v_f64 (1.0), r);  in V_NAME_D1()
    60  n = vrndaq_f64 (vfmaq_f64 (v_f64 (0.5), r, d->inv_pi));  in V_NAME_D1()
    62  n = vsubq_f64 (n, v_f64 (0.5f));  in V_NAME_D1()
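The cos.c pair at lines 60 and 62 is the range reduction. Assuming d->inv_pi holds 1/pi, as the name suggests, vfmaq_f64 (v_f64 (0.5), r, d->inv_pi) computes r/pi + 1/2, vrndaq_f64 rounds that to the nearest integer k, and subtracting 0.5 leaves the half-integer n = k - 1/2 nearest to r/pi. Then r - n*pi lands in [-pi/2, pi/2] and

  cos(r) = sin(r + pi/2) = (-1)^k * sin(r - n*pi),

so the remaining work is one sine polynomial on a small interval plus a sign flip driven by the parity of k.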
tanpi.c
    44  uint64x2_t flip = vcgtq_f64 (ar, v_f64 (0.25));  in V_NAME_D1()
    45  float64x2_t r = vbslq_f64 (flip, vsubq_f64 (v_f64 (0.5), ar), ar);  in V_NAME_D1()
    72  float64x2_t p_recip = vdivq_f64 (v_f64 (1.0), p);  in V_NAME_D1()
v_log1p_inline.h
    81  float64x2_t m = vaddq_f64 (x, v_f64 (1.0));  in log1p_inline()
    92  float64x2_t f = vsubq_f64 (vreinterpretq_f64_u64 (u_red), v_f64 (1.0));  in log1p_inline()
    95  float64x2_t cm = vdivq_f64 (vsubq_f64 (x, vsubq_f64 (m, v_f64 (1.0))), m);  in log1p_inline()
acosh.c
    48  float64x2_t xm1 = vsubq_f64 (x, v_f64 (1.0));  in V_NAME_D1()
    49  float64x2_t y = vaddq_f64 (x, v_f64 (1.0));  in V_NAME_D1()
atan.c
    63  return v_call_f64 (atan, x, v_f64 (0), v_u64 (-1));  in V_NAME_D1()
    70  uint64x2_t red = vcagtq_f64 (x, v_f64 (1.0));  in V_NAME_D1()
    72  float64x2_t z = vbslq_f64 (red, vdivq_f64 (v_f64 (1.0), x), x);  in V_NAME_D1()
cospi.c
    47  uint64x2_t cmp = vcaleq_f64 (v_f64 (0x1p64), x);  in V_NAME_D1()
    65  r = vsubq_f64 (v_f64 (0.5), vabsq_f64 (r));  in V_NAME_D1()
v_sincos_common.h
    64  c = vfmaq_f64 (v_f64 (-0.5), r2, c);  in v_sincos_inline()
    65  c = vfmaq_f64 (v_f64 (1), r2, c);  in v_sincos_inline()
v_expm1_inline.h
    63  float64x2_t p01 = vfmaq_laneq_f64 (v_f64 (0.5), f, lane_consts_13, 0);  in expm1_inline()
    83  return vfmaq_f64 (vsubq_f64 (t, v_f64 (1.0)), p, t);  in expm1_inline()
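The v_expm1_inline.h hit at line 83 is the standard expm1 reconstruction step. Assuming the conventional kernel layout, where t = 2^k is the scale recovered from the exponent reduction and p approximates e^f - 1 on the reduced interval, vfmaq_f64 (vsubq_f64 (t, v_f64 (1.0)), p, t) evaluates (t - 1) + p*t per lane, which is exactly

  e^x - 1 = 2^k * e^f - 1 = t * (1 + p) - 1 = (t - 1) + p*t,

with the p*t product fused into the final add.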
cosh.c
    63  y = vmulq_f64 (vfmaq_f64 (v_f64 (1), y, r), r);  in exp_inline()
    94  float64x2_t half_over_t = vdivq_f64 (v_f64 (0.5), t);  in V_NAME_D1()
cbrt.c
    69  float64x2_t m = vbslq_f64 (MantissaMask, x, v_f64 (0.5));  in V_NAME_D1()
   102  int64x2_t em3 = vcvtq_s64_f64 (vfmsq_f64 (ef, eb3f, v_f64 (3)));  in V_NAME_D1()
asin.c
    77  uint64x2_t a_lt_half = vcaltq_f64 (x, v_f64 (0.5));  in V_NAME_D1()
    83  vfmsq_n_f64 (v_f64 (0.5), ax, 0.5));  in V_NAME_D1()
erfc.c
   148  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->p20));  in V_NAME_D1()
   153  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->p20), r2, p5));  in V_NAME_D1()
v_sincospi_common.h
    46  float64x2_t cr = vsubq_f64 (v_f64 (0.5), vabsq_f64 (sr));  in v_sincospi_inline()
tanh.c
    55  float64x2_t qp2 = vaddq_f64 (q, v_f64 (2.0));  in V_NAME_D1()
sinh.c
    71  t = vaddq_f64 (t, vdivq_f64 (t, vaddq_f64 (t, v_f64 (1.0))));  in V_NAME_D1()
atanh.c
    42  float64x2_t halfsign = vbslq_f64 (d->sign_mask, x, v_f64 (0.5));  in V_NAME_D1()
pow.c
   104  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);  in v_log_inline()
   112  float64x2_t ar = vmulq_f64 (v_f64 (-0.5), r);  in v_log_inline()
tan.c
   102  float64x2_t n = vfmaq_f64 (v_f64 (-1), p, p);  in V_NAME_D1()
v_log_inline.h
    89  r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  in v_log_inline()
log.c
    93  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  in V_NAME_D1()
log2.c
    97  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);  in V_NAME_D1()
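Every hit above funnels through v_f64, which is not an arm_neon.h intrinsic but a helper from the library's own v_math.h. Assuming its usual definition there, it simply broadcasts a scalar constant into both lanes of a float64x2_t; an equivalent sketch:

#include <arm_neon.h>

/* Assumed-equivalent sketch of the library's v_f64 helper (the real
   definition lives in v_math.h): splat one double across both lanes
   of a float64x2_t.  */
static inline float64x2_t
v_f64 (double x)
{
  return vdupq_n_f64 (x);
}

Read this way, a hit such as erf.c line 127 is plain per-lane arithmetic: vfmaq_f64 (v_f64 (-0.5), r2, dat->third) computes -0.5 + r2 * third in one fused multiply-add, and the outer vmulq_f64 by r makes it a Horner step of the polynomial.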