
Searched refs: v_u64 (Results 1 – 20 of 20), sorted by relevance

/freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/
exp10.c
  44: #define IndexMask v_u64 (N - 1)
  48: # define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511). */
  49: # define BigBound v_u64 (0x4070000000000000) /* asuint64 (0x1p8). */
  50: # define Thres v_u64 (0x2070000000000000) /* BigBound - TinyBound. */
  62: # define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513. */
  64: # define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769. */
  65: # define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254. */
exp.c
  44: # define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511). */
  45: # define BigBound v_u64 (0x4080000000000000) /* asuint64 (0x1p9). */
  46: # define SpecialBound v_u64 (0x2080000000000000) /* BigBound - TinyBound. */
  58: # define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513. */
  60: # define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769. */
  61: # define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254. */
exp2.c
  63: uint64x2_t b = vandq_u64 (vclezq_f64 (n), v_u64 (SpecialOffset)); [in special_case()]
  64: float64x2_t s1 = vreinterpretq_f64_u64 (vsubq_u64 (v_u64 (SpecialBias1), b)); [in special_case()]
  66: vsubq_u64 (vreinterpretq_u64_f64 (s), v_u64 (SpecialBias2)), b)); [in special_case()]
  86: cmp = vcgeq_u64 (vsubq_u64 (ia, v_u64 (TinyBound)), v_u64 (Thres)); [in V_NAME_D1()]
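
The TinyBound/BigBound/Thres constants above implement a single-compare range check: subtracting TinyBound from the absolute bit pattern makes any smaller input wrap around to a huge unsigned value, so one unsigned comparison (exp2.c line 86 above) flags both underflow and overflow candidates at once. A minimal scalar sketch of the idea using exp.c's constants; the function name needs_special_case is hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Scalar sketch of the vector range check: the constants are exp.c's
       TinyBound (asuint64 (0x1p-511)) and SpecialBound (BigBound -
       TinyBound); the vector code does the same per lane with
       vsubq_u64/vcgeq_u64.  */
    static int
    needs_special_case (double x)
    {
      uint64_t ix;
      memcpy (&ix, &x, sizeof ix);
      uint64_t ia = ix & 0x7fffffffffffffff; /* |x| as a bit pattern.  */
      /* If ia < TinyBound the subtraction wraps to a huge value, so one
         unsigned compare catches |x| < 0x1p-511 and |x| >= 0x1p9 alike.  */
      return ia - 0x2000000000000000 >= 0x2080000000000000;
    }

    int
    main (void)
    {
      /* Prints 0 1 1: 1.0 is in range, 0x1p-600 is tiny, 1000.0 is big.  */
      printf ("%d %d %d\n", needs_special_case (1.0),
              needs_special_case (0x1p-600), needs_special_case (1000.0));
    }
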
tan.c
  44: return v_call_f64 (tan, x, x, v_u64 (-1)); [in special_case()]
  62: = vcgtq_u64 (vsubq_u64 (iax, v_u64 (TinyBound)), v_u64 (Thresh)); [in V_NAME_D1()]
  105: uint64x2_t no_recip = vtstq_u64 (vreinterpretq_u64_s64 (qi), v_u64 (1)); [in V_NAME_D1()]
atan.c
  33: #define SignMask v_u64 (0x8000000000000000)
  58: uint64x2_t ia12 = vandq_u64 (ix, v_u64 (0x7ff0000000000000)); [in V_NAME_D1()]
  59: uint64x2_t special = vcgtq_u64 (vsubq_u64 (ia12, v_u64 (TinyBound)), [in V_NAME_D1()]
  60: v_u64 (BigBound - TinyBound)); [in V_NAME_D1()]
  63: return v_call_f64 (atan, x, v_f64 (0), v_u64 (-1)); [in V_NAME_D1()]
acos.c
  32: #define AllMask v_u64 (0xffffffffffffffff)
  76: = vcgtq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (ax), v_u64 (Small)), [in V_NAME_D1()]
  77: v_u64 (Oneu - Small)); [in V_NAME_D1()]
asin.c
  30: #define AllMask v_u64 (0xffffffffffffffff)
  71: = vcgtq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (ax), v_u64 (Small)), [in V_NAME_D1()]
  72: v_u64 (One - Small)); [in V_NAME_D1()]
pow.c
  15: #define VecSmallPowX v_u64 (SmallPowX)
  16: #define VecThresPowX v_u64 (ThresPowX)
  17: #define VecSmallPowY v_u64 (SmallPowY)
  18: #define VecThresPowY v_u64 (ThresPowY)
  166: uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1)); [in v_exp_inline()]
sinh.c
  39: return v_call_f64 (sinh, x, x, v_u64 (-1)); [in special_case()]
  54: vbslq_u64 (v_u64 (0x8000000000000000), ix, d->halff)); [in V_NAME_D1()]
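
The vbslq_u64 call at sinh.c line 54 is a bit-select copysign: a sign-bit mask takes only bit 63 from ix and everything else from d->halff, producing 0.5 with x's sign. A standalone sketch of the idiom (AArch64 only; variable names are illustrative):

    #include <arm_neon.h>
    #include <stdio.h>

    /* Sketch of the bit-select idiom at sinh.c:54: vbslq_u64 takes the
       bits selected by the mask from its second argument and the rest
       from the third, so a sign-bit mask yields copysign (0.5, x).  */
    int
    main (void)
    {
      float64x2_t x = (float64x2_t){ -3.0, 2.0 };
      uint64x2_t ix = vreinterpretq_u64_f64 (x);
      uint64x2_t half = vreinterpretq_u64_f64 (vdupq_n_f64 (0.5));
      float64x2_t halfsign = vreinterpretq_f64_u64 (
          vbslq_u64 (vdupq_n_u64 (0x8000000000000000), ix, half));
      /* Prints -0.5 0.5.  */
      printf ("%g %g\n", vgetq_lane_f64 (halfsign, 0),
              vgetq_lane_f64 (halfsign, 1));
    }

The same idiom appears with inverted masks in erf.c line 146 and erfinv_25u.c line 76 below, where the magnitude comes from one operand and the sign from the other.
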
sinpi.c
  28: # define TinyBound v_u64 (0x3bf0000000000000) /* asuint64(0x1p-64). */
  30: # define Thresh v_u64 (0x07f0000000000000)
v_sincos_common.h
  76: = vshlq_n_u64 (vandq_u64 (vreinterpretq_u64_s64 (n), v_u64 (2)), 62); [in v_sincos_inline()]
  78: vandq_u64 (vreinterpretq_u64_s64 (vaddq_s64 (n, v_s64 (1))), v_u64 (2)), [in v_sincos_inline()]
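
v_sincos_common.h line 76 converts the quadrant index n into a sign mask: bit 1 of n, shifted left by 62, lands in the double's sign-bit position (sin is negative in quadrants 2 and 3), and the mask is then XORed into the polynomial result. Line 78 does the same for cos using n + 1. A scalar sketch of the shift:

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar sketch of the quadrant-to-sign trick at v_sincos_common.h:76:
       bit 1 of the quadrant index n, shifted to bit 63, becomes a mask
       that flips the result's sign when XORed in.  The vector code uses
       vandq_u64/vshlq_n_u64 instead.  */
    int
    main (void)
    {
      for (uint64_t n = 0; n < 4; n++)
        {
          uint64_t sin_sign = (n & 2) << 62;
          /* Prints 0, 0, 1, 1 for quadrants 0..3.  */
          printf ("quadrant %u: flip sign of sin: %u\n", (unsigned) n,
                  (unsigned) (sin_sign >> 63));
        }
    }
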
sin.c
  32: # define TinyBound v_u64 (0x3020000000000000)
  34: # define Thresh v_u64 (0x1160000000000000)
atan2.c
  46: #define SignMask v_u64 (0x8000000000000000)
  64: return vcgeq_u64 (vsubq_u64 (vaddq_u64 (i, i), v_u64 (1)), d->zeroinfnan); [in zeroinfnan()]
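
zeroinfnan() at atan2.c line 64 uses a doubling trick: i + i shifts the sign bit out of the pattern, and the - 1 makes a zero pattern wrap to UINT64_MAX, so one unsigned compare flags 0, infinity and NaN together. A scalar sketch, assuming d->zeroinfnan holds 2 * asuint64 (INFINITY) - 1:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Scalar sketch of zeroinfnan() from atan2.c:64.  The threshold value
       is an assumption about d->zeroinfnan, not taken from the hit.  */
    static int
    zeroinfnan (double x)
    {
      uint64_t i;
      memcpy (&i, &x, sizeof i);
      return 2 * i - 1 >= 2 * UINT64_C (0x7ff0000000000000) - 1;
    }

    int
    main (void)
    {
      /* Prints 1 1 1 0.  */
      printf ("%d %d %d %d\n", zeroinfnan (-0.0), zeroinfnan (INFINITY),
              zeroinfnan (NAN), zeroinfnan (1.5));
    }
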
log1p.c
  23: #define BottomMask v_u64 (0xffffffff)
cbrt.c
  35: #define MantissaMask v_u64 (0x000fffffffffffff)
v_log1p_inline.h
  41: #define BottomMask v_u64 (0xffffffff)
erf.c
  146: y = vbslq_f64 (v_u64 (AbsMask), y, x); [in V_NAME_D1()]
v_math.h
  144: v_u64 (uint64_t x) [in v_u64() function]
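
The v_math.h hit is the definition every other result calls. A sketch of its likely shape, consistent with the uses above (a broadcast of one scalar to both lanes of a uint64x2_t, presumably via vdupq_n_u64):

    #include <arm_neon.h>

    /* Likely shape of the helper: broadcast one 64-bit scalar to both
       lanes of a uint64x2_t.  A sketch assuming the definition wraps
       vdupq_n_u64; the real one is at v_math.h:144.  */
    static inline uint64x2_t
    v_u64 (uint64_t x)
    {
      return vdupq_n_u64 (x);
    }

Note that v_u64 (-1) in the hits above relies on the implicit conversion of -1 to uint64_t, yielding an all-ones vector, which v_call_f64 then uses as an "apply the scalar fallback to every lane" mask.
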
erfc.c
  111: uint64x2_t cmp = vcltq_u64 (vaddq_u64 (ix, ix), v_u64 (TinyBound)); [in V_NAME_D1()]
/freebsd/contrib/arm-optimized-routines/math/aarch64/experimental/advsimd/
erfinv_25u.c
  76: float64x2_t ts = vbslq_f64 (v_u64 (0x7fffffffffffffff), t, x); [in special()]