/freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/

exp10.c
    44: #define IndexMask v_u64 (N - 1)
    48: # define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511). */
    49: # define BigBound v_u64 (0x4070000000000000) /* asuint64 (0x1p8). */
    50: # define Thres v_u64 (0x2070000000000000) /* BigBound - TinyBound. */
    62: # define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513. */
    64: # define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769. */
    65: # define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254. */

exp.c
    44: # define TinyBound v_u64 (0x2000000000000000) /* asuint64 (0x1p-511). */
    45: # define BigBound v_u64 (0x4080000000000000) /* asuint64 (0x1p9). */
    46: # define SpecialBound v_u64 (0x2080000000000000) /* BigBound - TinyBound. */
    58: # define SpecialOffset v_u64 (0x6000000000000000) /* 0x1p513. */
    60: # define SpecialBias1 v_u64 (0x7000000000000000) /* 0x1p769. */
    61: # define SpecialBias2 v_u64 (0x3010000000000000) /* 0x1p-254. */

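The TinyBound/BigBound pairs above, with Thres (or SpecialBound) defined as BigBound - TinyBound, support the usual one-compare special-case filter that shows up in the exp2.c match below: subtract TinyBound from the absolute bit pattern and do a single unsigned compare against the difference. A minimal scalar sketch of the idea, using exp.c's values and a hypothetical asuint64 helper:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper: reinterpret a double's bits as uint64_t. */
    static inline uint64_t asuint64 (double x)
    {
      uint64_t u;
      memcpy (&u, &x, sizeof u);
      return u;
    }

    #define TINY_BOUND 0x2000000000000000ULL /* asuint64 (0x1p-511). */
    #define BIG_BOUND  0x4080000000000000ULL /* asuint64 (0x1p9).    */
    #define THRES      (BIG_BOUND - TINY_BOUND)

    /* Non-zero if |x| < 0x1p-511, |x| >= 0x1p9, or x is Inf/NaN: the unsigned
       subtraction wraps tiny inputs above THRES, so one compare covers both
       ends of the accepted range as well as the Inf/NaN encodings.  */
    static inline int needs_special_case (double x)
    {
      uint64_t ia = asuint64 (x) & 0x7fffffffffffffffULL; /* |x| bits. */
      return (ia - TINY_BOUND) >= THRES;
    }
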
exp2.c
    63: uint64x2_t b = vandq_u64 (vclezq_f64 (n), v_u64 (SpecialOffset));  [in special_case()]
    64: float64x2_t s1 = vreinterpretq_f64_u64 (vsubq_u64 (v_u64 (SpecialBias1), b));  [in special_case()]
    66: vsubq_u64 (vreinterpretq_u64_f64 (s), v_u64 (SpecialBias2)), b));  [in special_case()]
    86: cmp = vcgeq_u64 (vsubq_u64 (ia, v_u64 (TinyBound)), v_u64 (Thres));  [in V_NAME_D1()]

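The SpecialOffset/SpecialBias constants in special_case() serve the standard purpose of splitting an out-of-range scale into two factors so nothing overflows or underflows before the final multiply. The exact bias arithmetic (which also avoids double rounding near the subnormal range) is not reproduced here; the sketch below only illustrates the underlying split-the-power-of-two idea in scalar code:

    #include <math.h>

    /* Compute y * 2^n when 2^n alone may overflow or underflow a double:
       split the power of two so each half stays well inside the exponent
       range, and let over/underflow happen only at the final multiply.
       Illustrative only; the library's biased fixup is more careful.  */
    static inline double scale_special (double y, int n)
    {
      double s1 = ldexp (1.0, n / 2);     /* first half of the scale.  */
      double s2 = ldexp (1.0, n - n / 2); /* remaining half.           */
      return (y * s1) * s2;
    }
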
tan.c
    44: return v_call_f64 (tan, x, x, v_u64 (-1));  [in special_case()]
    62: = vcgtq_u64 (vsubq_u64 (iax, v_u64 (TinyBound)), v_u64 (Thresh));  [in V_NAME_D1()]
   105: uint64x2_t no_recip = vtstq_u64 (vreinterpretq_u64_s64 (qi), v_u64 (1));  [in V_NAME_D1()]

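The vtstq_u64 (..., v_u64 (1)) match tests the low bit of the quadrant index from the pi/2 argument reduction: for odd quadrants tan(q*pi/2 + r) = -1/tan(r), so those lanes need a reciprocal (hence the no_recip name). A scalar illustration of the recombination step, with the reduction and core polynomial stood in by libm's tan:

    #include <math.h>
    #include <stdint.h>

    /* Given r and q from x = q*(pi/2) + r, recombine using tan's period:
       even q keeps tan(r), odd q uses -1/tan(r) (parity test, like
       vtstq_u64 (qi, v_u64 (1)) in the vector code).  */
    static inline double tan_from_reduced (double r, int64_t q)
    {
      double t = tan (r);
      return (q & 1) ? -1.0 / t : t;
    }
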
atan.c
    33: #define SignMask v_u64 (0x8000000000000000)
    58: uint64x2_t ia12 = vandq_u64 (ix, v_u64 (0x7ff0000000000000));  [in V_NAME_D1()]
    59: uint64x2_t special = vcgtq_u64 (vsubq_u64 (ia12, v_u64 (TinyBound)),  [in V_NAME_D1()]
    60: v_u64 (BigBound - TinyBound));  [in V_NAME_D1()]
    63: return v_call_f64 (atan, x, v_f64 (0), v_u64 (-1));  [in V_NAME_D1()]

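SignMask (0x8000000000000000) isolates the IEEE sign bit. Since atan is odd, the routine can work on |x| and OR the original sign back into the result. A scalar bit-level sketch of that pattern, with hypothetical asuint64/asdouble helpers and libm's atan standing in for the polynomial:

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static inline uint64_t asuint64 (double x) { uint64_t u; memcpy (&u, &x, sizeof u); return u; }
    static inline double asdouble (uint64_t u) { double x; memcpy (&x, &u, sizeof x); return x; }

    #define SIGN_MASK 0x8000000000000000ULL

    /* atan is odd: strip the sign, evaluate on |x|, copy the sign back. */
    static inline double atan_via_abs (double x)
    {
      uint64_t sign = asuint64 (x) & SIGN_MASK;
      double y = atan (fabs (x));
      return asdouble (asuint64 (y) | sign);
    }
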
acos.c
    32: #define AllMask v_u64 (0xffffffffffffffff)
    76: = vcgtq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (ax), v_u64 (Small)),  [in V_NAME_D1()]
    77: v_u64 (Oneu - Small));  [in V_NAME_D1()]

asin.c
    30: #define AllMask v_u64 (0xffffffffffffffff)
    71: = vcgtq_u64 (vsubq_u64 (vreinterpretq_u64_f64 (ax), v_u64 (Small)),  [in V_NAME_D1()]
    72: v_u64 (One - Small));  [in V_NAME_D1()]

pow.c
    15: #define VecSmallPowX v_u64 (SmallPowX)
    16: #define VecThresPowX v_u64 (ThresPowX)
    17: #define VecSmallPowY v_u64 (SmallPowY)
    18: #define VecThresPowY v_u64 (ThresPowY)
   166: uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1));  [in v_exp_inline()]

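The vandq_u64 (ki, v_u64 (N_EXP - 1)) match is the usual power-of-two table lookup: when N_EXP is a power of two, masking with N_EXP - 1 reduces the key modulo the table size without a division. A scalar sketch, where the table name and size are assumptions for illustration:

    #include <stdint.h>

    #define N_EXP 128                    /* assumed power-of-two table size. */
    extern const double exp_tbl[N_EXP];  /* hypothetical lookup table.       */

    /* ki & (N_EXP - 1) == ki % N_EXP for power-of-two N_EXP,
       so the table index wraps with a single AND.  */
    static inline double lookup_scale (uint64_t ki)
    {
      return exp_tbl[ki & (N_EXP - 1)];
    }
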
sinh.c
    39: return v_call_f64 (sinh, x, x, v_u64 (-1));  [in special_case()]
    54: vbslq_u64 (v_u64 (0x8000000000000000), ix, d->halff));  [in V_NAME_D1()]

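vbslq_u64 (mask, a, b) is a bitwise select: bits come from a where the mask is set and from b elsewhere. With the sign mask it builds copysign-style values; here it presumably attaches x's sign to the 0.5 held in d->halff. A scalar equivalent of the select, with hypothetical asuint64/asdouble helpers:

    #include <stdint.h>
    #include <string.h>

    static inline uint64_t asuint64 (double x) { uint64_t u; memcpy (&u, &x, sizeof u); return u; }
    static inline double asdouble (uint64_t u) { double x; memcpy (&x, &u, sizeof x); return x; }

    /* Scalar analogue of vbslq_u64: bits of a under the mask, bits of b
       outside it.  With the sign mask this behaves like copysign.  */
    static inline uint64_t bitsel (uint64_t mask, uint64_t a, uint64_t b)
    {
      return (a & mask) | (b & ~mask);
    }

    /* +-0.5 carrying the sign of x. */
    static inline double half_with_sign_of (double x)
    {
      return asdouble (bitsel (0x8000000000000000ULL, asuint64 (x), asuint64 (0.5)));
    }
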
sinpi.c
    28: # define TinyBound v_u64 (0x3bf0000000000000) /* asuint64(0x1p-64). */
    30: # define Thresh v_u64 (0x07f0000000000000)

v_sincos_common.h
    76: = vshlq_n_u64 (vandq_u64 (vreinterpretq_u64_s64 (n), v_u64 (2)), 62);  [in v_sincos_inline()]
    78: vandq_u64 (vreinterpretq_u64_s64 (vaddq_s64 (n, v_s64 (1))), v_u64 (2)),  [in v_sincos_inline()]

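The vshlq_n_u64 (vandq_u64 (n, v_u64 (2)), 62) step lifts bit 1 of the quadrant index into bit 63, the floating-point sign bit, so a result lane can be negated with a single XOR rather than a select. A scalar sketch of that sign application, again with hypothetical asuint64/asdouble helpers:

    #include <stdint.h>
    #include <string.h>

    static inline uint64_t asuint64 (double x) { uint64_t u; memcpy (&u, &x, sizeof u); return u; }
    static inline double asdouble (uint64_t u) { double x; memcpy (&x, &u, sizeof x); return x; }

    /* Negate y exactly when bit 1 of the quadrant index n is set:
       (n & 2) << 62 places that bit at position 63 (the IEEE sign bit),
       and XORing it in flips the sign without a branch or a select.  */
    static inline double apply_quadrant_sign (double y, int64_t n)
    {
      uint64_t sign = ((uint64_t) n & 2) << 62;
      return asdouble (asuint64 (y) ^ sign);
    }
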
sin.c
    32: # define TinyBound v_u64 (0x3020000000000000)
    34: # define Thresh v_u64 (0x1160000000000000)

atan2.c
    46: #define SignMask v_u64 (0x8000000000000000)
    64: return vcgeq_u64 (vsubq_u64 (vaddq_u64 (i, i), v_u64 (1)), d->zeroinfnan);  [in zeroinfnan()]

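The zeroinfnan() predicate works because doubling the bit pattern discards the sign bit, and the subsequent -1 wraps +-0 around to UINT64_MAX, so one unsigned compare against a precomputed threshold (presumably 2*asuint64(inf) - 1 in d->zeroinfnan) catches zeros, infinities and NaNs of either sign. Scalar form of the same test:

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static inline uint64_t asuint64 (double x) { uint64_t u; memcpy (&u, &x, sizeof u); return u; }

    /* True for +-0, +-Inf and NaN: 2*i drops the sign bit, the -1 wraps
       zero to UINT64_MAX, and Inf/NaN encodings land at or above the
       doubled-infinity threshold.  */
    static inline int zeroinfnan (double x)
    {
      return 2 * asuint64 (x) - 1 >= 2 * asuint64 (INFINITY) - 1;
    }
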
log1p.c
    23: #define BottomMask v_u64 (0xffffffff)

cbrt.c
    35: #define MantissaMask v_u64 (0x000fffffffffffff)

v_log1p_inline.h
    41: #define BottomMask v_u64 (0xffffffff)

erf.c
   146: y = vbslq_f64 (v_u64 (AbsMask), y, x);  [in V_NAME_D1()]

v_math.h
   144: v_u64 (uint64_t x)  [in v_u64() function]

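v_math.h is where the helper common to every match in this listing is defined: v_u64 broadcasts a 64-bit constant into both lanes of a uint64x2_t so immediate bit patterns can feed vector compares, masks and bias arithmetic. Its likely shape, as a thin wrapper over the NEON duplicate intrinsic:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Splat one uint64_t across both lanes of a vector. */
    static inline uint64x2_t
    v_u64 (uint64_t x)
    {
      return vdupq_n_u64 (x);
    }
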
erfc.c
   111: uint64x2_t cmp = vcltq_u64 (vaddq_u64 (ix, ix), v_u64 (TinyBound));  [in V_NAME_D1()]

/freebsd/contrib/arm-optimized-routines/math/aarch64/experimental/advsimd/

erfinv_25u.c
    76: float64x2_t ts = vbslq_f64 (v_u64 (0x7fffffffffffffff), t, x);  [in special()]