/freebsd/contrib/arm-optimized-routines/math/aarch64/experimental/

atanf_common.h (hits in eval_poly):
    27:  float z4 = z2 * z2;   (local definition)
    31:      z4, z4 * pairwise_poly_3_f32 (z2, z4, __atanf_poly_data.poly + 4),
    32:      pairwise_poly_3_f32 (z2, z4, __atanf_poly_data.poly));

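The atanf hits build z4 = z2*z2 and feed it to a pairwise evaluation helper. A minimal scalar sketch of that scheme (the function name pairwise_poly_3 and the coefficient layout are assumptions read off the call sites above, not copied from the header):

/* Degree-3 polynomial in z2, split so the two halves are independent
 * and can execute in parallel: (c0 + c1*z2) + z4*(c2 + c3*z2). */
static float
pairwise_poly_3 (float z2, float z4, const float *c)
{
  float p01 = c[0] + c[1] * z2;
  float p23 = c[2] + c[3] * z2;
  return p01 + z4 * p23;
}
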
asinh_2u5.c (hits in asinh):
    59:  double z4 = z2 * z2;   (local definition)
    60:  double z8 = z4 * z4;
    61:  double p = estrin_17_f64 (x2, z2, z4, z8, z8 * z8, __asinh_data.poly);

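estrin_17_f64 evaluates a degree-17 polynomial with Estrin's scheme, and the precomputed powers passed above (z2, z4, z8, z8*z8) are its building blocks. A minimal degree-7 version of the same idea (names and coefficient layout are illustrative, not the library's actual helper):

/* Estrin's scheme: independent multiply-add pairs combined with
 * precomputed powers, giving a shallow evaluation tree with more
 * instruction-level parallelism than Horner's serial chain. */
static double
estrin_7 (double z, double z2, double z4, const double *c)
{
  double p01 = c[0] + c[1] * z;
  double p23 = c[2] + c[3] * z;
  double p45 = c[4] + c[5] * z;
  double p67 = c[6] + c[7] * z;
  double p03 = p01 + z2 * p23;
  double p47 = p45 + z2 * p67;
  return p03 + z4 * p47;
}
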
asin_3u.c (hits in asin):
    83:  double z4 = z2 * z2;   (local definition)
    84:  double z8 = z4 * z4;
    86:  double p = estrin_11_f64 (z2, z4, z8, z16, __asin_poly);

acos_2u.c (hits in acos):
    76:  double z4 = z2 * z2;   (local definition)
    77:  double z8 = z4 * z4;
    79:  double p = estrin_11_f64 (z2, z4, z8, z16, __asin_poly);

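Note that acos_2u.c evaluates the same __asin_poly table as asin_3u.c. A hedged sketch of why that reuse works, using the standard identities (this is not the exact FreeBSD reduction; asin_core stands in for the polynomial-based asin kernel):

#include <math.h>

static double
acos_via_asin (double x, double (*asin_core) (double))
{
  if (fabs (x) < 0.5)
    return M_PI_2 - asin_core (x);           /* acos(x) = pi/2 - asin(x) */
  double z = sqrt ((1.0 - fabs (x)) / 2.0);  /* reduction for |x| >= 0.5 */
  double y = 2.0 * asin_core (z);            /* acos(|x|) = 2*asin(z)    */
  return x < 0.0 ? M_PI - y : y;             /* acos(-x) = pi - acos(x)  */
}
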
/freebsd/contrib/arm-optimized-routines/math/aarch64/advsimd/

atanf.c (hits in V_NAME_F1):
    86:  float32x4_t z4 = vmulq_f32 (z2, z2);   (local definition)
    89:      v_pairwise_poly_3_f32 (z2, z4, d->poly), z4,
    90:      vmulq_f32 (z4, v_pairwise_poly_3_f32 (z2, z4, d->poly + 4)));

asin.c (hits in V_NAME_D1):
    87:  float64x2_t z4 = vmulq_f64 (z2, z2);   (local definition)
    88:  float64x2_t z8 = vmulq_f64 (z4, z4);
    98:  float64x2_t p03 = vfmaq_f64 (p01, z4, p23);
   102:  float64x2_t p47 = vfmaq_f64 (p45, z4, p67);
   106:  float64x2_t p811 = vfmaq_f64 (p89, z4, p1011);

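The p03/p47/p811 lines are Estrin pair-combining done with fused multiply-add: vfmaq_f64(a, b, c) returns a + b*c per lane. A self-contained sketch of one such pairing step (the coefficients are placeholders, not the routine's table):

#include <arm_neon.h>

static float64x2_t
estrin_pair_demo (float64x2_t z2, float64x2_t z4, const double *c)
{
  float64x2_t c0 = vdupq_n_f64 (c[0]), c1 = vdupq_n_f64 (c[1]);
  float64x2_t c2 = vdupq_n_f64 (c[2]), c3 = vdupq_n_f64 (c[3]);
  float64x2_t p01 = vfmaq_f64 (c0, z2, c1);  /* c0 + c1*z2  */
  float64x2_t p23 = vfmaq_f64 (c2, z2, c3);  /* c2 + c3*z2  */
  return vfmaq_f64 (p01, z4, p23);           /* p01 + z4*p23 */
}
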
atan2f.c (hits in V_NAME_F2):
    92:  float32x4_t z4 = vmulq_f32 (z2, z2);   (local definition)
    99:  float32x4_t p03 = vfmaq_f32 (p01, z4, p23);
   100:  float32x4_t p47 = vfmaq_f32 (p45, z4, p67);
   102:  float32x4_t ret = vfmaq_f32 (p03, z4, vmulq_f32 (z4, p47));

acos.c (hits in V_NAME_D1):
    92:  float64x2_t z4 = vmulq_f64 (z2, z2);   (local definition)
    93:  float64x2_t z8 = vmulq_f64 (z4, z4);
    95:  float64x2_t p = v_estrin_11_f64 (z2, z4, z8, z16, d->poly);

tanf.c (hits in eval_poly):
    59:  float32x4_t z4 = vmulq_f32 (z2, z2);   (local definition)
    60:  return v_estrin_5_f32 (z, z2, z4, d->poly);

/freebsd/contrib/arm-optimized-routines/math/aarch64/sve/

atanf.c (hits in SV_NAME_F1):
    55:  svfloat32_t z4 = svmul_x (pg, z2, z2);   (local definition)
    56:  svfloat32_t z8 = svmul_x (pg, z4, z4);
    58:  svfloat32_t y = sv_estrin_7_f32_x (pg, z2, z4, z8, d->poly);

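In the SVE hits, the _x suffix means inactive lanes take unspecified values (the cheapest predication form), with the governing predicate pg selecting active lanes; svmla_x(pg, a, b, c) computes a + b*c. A sketch of the power-building and pair-combining pattern, assuming a compiler with SVE ACLE support:

#include <arm_sve.h>

static svfloat32_t
sve_estrin_step (svbool_t pg, svfloat32_t z2,
                 svfloat32_t p01, svfloat32_t p23)
{
  svfloat32_t z4 = svmul_x (pg, z2, z2);  /* z2*z2 on active lanes  */
  return svmla_x (pg, p01, z4, p23);      /* p01 + z4*p23           */
}
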
asin.c (hits in SV_NAME_D1):
    63:  svfloat64_t z4 = svmul_x (pg, z2, z2);   (local definition)
    64:  svfloat64_t z8 = svmul_x (pg, z4, z4);
    66:  svfloat64_t p = sv_estrin_11_f64_x (pg, z2, z4, z8, z16, d->poly);

acos.c (hits in SV_NAME_D1):
    63:  svfloat64_t z4 = svmul_x (pg, z2, z2);   (local definition)
    64:  svfloat64_t z8 = svmul_x (pg, z4, z4);
    66:  svfloat64_t p = sv_estrin_11_f64_x (pg, z2, z4, z8, z16, d->poly);

tanf.c (hits in SV_NAME_F1):
    86:  svfloat32_t z4 = svmul_x (pg, z2, z2);   (local definition)
    87:  svfloat32_t p = svmla_x (pg, p01, z4, p23);
    89:  svfloat32_t z8 = svmul_x (pg, z4, z4);

atan2f.c (hits in SV_NAME_F2):
    83:  svfloat32_t z4 = svmul_x (pg, z2, z2);   (local definition)
    84:  svfloat32_t z8 = svmul_x (pg, z4, z4);
    86:  svfloat32_t ret = sv_estrin_7_f32_x (pg, z2, z4, z8, data_ptr->poly);

/freebsd/sys/contrib/openzfs/module/icp/algs/edonr/

edonr.c (macro hits, plus the declaration inside Q512):
    78:  s5 = rotl64(z1 + z3 + x5, 40); z4 = x5 + x6; \
    79:  s6 = rotl64(z2 + z4 + x0, 50); z6 = z3 + z4; \
    93:  z4 = y6 + y7, z8 = z3 + z4; \
    95:  t7 = rotl64(z8 + y0, 55); z7 = z2 + z4; \
   106:  r3 = (z3 ^ s4) + (t0 ^ z7); z4 = s5 ^ s6; \
   107:  r5 = (s3 ^ z4) + (z7 ^ t6); z6 = t2 ^ t5; \
   108:  r6 = (s2 ^ z4) + (z6 ^ t7); z2 = s1 ^ s7; \
   124:  uint64_t z1, z2, z3, z4, z5, z6, z7, z8;   (in Q512, local definition)

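rotl64 in the Q512 mixing macro is the usual constant-time 64-bit left rotate; a portable sketch (the EDON-R source's actual definition may differ in form, but not in effect):

#include <stdint.h>

/* Rotate left by n bits, valid for 0 < n < 64 (the macro uses 40, 50, 55). */
static inline uint64_t
rotl64 (uint64_t x, unsigned n)
{
  return (x << n) | (x >> (64 - n));
}
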
/freebsd/sys/contrib/libsodium/src/libsodium/crypto_stream/salsa20/xmm6int/

u8.h (file-scope hits; list truncated):
    18:  __m256i z4 = _mm256_set1_epi32(x[12]);   (variable definition)
    27:  __m256i orig4 = z4;
    96:  z4 = orig4;
   109:  z4 = _mm256_xor_si256(z4, y4);
   111:  z4 = _mm256_xor_si256(z4, r4);
   122:  y8 = _mm256_add_epi32(y8, z4);
   137:  y12 = z4;
   241:  y6 = z4;
   277:  z4 = _mm256_xor_si256(z4, y4);
   279:  z4 = _mm256_xor_si256(z4, r4);
   [all …]

u4.h (file-scope hits; list truncated):
     4:  __m128i z0, z1, z2, z3, z4, z5, z6, z7, z8, z9, z10, z11, z12, z13, z14,   (variable definition)
    31:  z4 = _mm_shuffle_epi32(z3, 0x00);
    39:  orig4 = z4;
    96:  z4 = orig4;
   109:  z4 = _mm_xor_si128(z4, y4);
   111:  z4 = _mm_xor_si128(z4, r4);
   122:  y8 = _mm_add_epi32(y8, z4);
   137:  y12 = z4;
   241:  y6 = z4;
   277:  z4 = _mm_xor_si128(z4, y4);
   [all …]

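The paired XORs in both headers (z4 ^= y4, then z4 ^= r4) fold in the two shifted halves of a 32-bit rotate, since SSE2/AVX2 have no vector rotate instruction; y4 and r4 hold the left- and right-shifted parts of the quarter-round sum. A sketch of the equivalent one-step rotate for the SSE2 (u4.h) case:

#include <emmintrin.h>

/* rotl(v, n) per 32-bit lane, built from two shifts combined with XOR
 * (the shifted halves have disjoint bits, so XOR equals OR here);
 * Salsa20's quarter round then XORs this into the state word. */
static __m128i
rotl32x4 (__m128i v, int n)
{
  return _mm_xor_si128 (_mm_slli_epi32 (v, n),
                        _mm_srli_epi32 (v, 32 - n));
}
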
/freebsd/contrib/bearssl/src/symcipher/

aes_ct.c (hits in br_aes_ct_bitslice_Sbox):
    45:  uint32_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9;   (local definition)
   145:  z4 = t40 & y1;
   175:  t58 = z4 ^ t46;
   181:  t64 = z4 ^ t59;

aes_ct64.c (hits in br_aes_ct64_bitslice_Sbox):
    45:  uint64_t z0, z1, z2, z3, z4, z5, z6, z7, z8, z9;   (local definition)
   145:  z4 = t40 & y1;
   175:  t58 = z4 ^ t46;
   181:  t64 = z4 ^ t59;

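Context for the Sbox hits: in bitsliced form each word holds one bit position of many AES states, so plain AND/XOR (for example "z4 = t40 & y1") act as parallel GF(2) multiply/add with no secret-dependent branches or table lookups, which is what makes the S-box constant-time. A toy illustration:

#include <stdint.h>

/* With states packed one bit per lane of each word, one AND performs 64
 * simultaneous GF(2) multiplications and one XOR performs 64 additions. */
static void
bitsliced_gf2_ops (uint64_t a, uint64_t b, uint64_t *prod, uint64_t *sum)
{
  *prod = a & b;
  *sum  = a ^ b;
}
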
/freebsd/contrib/llvm-project/lldb/source/Utility/

ARM64_DWARF_Registers.h:
   119:  z4,   (enumerator)

ARM64_ehframe_Registers.h:
   118:  z4,   (enumerator)

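Both LLDB headers enumerate z4 in DWARF/eh_frame register order. In the AArch64 DWARF numbering the SVE vector registers z0..z31 occupy numbers 96..127, which is consistent with DwarfRegNum<[100]> for Z4 in the AArch64RegisterInfo.td hit at the end of this listing. A sketch of that mapping (the macro name is illustrative, not from LLDB):

/* AArch64 DWARF: z0 is register 96, so z(n) is 96 + n; z4 == 100. */
enum { AARCH64_DWARF_Z0 = 96 };
#define AARCH64_DWARF_Z(n) (AARCH64_DWARF_Z0 + (n))
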
/freebsd/contrib/llvm-project/lldb/source/Plugins/Process/Utility/

RegisterInfos_arm64_sve.h:
   415:  DEFINE_VREG_SVE(v4, z4),
   449:  DEFINE_FPU_PSEUDO_SVE(s4, 4, z4),
   482:  DEFINE_FPU_PSEUDO_SVE(d4, 8, z4),
   521:  DEFINE_ZREG(z4),

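A reading of these hits, inferred from the macro names rather than quoted from the header: v4 is defined as the NEON view of z4, and s4/d4 as its 4- and 8-byte pseudo-register views, reflecting that architecturally the SVE z4 register extends the 128-bit v4/q4 register.
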
/freebsd/sys/contrib/libsodium/src/libsodium/crypto_core/ed25519/ref10/

ed25519_ref10.c (hits in ge25519_is_on_curve):
   956:  fe25519 z4;   (local definition)
   968:  fe25519_sq(z4, z2);
   969:  fe25519_add(t1, t1, z4);

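Context for these hits (an inference from the identifiers, not quoted from the file): ge25519_is_on_curve checks the projective form of the twisted Edwards equation. Clearing denominators in -x^2 + y^2 = 1 + d*x^2*y^2 with x = X/Z, y = Y/Z gives

    (Y^2 - X^2) * Z^2 = Z^4 + d * X^2 * Y^2

so fe25519_sq(z4, z2) squares Z^2 to obtain the Z^4 term, and the fe25519_add folds it into one side of the comparison.
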
/freebsd/contrib/file/magic/Magdir/

database:
   641:  # skip some Xbase Index files *.ndx and Infocom (Z-machine 4) *.z4 handled by ./adventure

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64RegisterInfo.td:
   862:  def Z4 : AArch64Reg<4, "z4", [Q4]>, DwarfRegNum<[100]>;