
Searched refs: __v2df (Results 1 – 9 of 9), sorted by relevance

/freebsd/contrib/llvm-project/clang/lib/Headers/
emmintrin.h
27 typedef double __v2df __attribute__((__vector_size__(16)));
94 return (__m128d)((__v2df)__a + (__v2df)__b); in _mm_add_pd()
134 return (__m128d)((__v2df)__a - (__v2df)__b); in _mm_sub_pd()
173 return (__m128d)((__v2df)__a * (__v2df)__b); in _mm_mul_pd()
214 return (__m128d)((__v2df)__a / (__v2df)__b); in _mm_div_pd()
238 __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b); in _mm_sqrt_sd()
254 return __builtin_ia32_sqrtpd((__v2df)__a); in _mm_sqrt_pd()
279 return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b); in _mm_min_sd()
300 return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b); in _mm_min_pd()
325 return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b); in _mm_max_sd()
[all …]
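
The typedef at line 27 is what makes the arithmetic entries above one-liners: GCC/Clang vector types support the ordinary arithmetic operators element-wise, so _mm_add_pd can be a plain +. A minimal standalone sketch (my_v2df is an illustrative name, chosen so it won't collide with the header's own typedef):

/* Compiles with GCC or Clang; no intrinsics header needed. */
#include <stdio.h>

typedef double my_v2df __attribute__((__vector_size__(16))); /* 2 x double */

int main(void) {
  my_v2df a = {1.0, 2.0};
  my_v2df b = {3.0, 4.0};
  my_v2df c = a + b;              /* element-wise, as in _mm_add_pd: {4.0, 6.0} */
  printf("%f %f\n", c[0], c[1]);
  return 0;
}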

fma4intrin.h
32 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_macc_pd()
44 return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_macc_sd()
56 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_msub_pd()
68 return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_msub_sd()
80 return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_nmacc_pd()
92 return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_nmacc_sd()
104 return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_nmsub_pd()
116 return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_nmsub_sd()
128 return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_maddsub_pd()
140 return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_msubadd_pd()
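
Every fma4intrin.h entry above funnels into a single fused builtin per shape; the msub/nmacc/nmsub variants come from negating operands, since unary minus on a vector type negates each lane. A hedged model of that derivation (fmadd2 is an illustrative stand-in for the hardware builtin; link with -lm):

#include <math.h>
#include <stdio.h>

typedef double v2df __attribute__((__vector_size__(16)));

/* a*b + c in each lane, with one rounding, like the fused builtin. */
static v2df fmadd2(v2df a, v2df b, v2df c) {
  v2df r = {fma(a[0], b[0], c[0]), fma(a[1], b[1], c[1])};
  return r;
}

int main(void) {
  v2df a = {2.0, 3.0}, b = {4.0, 5.0}, c = {1.0, 1.0};
  v2df msub  = fmadd2(a, b, -c);   /* a*b - c,    cf. _mm_msub_pd  */
  v2df nmacc = fmadd2(-a, b, c);   /* -(a*b) + c, cf. _mm_nmacc_pd */
  printf("%g %g | %g %g\n", msub[0], msub[1], nmacc[0], nmacc[1]); /* 7 14 | -7 -14 */
  return 0;
}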

fmaintrin.h
58 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmadd_pd()
116 return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmadd_sd()
156 return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_fmsub_pd()
214 return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_fmsub_sd()
254 return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fnmadd_pd()
312 return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C); in _mm_fnmadd_sd()
352 return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_fnmsub_pd()
410 return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C); in _mm_fnmsub_sd()
462 return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); in _mm_fmaddsub_pd()
514 return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); in _mm_fmsubadd_pd()
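
From the caller's side, each of these FMA3 forms computes a*b + c (or a sign-flipped variant) per lane with a single rounding. A usage sketch, assuming an FMA-capable x86 target (compile with -mfma):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(3.0, 2.0);   /* lanes, low to high: {2.0, 3.0} */
  __m128d b = _mm_set_pd(5.0, 4.0);
  __m128d c = _mm_set_pd(1.0, 1.0);
  __m128d r = _mm_fmadd_pd(a, b, c);  /* per lane: a*b + c */
  double out[2];
  _mm_storeu_pd(out, r);
  printf("%g %g\n", out[0], out[1]);  /* 9 16 */
  return 0;
}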

smmintrin.h
315 ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
356 ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
385 ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \
386 (__v2df)(__m128d)(V2), (int)(M)))
439 return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2, in _mm_blendv_pd()
440 (__v2df)__M); in _mm_blendv_pd()
631 ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
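
The blendv entry above selects each result lane by the sign bit of the corresponding mask lane, which is why the mask is passed as a __v2df rather than an immediate. Usage sketch (SSE4.1; compile with -msse4.1):

#include <smmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d v1 = _mm_set_pd(20.0, 10.0);
  __m128d v2 = _mm_set_pd(200.0, 100.0);
  __m128d m  = _mm_set_pd(-0.0, 0.0);  /* sign bit set only in the high lane */
  __m128d r  = _mm_blendv_pd(v1, v2, m);
  double out[2];
  _mm_storeu_pd(out, r);
  printf("%g %g\n", out[0], out[1]);   /* 10 200 */
  return 0;
}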

avxintrin.h
799 return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c); in _mm_permutevar_pd()
1015 ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C)))
2557 return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b); in _mm_testz_pd()
2586 return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b); in _mm_testc_pd()
2616 return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b); in _mm_testnzc_pd()
3105 return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b, in _mm256_broadcast_pd()
3396 return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m); in _mm_maskload_pd()
3518 __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a); in _mm_maskstore_pd()
4530 (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3); in _mm256_castpd128_pd256()
4592 return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3); in _mm256_zextpd128_pd256()
[all …]
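
The casts at lines 4530 and 4592 both lean on __builtin_shufflevector treating its two arguments as one concatenated index space: picking lanes 0-3 of {a, zero} widens 2 x double to 4 x double with zeroed upper lanes. A standalone sketch of the zero-extending case (Clang builtin; the typedef names are illustrative):

#include <stdio.h>

typedef double v2df __attribute__((__vector_size__(16)));
typedef double v4df __attribute__((__vector_size__(32)));

int main(void) {
  v2df a = {1.0, 2.0};
  v2df zero = {0.0, 0.0};
  /* Indices 0-1 come from a, 2-3 from zero. */
  v4df wide = __builtin_shufflevector(a, zero, 0, 1, 2, 3);
  printf("%g %g %g %g\n", wide[0], wide[1], wide[2], wide[3]); /* 1 2 0 0 */
  return 0;
}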

xopintrin.h
713 ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
714 (__v2df)(__m128d)(Y), \
740 return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A); in _mm_frcz_sd()
752 return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A); in _mm_frcz_pd()
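
VFRCZPD ("fraction extract") keeps the part of each lane that truncation discards. Since the intrinsic requires AMD XOP hardware, what follows is a portable C model of the semantics as I understand them, not the intrinsic itself; link with -lm:

#include <math.h>
#include <stdio.h>

typedef double v2df __attribute__((__vector_size__(16)));

/* Per-lane fractional part, modeling _mm_frcz_pd. */
static v2df frcz_pd(v2df x) {
  v2df r = {x[0] - trunc(x[0]), x[1] - trunc(x[1])};
  return r;
}

int main(void) {
  v2df x = {3.75, -1.25};
  v2df f = frcz_pd(x);
  printf("%g %g\n", f[0], f[1]);  /* 0.75 -0.25 */
  return 0;
}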

avx512fp16intrin.h
1599 ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \
1605 (__v8hf)(A), (__v2df)(B), (__v8hf)(W), (__mmask8)(U), (int)(R)))
1608 ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \
1615 (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1, in _mm_cvtsd_sh()
1624 (__v8hf)__A, (__v2df)__B, (__v8hf)__W, (__mmask8)__U, in _mm_mask_cvtsd_sh()
1631 (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, in _mm_maskz_cvtsd_sh()
1636 ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \
1637 (__v2df)_mm_undefined_pd(), \
1642 (__v2df)(A), (__v8hf)(B), (__v2df)(W), (__mmask8)(U), (int)(R)))
1645 ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \
[all …]
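
The _mm_cvtsd_sh trio above follows the standard AVX-512 masking convention: a result lane holds the computed value where its mask bit is set, and otherwise comes from the passthrough W (mask form) or is zeroed (maskz form). A portable model of the convention (mask_op is illustrative, not a real intrinsic):

#include <stdio.h>

typedef double v2df __attribute__((__vector_size__(16)));

static v2df mask_op(v2df computed, v2df passthrough, unsigned mask) {
  v2df r;
  for (int i = 0; i < 2; ++i)
    r[i] = ((mask >> i) & 1) ? computed[i] : passthrough[i];
  return r;
}

int main(void) {
  v2df computed = {1.5, 2.5}, w = {-1.0, -2.0}, zero = {0.0, 0.0};
  v2df masked = mask_op(computed, w, 0x1);     /* mask form:  {1.5, -2.0} */
  v2df maskz  = mask_op(computed, zero, 0x1);  /* maskz form: {1.5,  0.0} */
  printf("%g %g | %g %g\n", masked[0], masked[1], maskz[0], maskz[1]);
  return 0;
}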

/freebsd/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/
emmintrin.h
48 typedef __vector double __v2df;
126 __v2df __result = (__v2df)__A; in _mm_move_sd()
127 __result[0] = ((__v2df)__B)[0]; in _mm_move_sd()
169 __v2df __tmp = _mm_load_pd(__P); in _mm_loadr_pd()
191 *__P = ((__v2df)__A)[0]; in _mm_store_sd()
197 return ((__v2df)__A)[0]; in _mm_cvtsd_f64()
210 *__P = ((__v2df)__A)[1]; in _mm_storeh_pd()
250 return (__m128d)((__v2df)__A + (__v2df)__B); in _mm_add_pd()
266 return (__m128d)((__v2df)__A - (__v2df)__B); in _mm_sub_pd()
279 return (__m128d)((__v2df)__A * (__v2df)__B); in _mm_mul_pd()
[all …]
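
The ppc_wrappers headers remap the x86 API onto AltiVec/VSX: __v2df becomes a native __vector double, and element subscripting stands in for scalar extract/insert intrinsics. A sketch of that style (PowerPC with VSX; compile with -mvsx, and the real wrappers also expect -DNO_WARN_X86_INTRINSICS):

#include <altivec.h>
#include <stdio.h>

typedef __vector double v2df;    /* what ppc_wrappers/emmintrin.h calls __v2df */

int main(void) {
  v2df a = {1.0, 2.0};
  v2df b = {9.0, 8.0};
  v2df r = a;
  r[0] = b[0];                   /* cf. _mm_move_sd: low lane from B, rest from A */
  printf("%g %g\n", r[0], r[1]); /* 9 2 */
  return 0;
}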

pmmintrin.h
59 const __v2df __even_n0 = {-0.0, 0.0}; in _mm_addsub_pd()
60 __v2df __even_neg_Y = vec_xor(__Y, __even_n0); in _mm_addsub_pd()
93 return (__m128d)vec_add(vec_mergeh((__v2df)__X, (__v2df)__Y), in _mm_hadd_pd()
94 vec_mergel((__v2df)__X, (__v2df)__Y)); in _mm_hadd_pd()
100 return (__m128d)vec_sub(vec_mergeh((__v2df)__X, (__v2df)__Y), in _mm_hsub_pd()
101 vec_mergel((__v2df)__X, (__v2df)__Y)); in _mm_hsub_pd()
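
The pmmintrin.h entries show the classic addsub trick: XORing __Y with {-0.0, 0.0} flips the sign bit of lane 0 only, so one vector add then subtracts in lane 0 and adds in lane 1. A portable model of the same bit manipulation (memcpy does the float/int punning that vec_xor performs directly):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef double   v2df __attribute__((__vector_size__(16)));
typedef uint64_t v2du __attribute__((__vector_size__(16)));

int main(void) {
  v2df x = {10.0, 10.0}, y = {3.0, 3.0};
  v2df even_n0 = {-0.0, 0.0};     /* sign bit set in lane 0 only */
  v2du yi, ni;
  memcpy(&yi, &y, 16);
  memcpy(&ni, &even_n0, 16);
  v2du flipped = yi ^ ni;         /* cf. vec_xor: negates lane 0 of y */
  v2df yneg;
  memcpy(&yneg, &flipped, 16);
  v2df r = x + yneg;              /* lane 0: x - y, lane 1: x + y */
  printf("%g %g\n", r[0], r[1]);  /* 7 13 */
  return 0;
}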