/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
  X86LegalizerInfo.cpp
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/GISel/
  X86LegalizerInfo.cpp
    50   const LLT s16 = LLT::scalar(16); in X86LegalizerInfo() local
    90   return typeInSet(0, {p0, s1, s8, s16, s32, s64})(Query) || in X86LegalizerInfo()
    96   return typeInSet(0, {p0, s8, s16, s32})(Query) || in X86LegalizerInfo()
    140  if (typeInSet(0, {s8, s16, s32})(Query)) in X86LegalizerInfo()
    155  .clampMinNumElements(0, s16, 8) in X86LegalizerInfo()
    159  .clampMaxNumElements(0, s16, HasBWI ? 32 : (HasAVX2 ? 16 : 8)) in X86LegalizerInfo()
    168  return typePairInSet(0, 1, {{s8, s1}, {s16, s1}, {s32, s1}})(Query) || in X86LegalizerInfo()
    179  if (typeInSet(0, {s8, s16, s32})(Query)) in X86LegalizerInfo()
    199  .clampMinNumElements(0, s16, 8) in X86LegalizerInfo()
    202  .clampMaxNumElements(0, s16, HasBWI ? 32 : (HasAVX2 ? 16 : 8)) in X86LegalizerInfo()
    [all …]
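Every LLVM hit in this listing follows the same GlobalISel idiom: the target's LegalizerInfo constructor builds LLT tokens such as s16 = LLT::scalar(16) and feeds them to legality rules. The sketch below is illustrative only (a made-up target class and an arbitrary opcode, not X86's actual rule set), assuming the in-tree LegalizerInfo/LegalizeRuleSet API; exact header locations differ between LLVM releases.

    // Illustrative GlobalISel legalizer: how the s16 LLT token is created and
    // used in legality rules (mirrors the X86/AArch64 hits above).
    #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    #include "llvm/CodeGen/TargetOpcode.h"

    using namespace llvm;

    struct MyTargetLegalizerInfo : public LegalizerInfo {   // hypothetical target
      MyTargetLegalizerInfo() {
        const LLT s16 = LLT::scalar(16);             // 16-bit scalar
        const LLT s32 = LLT::scalar(32);
        const LLT v8s16 = LLT::fixed_vector(8, s16); // <8 x s16>

        getActionDefinitionsBuilder(TargetOpcode::G_ADD)
            .legalFor({s16, s32, v8s16})      // natively supported types
            .clampScalar(0, s16, s32)         // force odd scalar widths into [s16, s32]
            .clampMaxNumElements(0, s16, 8);  // split wider s16 vectors down to 8 lanes

        getLegacyLegalizerInfo().computeTables();
      }
    };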
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/
  AArch64LegalizerInfo.cpp
    48   const LLT s16 = LLT::scalar(16); in AArch64LegalizerInfo() local
    65   const LLT nxv8s16 = LLT::scalable_vector(8, s16); in AArch64LegalizerInfo()
    75   std::initializer_list<LLT> ScalarAndPtrTypesList = {s8, s16, s32, s64, p0}; in AArch64LegalizerInfo()
    87   // Some instructions only support s16 if the subtarget has full 16-bit FP in AArch64LegalizerInfo()
    90   const LLT &MinFPScalar = HasFP16 ? s16 : s32; in AArch64LegalizerInfo()
    97   .legalFor({p0, s8, s16, s32, s64}) in AArch64LegalizerInfo()
    109  .legalFor({p0, s16, s32, s64}) in AArch64LegalizerInfo()
    112  .clampScalar(0, s16, s64) in AArch64LegalizerInfo()
    115  .clampMaxNumElements(0, s16, 8) in AArch64LegalizerInfo()
    134  .clampMaxNumElements(0, s16, 8) in AArch64LegalizerInfo()
    [all …]
  AArch64InstructionSelector.cpp
    2601  const LLT s16 = LLT::scalar(16); in select() local
    2614  if (Ty != s16 && Ty != s32 && Ty != s64 && Ty != s128) { in select()
    2616  << " constant, expected: " << s16 << " or " << s32 in select()
    2636  if (Ty != p0 && Ty != s8 && Ty != s16) { in select()
    3514  // When the scalar of G_DUP is an s8/s16 gpr, they can't be selected by in select()
    3515  // imported patterns. Do it manually here. Avoiding generating s16 gpr is in select()
    5992  const LLT S16 = LLT::scalar(16); in selectIntrinsicWithSideEffects() local
    6017  else if (Ty == LLT::fixed_vector(4, S16)) in selectIntrinsicWithSideEffects()
    6019  else if (Ty == LLT::fixed_vector(8, S16)) in selectIntrinsicWithSideEffects()
    6041  else if (Ty == LLT::fixed_vector(4, S16)) in selectIntrinsicWithSideEffects()
    [all …]
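The AArch64InstructionSelector hits show the other side of the API: reading back the LLT that was assigned to a generic virtual register and branching on it. A minimal sketch, assuming only MachineRegisterInfo::getType() and LLT equality; the helper name and the accepted set of widths are made up for illustration.

    // Illustrative: accept only constants whose LLT is one of the scalar widths
    // the selector knows how to materialize (compare the checks at lines 2614/2636).
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    static bool isSelectableConstantType(Register Reg,
                                         const MachineRegisterInfo &MRI) {
      const LLT s16 = LLT::scalar(16);
      const LLT s32 = LLT::scalar(32);
      const LLT s64 = LLT::scalar(64);
      const LLT s128 = LLT::scalar(128);

      const LLT Ty = MRI.getType(Reg);   // LLT recorded for this generic vreg
      return Ty == s16 || Ty == s32 || Ty == s64 || Ty == s128;
    }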
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
  AMDGPULegalizerInfo.cpp
    70   /// additional element. This is mostly to handle <3 x s16> -> <4 x s16>. This
    180  // <2 x s8> -> s16 in getBitcastRegisterType()
    284  static const LLT S16 = LLT::scalar(16); variable
    373  // TODO: Should load to s16 be legal? Most loads extend to 32-bits, but we
    486  // The current selector can't handle <6 x s16>, <8 x s16>, s96, s128 etc, so
    693  S32, S64, S16 in AMDGPULegalizerInfo()
    697  S32, S64, S16, V2S16 in AMDGPULegalizerInfo()
    700  const LLT MinScalarFPTy = ST.has16BitInsts() ? S16 : S32; in AMDGPULegalizerInfo()
    708  .legalFor({S32, S64, V2S16, S16, V4S16, S1, S128, S256}) in AMDGPULegalizerInfo()
    715  .clampScalar(0, S16, S256) in AMDGPULegalizerInfo()
    [all …]
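The AMDGPU comment at line 70 refers to rounding awkward vector types up by one element, <3 x s16> becoming <4 x s16>. Below is a sketch of that kind of type computation using the LLT helpers; it is purely illustrative, not the in-tree logic, which handles many more cases.

    // Pad an odd-element fixed vector such as <3 x s16> to the next even count.
    #include "llvm/CodeGenTypes/LowLevelType.h"  // header location varies by LLVM release

    using namespace llvm;

    static LLT getPaddedVectorType(LLT Ty) {
      if (!Ty.isVector() || Ty.isScalable())
        return Ty;                                   // scalars and scalable vectors pass through
      unsigned NumElts = Ty.getNumElements();
      if (NumElts % 2 == 0)
        return Ty;                                   // already even, e.g. <4 x s16>
      return LLT::fixed_vector(NumElts + 1,
                               Ty.getElementType()); // <3 x s16> -> <4 x s16>
    }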
/freebsd/contrib/llvm-project/llvm/lib/Target/M68k/GISel/
  M68kLegalizerInfo.cpp
    25   const LLT s16 = LLT::scalar(16); in M68kLegalizerInfo() local
    30   .legalFor({s8, s16, s32}) in M68kLegalizerInfo()
    42   {s32, p0, s16, 4}, in M68kLegalizerInfo()
    44   {s16, p0, s16, 2}, in M68kLegalizerInfo()
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/
  LegalizationArtifactCombiner.h
    472  // %1:_(s16) = G_TRUNC %0(s32) in tryFoldUnmergeCast()
    526  // <2 x s16> = build_vector s16, s16 in canFoldMergeOpcode()
    527  // <2 x s32> = zext <2 x s16> in canFoldMergeOpcode()
    528  // <2 x s16>, <2 x s16> = unmerge <2 x s32> in canFoldMergeOpcode()
    531  // <2 x s16> = zext s16 <-- scalar to vector in canFoldMergeOpcode()
    532  // <2 x s16> = zext s16 <-- scalar to vector in canFoldMergeOpcode()
    535  // s32 = zext s16 in canFoldMergeOpcode()
    536  // <2 x s16> = bitcast s32 in canFoldMergeOpcode()
    537  // s32 = zext s16 in canFoldMergeOpcode()
    538  // <2 x s16> = bitcast s32 in canFoldMergeOpcode()
    [all …]
/freebsd/tests/sys/sys/
  qmath_test.c
    92   s16q_t s16; in ATF_TC_BODY() local
    94   Q_INI(&s16, QTEST_IV, 0, QTEST_RPSHFT); in ATF_TC_BODY()
    95   Q_TOSTR(s16, -1, 10, buf, sizeof(buf)); in ATF_TC_BODY()
    97   ATF_CHECK_EQ(sizeof(s16) << 3, Q_NTBITS(s16)); in ATF_TC_BODY()
    98   ATF_CHECK_EQ(QTEST_RPSHFT, Q_NFBITS(s16)); in ATF_TC_BODY()
    99   ATF_CHECK_EQ(QTEST_INTBITS(s16), Q_NIBITS(s16)); in ATF_TC_BODY()
    100  ATF_CHECK_EQ(QTEST_QITRUNC(s16, INT16_MAX), Q_IMAXVAL(s16)); in ATF_TC_BODY()
    101  ATF_CHECK_EQ(-Q_IMAXVAL(s16), Q_IMINVAL(s16)); in ATF_TC_BODY()
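qmath_test.c exercises the kernel's Q-number (fixed-point) macros on the 16-bit variant s16q_t. A minimal usage sketch mirroring the calls visible above; the concrete values (integer part 3, 4 fractional bits) and the function name are placeholders, and the macro argument order simply follows the test.

    /* Illustrative s16q_t usage, assuming <sys/qmath.h> as in the test above. */
    #include <sys/qmath.h>
    #include <stdio.h>

    static void
    s16q_example(void)
    {
        s16q_t q;
        char buf[32];

        Q_INI(&q, 3, 0, 4);                    /* value 3.0 with 4 fractional bits */
        Q_TOSTR(q, -1, 10, buf, sizeof(buf));  /* render in base 10 */
        printf("value=%s fracbits=%u intbits=%u\n",
            buf, (unsigned)Q_NFBITS(q), (unsigned)Q_NIBITS(q));
    }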
/freebsd/contrib/llvm-project/llvm/lib/Target/SPIRV/
  SPIRVLegalizerInfo.cpp
    65   const LLT s16 = LLT::scalar(16); in SPIRVLegalizerInfo() local
    111  p0, p1, p2, p3, p4, p5, p6, s1, s8, s16, in SPIRVLegalizerInfo()
    122  s1, s8, s16, s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64, in SPIRVLegalizerInfo()
    126  auto allIntScalarsAndVectors = {s8, s16, s32, s64, v2s8, v2s16, in SPIRVLegalizerInfo()
    133  auto allIntScalars = {s8, s16, s32, s64}; in SPIRVLegalizerInfo()
    135  auto allFloatScalars = {s16, s32, s64}; in SPIRVLegalizerInfo()
    138  s16, s32, s64, v2s16, v2s32, v2s64, v3s16, v3s32, v3s64, in SPIRVLegalizerInfo()
    141  auto allFloatAndIntScalarsAndPtrs = {s8, s16, s32, s64, p0, p1, in SPIRVLegalizerInfo()
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/GISel/
  RISCVLegalizerInfo.cpp
    76   const LLT s16 = LLT::scalar(16); in RISCVLegalizerInfo() local
    96   const LLT nxv1s16 = LLT::scalable_vector(1, s16); in RISCVLegalizerInfo()
    97   const LLT nxv2s16 = LLT::scalable_vector(2, s16); in RISCVLegalizerInfo()
    98   const LLT nxv4s16 = LLT::scalable_vector(4, s16); in RISCVLegalizerInfo()
    99   const LLT nxv8s16 = LLT::scalable_vector(8, s16); in RISCVLegalizerInfo()
    100  const LLT nxv16s16 = LLT::scalable_vector(16, s16); in RISCVLegalizerInfo()
    101  const LLT nxv32s16 = LLT::scalable_vector(32, s16); in RISCVLegalizerInfo()
    266  {s32, p0, s16, 16}, in RISCVLegalizerInfo()
    271  .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}}); in RISCVLegalizerInfo()
    274  {s64, p0, s16, 16}, in RISCVLegalizerInfo()
    [all …]
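Unlike the fixed-width vectors elsewhere in this list, the RISC-V legalizer also builds scalable LLTs: LLT::scalable_vector(N, s16) denotes <vscale x N x s16>, that is, N 16-bit lanes per hardware-determined vscale multiple. A tiny sketch of just that constructor, nothing more; the helper name is made up.

    #include "llvm/CodeGenTypes/LowLevelType.h"  // header location varies by LLVM release

    static llvm::LLT getNxv8s16() {
      const llvm::LLT s16 = llvm::LLT::scalar(16);
      // The bit width of this type is only known up to the runtime vscale factor.
      return llvm::LLT::scalable_vector(8, s16);   // <vscale x 8 x s16>
    }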
/freebsd/sys/contrib/libsodium/src/libsodium/crypto_core/ed25519/ref10/
  ed25519_ref10.c
    1128  int64_t s16; in sc25519_muladd() local
    1187  s16 = in sc25519_muladd()
    1221  carry16 = (s16 + (int64_t) (1L << 20)) >> 21; in sc25519_muladd()
    1223  s16 -= carry16 * ((uint64_t) 1L << 21); in sc25519_muladd()
    1256  s16 += carry15; in sc25519_muladd()
    1273  s16 -= s23 * 683901; in sc25519_muladd()
    1325  carry16 = (s16 + (int64_t) (1L << 20)) >> 21; in sc25519_muladd()
    1327  s16 -= carry16 * ((uint64_t) 1L << 21); in sc25519_muladd()
    1342  s16 += carry15; in sc25519_muladd()
    1352  s4 += s16 * 666643; in sc25519_muladd()
    [all …]
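Here s16 is not a type but the 17th limb of the scalar in sc25519_muladd(), which keeps intermediate products in signed 64-bit limbs of about 21 bits each. The repeated pattern at lines 1221–1223 and 1325–1327 is a rounding carry from one limb into the next; below is a standalone sketch of that single step, not the libsodium code itself.

    #include <stdint.h>

    /*
     * Move the excess of the low limb into the high limb. The rounding term
     * 2^20 makes the carry round to nearest rather than toward zero.
     */
    static void
    propagate_carry(int64_t *lo, int64_t *hi)
    {
        int64_t carry;

        carry = (*lo + (int64_t)(1L << 20)) >> 21;   /* nearest multiple of 2^21 */
        *hi += carry;                                /* push excess into the next limb */
        *lo -= carry * ((uint64_t)1L << 21);         /* low limb now in [-2^20, 2^20) */
    }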
/freebsd/sys/contrib/dev/mediatek/mt76/
  mt76x02.h
    33   s16 temp_offset;
    133  s16 coverage_class;
    186  s16 coverage_class);
/freebsd/sys/contrib/dev/rtw89/
  rtw8852b.h
  chan.c
    997   s16 upper, lower; in __rtw89_mcc_calc_pattern_loose()
    1026  upper = min_t(s16, ref->duration, res); in __rtw89_mcc_calc_pattern_loose()
    1030  upper = min_t(s16, upper, ref->limit.max_toa); in __rtw89_mcc_calc_pattern_loose()
    1031  lower = max_t(s16, lower, ref->duration - ref->limit.max_tob); in __rtw89_mcc_calc_pattern_loose()
    1033  upper = min_t(s16, upper, in __rtw89_mcc_calc_pattern_loose()
    1035  lower = max_t(s16, lower, res - aux->limit.max_tob); in __rtw89_mcc_calc_pattern_loose()
    1061  s16 upper_toa_ref, lower_toa_ref; in __rtw89_mcc_calc_pattern_strict()
    1062  s16 upper_tob_aux, lower_tob_aux; in __rtw89_mcc_calc_pattern_strict()
    1064  s16 res; in __rtw89_mcc_calc_pattern_strict()
    1094  upper_toa_ref = min_t(s16, min_toa + res, ref->duration - min_tob); in __rtw89_mcc_calc_pattern_strict()
    [all …]
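The chan.c hits use the kernel's type-forcing min_t()/max_t() helpers to shrink an s16 window from both ends. A reduced sketch of that clamping pattern follows; the function and parameter names are placeholders, not the driver's real fields.

    #include <linux/kernel.h>   /* min_t()/max_t() */
    #include <linux/types.h>    /* s16 */

    /* Clamp a tentative TOA value against the slot duration and a hard limit. */
    static s16 clamp_toa(s16 upper, s16 duration, s16 max_toa)
    {
        upper = min_t(s16, upper, duration);
        upper = min_t(s16, upper, max_toa);
        return upper;
    }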
/freebsd/sys/netsmb/
  smb_crypt.c
    148  u_char S16[16], S21[21]; in smb_calcmackey() local
    177  MD4Final(S16, &md4); in smb_calcmackey()
    179  MD4Update(&md4, S16, 16); in smb_calcmackey()
    188  bcopy(S16, S21, 16); in smb_calcmackey()
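In smb_calcmackey(), S16 holds a 16-byte MD4 digest that is then copied into the first 16 bytes of the 21-byte S21 buffer, presumably as zero-padded key material for the DES-based response (an assumption; only the digest and the 16-byte copy are visible above). A standalone sketch of that buffer handling, with a placeholder input:

    #include <sys/types.h>
    #include <sys/md4.h>   /* kernel MD4; userland builds would use <md4.h> from libmd */
    #include <string.h>

    static void
    make_s21(const u_char *input, size_t len, u_char S21[21])
    {
        MD4_CTX md4;
        u_char S16[16];

        MD4Init(&md4);
        MD4Update(&md4, input, len);
        MD4Final(S16, &md4);          /* 16-byte MD4 digest */

        memset(S21, 0, 21);
        memcpy(S21, S16, 16);         /* first 16 bytes = digest, last 5 stay zero */
    }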
/freebsd/contrib/libarchive/libarchive/test/
  test_archive_string.c
    304  #define S16 "0123456789abcdef" in test_archive_string_sprintf() macro
    305  #define S32 S16 S16 in test_archive_string_sprintf()
    314  #undef S16 in test_archive_string_sprintf()
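The S16/S32 macros above rely on C's adjacent string-literal concatenation: "S16 S16" pastes two 16-character literals into one 32-character literal at compile time. A standalone illustration of the same trick:

    #include <stdio.h>
    #include <string.h>

    #define S16 "0123456789abcdef"
    #define S32 S16 S16          /* two adjacent literals merge into one */

    int
    main(void)
    {
        printf("%zu %zu\n", strlen(S16), strlen(S32));  /* prints "16 32" */
        return (0);
    }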
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/
  PPCLegalizerInfo.cpp
    46   const LLT S16 = LLT::scalar(16); in PPCLegalizerInfo() local
    58   .legalForCartesianProduct({S64}, {S1, S8, S16, S32}) in PPCLegalizerInfo()
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
  ARMLegalizerInfo.cpp
    39   const LLT s16 = LLT::scalar(16); in ARMLegalizerInfo() local
    52   .legalForCartesianProduct({s8, s16, s32}, {s1, s8, s16}); in ARMLegalizerInfo()
    117  {s16, p0, s16, 8}, in ARMLegalizerInfo()
  MVELaneInterleavingPass.cpp
    35   // %sa_b = VMOVLB.s16 %a
    36   // %sa_t = VMOVLT.s16 %a
    37   // %sb_b = VMOVLB.s16 %b
    38   // %sb_t = VMOVLT.s16 %b
/freebsd/sys/dev/mps/mpi/
  mpi2_type.h
    75   typedef signed short S16; typedef
    118  typedef S16 *PS16;
/freebsd/sys/dev/mpt/mpilib/
  mpi_type.h
    76   typedef signed short S16; typedef
    117  typedef S16 *PS16;
/freebsd/sys/dev/mpr/mpi/
  mpi2_type.h
    74   typedef signed short S16; typedef
    117  typedef S16 *PS16;
/freebsd/sys/dev/mpi3mr/mpi/
  mpi30_type.h
    62   typedef int16_t S16; typedef
    89   typedef S16 *PS16;
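All four MPI type headers above provide the same 16-bit aliases: the older ones spell it "typedef signed short S16", the mpi3mr header uses int16_t, and each also defines the pointer alias PS16. A minimal equivalent sketch; the width check at the end is an addition for illustration, not part of the headers.

    #include <stdint.h>

    typedef int16_t S16;    /* signed 16-bit scalar */
    typedef S16 *PS16;      /* pointer to S16 */

    _Static_assert(sizeof(S16) == 2, "S16 must be 16 bits wide");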
/freebsd/contrib/llvm-project/llvm/lib/Target/NVPTX/
  NVPTXRegisterInfo.cpp
    42   // not allow using .s16 or .u16 arguments for .fp16 in getNVPTXRegClassName()
    47   // .reg .s16 rs16 in getNVPTXRegClassName()
/freebsd/sys/dev/qat/qat_api/qat_direct/include/
  adf_kernel_types.h
    12   #define s16 int16_t macro