/freebsd/contrib/llvm-project/compiler-rt/lib/builtins/
  int_div_impl.inc
    26: // 1 <= sr <= N - 1. Shifts do not trigger UB.
    55: // 1 <= sr <= N - 1. Shifts do not trigger UB.
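The matched comment records the invariant that keeps the shifts in the soft integer-division helpers well defined: in C, shifting an N-bit integer by N or more bits (or by a negative amount) is undefined behavior, so the shift count sr is kept strictly between 1 and N - 1. A sketch of the kind of shift pair that invariant protects (illustrative names only, not quoted from int_div_impl.inc):

    #include <stdint.h>

    /* Sketch only, not the code in int_div_impl.inc.  With N = 64 and the
     * caller guaranteeing 1 <= sr <= N - 1, both sr and N - sr lie in
     * 1..63, so neither shift below can hit C's undefined behavior for
     * shift counts >= the operand width. */
    enum { N = 64 };

    static void split_for_div_loop(uint64_t n, unsigned sr,
                                   uint64_t *r, uint64_t *q) {
        *r = n >> sr;         /* high part, becomes the running remainder */
        *q = n << (N - sr);   /* low part, becomes the quotient bits */
    }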
/freebsd/lib/libc/softfloat/bits64/
  softfloat-macros
    35: Shifts `a' right by the number of bits given in `count'. If any nonzero
    62: Shifts `a' right by the number of bits given in `count'. If any nonzero
    89: Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64
    136: Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
    169: Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
    213: Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right
    284: Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the
    303: Shifts the 192-bit value formed by concatenating `a0' and `a1', and `a2' left
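The comments at lines 35 and 62 describe SoftFloat's "jamming" right shift: any nonzero bits shifted off the bottom are OR'd into the least significant bit of the result, so a later rounding step still sees that something was discarded. A sketch of the single-word version, modeled on SoftFloat's shift64RightJamming (the file's exact code may differ):

    #include <stdint.h>

    /* Sticky ("jamming") right shift: shift a right by count, recording any
     * discarded nonzero bits in the least significant bit of the result. */
    static uint64_t shift64_right_jamming(uint64_t a, int count) {
        if (count == 0)
            return a;
        if (count < 64)
            return (a >> count) | ((a << ((-count) & 63)) != 0);
        return (a != 0);   /* everything shifted out: only the sticky bit survives */
    }

The 128-bit and 192-bit entries above apply the same idea across two or three 64-bit words.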
/freebsd/lib/libc/softfloat/bits32/
  softfloat-macros
    34: Shifts `a' right by the number of bits given in `count'. If any nonzero
    61: Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
    94: Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
    138: Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
    209: Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
    228: Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' left
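The bits32 variant works on values held in 32-bit pieces, so the 64-bit jamming shift described at lines 61 and 94 operates on a high word `a0' and a low word `a1'. A sketch of that two-word form, again modeled on the SoftFloat macros rather than quoted from the file:

    #include <stdint.h>

    /* Shift the 64-bit value a0:a1 (a0 is the high word) right by count
     * with jamming, producing z0:z1. */
    static void shift64_right_jamming_2x32(uint32_t a0, uint32_t a1, int count,
                                           uint32_t *z0, uint32_t *z1) {
        int neg = (-count) & 31;     /* shift complement, reduced modulo 32 */
        if (count == 0) {
            *z0 = a0;
            *z1 = a1;
        } else if (count < 32) {
            *z0 = a0 >> count;
            *z1 = (a0 << neg) | (a1 >> count) | ((a1 << neg) != 0);
        } else {
            *z0 = 0;
            if (count == 32)
                *z1 = a0 | (a1 != 0);
            else if (count < 64)
                *z1 = (a0 >> (count & 31)) | (((a0 << neg) | a1) != 0);
            else
                *z1 = ((a0 | a1) != 0);
        }
    }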
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
  AArch64SchedCyclone.td
    138: // Shifts and Bitfield Operations
    176: // EXTR Shifts a pair of registers and requires two micro-ops.
    473: // 7.9.4 Shifts and Bitfield Operations
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
  CombinerHelper.cpp
    in buildUDivUsingMul():
      5185: SmallVector<Register, 16> Shifts, Factors;   (local)
      5192: Shifts.push_back(Shifts[0]);
      5207: Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
      5219: Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
      5222: Shift = Shifts[0];
    in buildSDivUsingMul():
      5428: SmallVector<Register, 16> Shifts, Factors;   (local)
      5436: Shifts.push_back(Shifts[0]);
      5452: Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
      5464: Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
      5467: Shift = Shifts[0];
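buildUDivUsingMul() and buildSDivUsingMul() are the GlobalISel helpers that turn a division by a constant into multiplies and shifts; the matched Shifts and Factors vectors collect one shift amount and one factor per vector lane, built into a G_BUILD_VECTOR when lanes differ and reused as a single scalar otherwise. One well-known instance of that strength reduction, as an illustration of the identity only (not LLVM's code path for deriving the constants):

    #include <assert.h>
    #include <stdint.h>

    /* Unsigned 32-bit division by 10 rewritten as a multiply by the
     * precomputed reciprocal 0xCCCCCCCD and a right shift by 35. */
    static uint32_t udiv10(uint32_t x) {
        return (uint32_t)(((uint64_t)x * 0xCCCCCCCDu) >> 35);
    }

    int main(void) {
        for (uint32_t x = 0; x < 1000000; ++x)
            assert(udiv10(x) == x / 10);
        assert(udiv10(UINT32_MAX) == UINT32_MAX / 10);
        return 0;
    }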
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
  AMDGPULegalizerInfo.cpp
    in AMDGPULegalizerInfo():
      1718: auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})   (local)
      1722: Shifts.legalFor({{S16, S16}, {V2S16, V2S16}})
      1725: Shifts.legalFor({{S16, S16}});
      1728: Shifts.widenScalarIf(
      1737: Shifts.maxScalarIf(typeIs(0, S16), 1, S16);
      1738: Shifts.clampScalar(1, S32, S32);
      1739: Shifts.widenScalarToNextPow2(0, 16);
      1740: Shifts.clampScalar(0, S16, S64);
      1750: Shifts.clampScalar(1, S32, S32);
      1751: Shifts.widenScalarToNextPow2(0, 32);
      [all …]
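These rules describe how G_SHL/G_LSHR/G_ASHR are legalized for AMDGPU: in one configuration 16-bit (and packed 2 x 16-bit) shifts stay legal, the shift-amount operand (type index 1) is clamped to 32 bits, and the shifted value (type index 0) is widened to the next power-of-two width within a supported range. A standalone reading of that width normalization (not LLVM code; the minimum width in the configuration without 16-bit shifts is an assumption, since the excerpt is truncated):

    #include <stdio.h>

    static unsigned next_pow2(unsigned w) {
        unsigned p = 1;
        while (p < w)
            p <<= 1;
        return p;
    }

    /* Widen the shifted type to the next power of two and clamp it into a
     * supported range; the shift amount itself is always made 32-bit. */
    static unsigned legalized_shift_width(unsigned bits, int has_16bit_shifts) {
        unsigned min_bits = has_16bit_shifts ? 16u : 32u;  /* 32 is assumed */
        unsigned p = next_pow2(bits);
        if (p < min_bits) p = min_bits;
        if (p > 64u)      p = 64u;
        return p;
    }

    int main(void) {
        printf("i8  -> i%u\n", legalized_shift_width(8, 1));   /* 16 */
        printf("i24 -> i%u\n", legalized_shift_width(24, 1));  /* 32 */
        printf("i48 -> i%u\n", legalized_shift_width(48, 0));  /* 64 */
        return 0;
    }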
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/
  TargetLowering.cpp
    in BuildExactSDIV():
      6107: SmallVector<SDValue, 16> Shifts, Factors;   (local)
      6119: Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
      6130: Shift = DAG.getBuildVector(ShVT, dl, Shifts);
      6133: assert(Shifts.size() == 1 && Factors.size() == 1 &&
      6136: Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
      6140: Shift = Shifts[0];
    in BuildExactUDIV():
      6167: SmallVector<SDValue, 16> Shifts, Factors;   (local)
      6180: Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
      6193: Shift = DAG.getBuildVector(ShVT, dl, Shifts);
      6196: assert(Shifts.size() == 1 && Factors.size() == 1 &&
      [all …]
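BuildExactSDIV() and BuildExactUDIV() handle divisions the compiler knows are exact (no remainder): the divisor is split into a power of two times an odd factor, and the quotient becomes a right shift followed by a multiply with the odd factor's multiplicative inverse modulo 2^N. The identity for one concrete case, as an illustration only (LLVM derives the shift and inverse for arbitrary constants):

    #include <assert.h>
    #include <stdint.h>

    /* If x is known to be an exact multiple of 6 = 2 * 3, then
     * x / 6 == (x >> 1) * 0xAAAAAAAB (mod 2^32), because 0xAAAAAAAB is the
     * multiplicative inverse of 3 modulo 2^32. */
    static uint32_t exact_div6(uint32_t x) {
        return (x >> 1) * 0xAAAAAAABu;   /* valid only when x % 6 == 0 */
    }

    int main(void) {
        for (uint32_t x = 0; x < 60000; x += 6)
            assert(exact_div6(x) == x / 6);
        return 0;
    }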
  SelectionDAG.cpp (same directory as above)
    in computeKnownBits():
      3393: unsigned Shifts = IsLE ? i : SubScale - 1 - i;   (local)
      3394: Known.insertBits(Known2, SubBitWidth * Shifts);
      3413: unsigned Shifts = IsLE ? i : NumElts - 1 - i;   (local)
      3414: unsigned Offset = (Shifts % SubScale) * BitWidth;
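Here the known bits of narrow sub-elements are folded into a wider element across a bitcast; the Shifts index selects element i directly on little-endian targets and the mirrored element on big-endian ones, which determines the bit offset each piece is inserted at. A small sketch of that placement (names follow the fragment, but this is not LLVM code):

    #include <stdio.h>

    /* Bit offset at which narrow sub-element i (of SubScale elements, each
     * SubBitWidth bits wide) lands inside the wide element. */
    static unsigned sub_element_bit_offset(unsigned i, unsigned SubScale,
                                           unsigned SubBitWidth, int IsLE) {
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        return SubBitWidth * Shifts;
    }

    int main(void) {
        /* Four 8-bit pieces packed into one 32-bit element. */
        for (unsigned i = 0; i < 4; ++i)
            printf("piece %u: LE bit %2u, BE bit %2u\n", i,
                   sub_element_bit_offset(i, 4, 8, 1),
                   sub_element_bit_offset(i, 4, 8, 0));
        return 0;
    }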
/freebsd/contrib/llvm-project/clang/include/clang/Basic/
  arm_neon.td
    395: // E.3.11 Shifts by signed variable
    402: // E.3.12 Shifts by constant
    420: // E.3.13 Shifts with insert
    897: // Shifts by constant
  arm_sve.td
    669: // Shifts
/freebsd/contrib/llvm-project/llvm/lib/Target/M68k/
  M68kInstrInfo.td
    511: // Used for Shifts and Rotations, since M68k immediates in these instructions
/freebsd/contrib/llvm-project/llvm/lib/Target/LoongArch/
  LoongArchISelLowering.cpp
    in decomposeMulByConstant():
      6041: unsigned Shifts = Imm.countr_zero();   (local)
      6043: if (Shifts >= 12)
      6047: APInt ImmPop = Imm.ashr(Shifts);
      6052: APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
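decomposeMulByConstant() peels the trailing zero bits off a multiplier: Imm is split into ImmPop (Imm with its trailing zeros shifted out) and the power of two 1 << Shifts, so a multiply by Imm can be rebuilt from a multiply by the smaller constant and a shift; the comparison against 12 is a target-specific condition on when the split is used. The underlying identity, as an illustration only (using a GCC/Clang builtin in place of the APInt countr_zero() call in the fragment):

    #include <assert.h>
    #include <stdint.h>

    /* If imm has `shifts' trailing zero bits, then
     *     x * imm == (x * (imm >> shifts)) << shifts   (mod 2^64),
     * so the multiply can use the smaller constant imm >> shifts. */
    static uint64_t mul_decomposed(uint64_t x, uint64_t imm) {
        unsigned shifts = (unsigned)__builtin_ctzll(imm);  /* imm != 0 assumed */
        uint64_t imm_pop = imm >> shifts;
        return (x * imm_pop) << shifts;
    }

    int main(void) {
        assert(mul_decomposed(123456789u, 0x5000u) == 123456789ull * 0x5000u);
        assert(mul_decomposed(42u, 0x30000000ull) == 42ull * 0x30000000ull);
        return 0;
    }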
/freebsd/contrib/llvm-project/llvm/include/llvm/IR/
  IntrinsicsARM.td
    573: // Vector Shifts:
  IntrinsicsPowerPC.td
    1151: // Right Shifts.
  IntrinsicsAArch64.td
    1792: // Shifts
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/
  SystemZScheduleZEC12.td
    469: // Shifts
  SystemZScheduleZ196.td
    458: // Shifts
  SystemZScheduleZ13.td
    495: // Shifts
  SystemZScheduleZ14.td
    505: // Shifts
  SystemZScheduleZ15.td
    520: // Shifts
  SystemZScheduleZ16.td
    520: // Shifts
  SystemZInstrInfo.td
    1499: // Shifts
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
  X86.td
    592: "Shifts are faster (or as fast) as shuffle">;
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/
  MipsInstrInfo.td
    1359: // Shifts
/freebsd/contrib/one-true-awk/testdir/
  funstack.ok
    267: G. W. Stewart Incorporating Origin Shifts into the QR