
Searched refs:LSL (results 1–25 of 49), sorted by relevance

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64ExpandImm.cpp
81 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) }); in tryToreplicateChunks()
96 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) }); in tryToreplicateChunks()
227 AArch64_AM::getShifterImm(AArch64_AM::LSL, in trySequenceOfOnes()
236 AArch64_AM::getShifterImm(AArch64_AM::LSL, in trySequenceOfOnes()
501 AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) }); in expandMOVImmSimple()
519 AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) }); in expandMOVImmSimple()
600 AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) }); in expandMOVImm()
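These ExpandImm hits are the immediate-materialization path: a 64-bit constant is split into 16-bit chunks, and each chunk is placed by a MOVZ or MOVK whose shifter operand is LSL #0, #16, #32, or #48. A minimal sketch of that scheme, as a hypothetical stand-alone emitter rather than the LLVM API:

    #include <cstdint>
    #include <cstdio>

    // Materialize a 64-bit immediate 16 bits at a time: MOVZ writes the first
    // non-zero chunk and clears the rest, each MOVK patches one more chunk.
    void expandMovImm64(uint64_t Imm) {
      bool First = true;
      for (unsigned Shift = 0; Shift < 64; Shift += 16) {
        unsigned Chunk = unsigned(Imm >> Shift) & 0xffff;
        if (Chunk == 0)
          continue;                        // zero chunks need no MOVK
        std::printf("%s x0, #0x%x, lsl #%u\n",
                    First ? "movz" : "movk", Chunk, Shift);
        First = false;
      }
      if (First)
        std::printf("movz x0, #0x0\n");    // the immediate was zero
    }

For 0x0000123400005678 this emits movz x0, #0x5678 followed by movk x0, #0x1234, lsl #32; the tryToreplicateChunks and trySequenceOfOnes functions above are shortcuts for immediates with repeated or contiguous bit patterns.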
AArch64SchedPredicates.td
54 def CheckShiftLSL : CheckImmOperand_s<3, "AArch64_AM::LSL">;
268 // Check for LSL shift <= 4
299 // ORR Rd, ZR, Rm, LSL #0
331 // MOVI Vd, #0, LSL #0
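These scheduling predicates capture two cheapness rules: arithmetic with a folded shift stays in the fast class only when the LSL amount is small, and ORR Rd, ZR, Rm, LSL #0 and MOVI Vd, #0, LSL #0 are recognized as plain register and vector-zero moves. A hedged model of the shift check (a sketch of what the .td predicates test, not the TableGen machinery):

    enum ShiftExtendType { LSL, LSR, ASR, ROR };

    // The "LSL shift <= 4" rule from the comment above: only a short logical
    // shift left keeps the operation in the cheap arithmetic class.
    bool isCheapArithShift(ShiftExtendType ST, unsigned Amount) {
      return ST == LSL && Amount <= 4;
    }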
AArch64SchedPredNeoverse.td
16 // Check for LSL shift == 0
AArch64RegisterInfo.td
1573 // LSL(8|16|32|64)
1574 def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
1575 def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
1576 def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
1577 def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
1578 def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
1579 def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
1580 def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
1581 def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
AArch64SchedA510.td
683 (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
684 "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
685 "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
686 "^(ASR|LSL|LSR)_ZPZI_[BHSD]",
687 "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
688 "^(ASR|LSL|LSR)_ZPZZ_[BHSD]",
689 "^(ASR|LSL|LSR)_ZZI_[BHSD]",
AArch64SchedNeoverseV1.td
524 // Arithmetic, LSL shift, shift <= 4
525 // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
531 // Arithmetic, flagset, LSL shift, shift <= 4
532 // Arithmetic, flagset, LSR/ASR/ROR shift or LSL shift > 4
1390 (instregex "^(ASR|LSL|LSR)_WIDE_Z(Pm|Z)Z_[BHS]",
1391 "^(ASR|LSL|LSR)_ZPm[IZ]_[BHSD]",
1392 "^(ASR|LSL|LSR)_ZZI_[BHSD]",
1393 "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",
AArch64SchedNeoverseN2.td
651 // Arithmetic, LSL shift, shift <= 4
652 // Arithmetic, flagset, LSL shift, shift <= 4
653 // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
1625 (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
1626 "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
1627 "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
1628 "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
1629 "^(ASR|LSL|LSR)_ZZI_[BHSD]",
1630 "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",
AArch64SchedNeoverseV2.td
1122 // Arithmetic, LSL shift, shift <= 4
1123 // Arithmetic, flagset, LSL shift, shift <= 4
1124 // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
2135 (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
2136 "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
2137 "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
2138 "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
2139 "^(ASR|LSL|LSR)_ZZI_[BHSD]",
2140 "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",
AArch64FastISel.cpp
748 Addr.setExtendType(AArch64_AM::LSL); in computeAddress()
830 Addr.setExtendType(AArch64_AM::LSL); in computeAddress()
875 Addr.setExtendType(AArch64_AM::LSL); in computeAddress()
1085 Addr.getOffsetReg(), AArch64_AM::LSL, in simplifyAddress()
1257 ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL, in emitAddSub()
1271 case Instruction::Shl: ShiftType = AArch64_AM::LSL; break; in emitAddSub()
1377 .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm)); in emitAddSub_ri()
1739 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm)); in emitLogicalOp_rs()
AArch64ISelDAGToDAG.cpp
472 bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
613 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt); in SelectArithImmed()
660 return AArch64_AM::LSL; in getShiftTypeForNode()
797 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, LowZBits); in SelectShiftedRegisterFromAnd()
860 bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const { in isWorthFoldingALU()
868 if (LSL && Subtarget->hasALULSLFast() && V.getOpcode() == ISD::SHL && in isWorthFoldingALU()
2854 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) { in getUsefulBitsFromOrWithShiftedReg()
3379 AArch64_AM::LSL, NumTrailingZeroInShiftedMask); in isWorthFoldingIntoOrrWithShift()
3388 EncodedShiftImm = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm); in isWorthFoldingIntoOrrWithShift()
3454 AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm), DL, VT)}; in tryOrrWithShift()
[all …]
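The instruction-selection hits fold a free-standing shift into the ALU node that consumes it, so (add a, (shl b, 3)) selects to the single instruction add x0, x1, x2, lsl #3. A rough model of the isWorthFoldingALU decision, heavily simplified from the real heuristic and with illustrative names:

    // Folding pays off when the shifted value has no other users and the core
    // executes small LSL amounts in the ALU pipe at no extra cost (the
    // hasALULSLFast check above, combined with the shift <= 4 bound).
    bool worthFoldingLSL(unsigned ShiftAmt, bool HasALULSLFast, bool HasOneUse) {
      return HasOneUse && HasALULSLFast && ShiftAmt <= 4;
    }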
AArch64SchedNeoverseN1.td
308 // Arithmetic, LSL shift, shift <= 4
309 // Arithmetic, flagset, LSL shift, shift <= 4
310 // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
AArch64ExpandPseudoInsts.cpp
1287 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); in expandMI()
1487 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0), in expandMI()
1492 AArch64_AM::getShifterImm(AArch64_AM::LSL, 0), in expandMI()
AArch64SchedAmpere1.td
569 // For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
994 (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;
AArch64SchedAmpere1B.td
525 // For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
976 (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;
AArch64InstrInfo.cpp
943 return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5; in isFalkorShiftExtFast()
3015 if (AArch64_AM::getShiftType(Shift) != AArch64_AM::ShiftExtendType::LSL) in canFoldIntoAddrMode()
4474 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)) in copyPhysReg()
4480 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); in copyPhysReg()
4485 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); in copyPhysReg()
4602 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); in copyPhysReg()
4606 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); in copyPhysReg()
5393 AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize)); in emitFrameOffsetAdj()
9598 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)) in probedStackAlloc()
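Several of these hits build the canonical AArch64 register move: MOV Xd, Xm has no encoding of its own and is emitted as ORR Xd, XZR, Xm with a zero LSL, which is why copyPhysReg and the frame code keep appending getShifterImm(AArch64_AM::LSL, 0). A textual stand-in for those builder calls (illustrative, not the MachineInstrBuilder API):

    #include <cstdio>
    #include <string>

    // A GPR-to-GPR copy is ORR Dst, XZR, Src, LSL #0 (alias: MOV Dst, Src).
    std::string buildGPRCopy(const std::string &Dst, const std::string &Src) {
      return "orr " + Dst + ", xzr, " + Src + ", lsl #0";
    }

    int main() {
      std::puts(buildGPRCopy("x0", "x1").c_str()); // the expansion of mov x0, x1
    }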
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/
AArch64AddressingModes.h
34 LSL = 0,
55 case AArch64_AM::LSL: return "lsl"; in getShiftExtendName()
76 case 0: return AArch64_AM::LSL; in getShiftType()
104 case AArch64_AM::LSL: STEnc = 0; break; in getShifterImm()
35 LSL = 0, (enumerator definition)
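The header hits show both directions of the shifter-immediate encoding: getShifterImm packs a shift kind and a 6-bit amount into one integer, and getShiftType/getShiftValue unpack it, so LSL #0 encodes as plain zero. A minimal sketch of that packing, with the field layout inferred from the hits (an assumption, not a copy of the header):

    enum ShiftExtendType { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };

    // Low 6 bits hold the shift amount (0-63); the bits above pick the kind.
    inline unsigned getShifterImm(ShiftExtendType ST, unsigned Imm) {
      return (unsigned(ST) << 6) | (Imm & 0x3f);
    }

    inline ShiftExtendType getShiftType(unsigned Encoded) {
      return ShiftExtendType(Encoded >> 6);
    }

    inline unsigned getShiftValue(unsigned Encoded) {
      return Encoded & 0x3f;
    }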
AArch64MCCodeEmitter.cpp
287 assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL && in getAddSubImmOpValue()
626 assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL && in getMoveVecShifterOpValue()
AArch64InstPrinter.cpp
1249 // LSL #0 should not be printed. in printShiftedRegister()
1250 if (AArch64_AM::getShiftType(Val) == AArch64_AM::LSL && in printShiftedRegister()
1280 // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at in printArithExtend()
2041 assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL && in printImm8OptLsl()
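The printer hits apply a readability rule: lsl #0 carries no information, so a shifted register prints bare, and a zero-amount UXTW/UXTX likewise prints nothing rather than an explicit LSL. A hedged sketch of the suffix logic (an illustrative helper, not the MCInstPrinter interface):

    #include <sstream>
    #include <string>

    // Produce the ", <shift> #<amount>" suffix for a shifted register, or an
    // empty string when the shift is the no-op lsl #0.
    std::string shiftSuffix(const std::string &Name, unsigned Amount) {
      if (Name == "lsl" && Amount == 0)
        return "";                         // "x1, lsl #0" prints as "x1"
      std::ostringstream OS;
      OS << ", " << Name << " #" << Amount;
      return OS.str();
    }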
/freebsd/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM64/
EmulateInstructionARM64.cpp
84 static inline uint64_t LSL(uint64_t x, integer shift) { in LSL() (function)
768 idx = LSL(llvm::SignExtend64<7>(imm7), scale); in EmulateLDPSTP()
954 offset = LSL(Bits32(opcode, 21, 10), size); in EmulateLDRSTRImm()
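The emulator uses its LSL helper to scale immediate fields: LDP/STP carry a signed 7-bit offset that is shifted left by the access-size scale before being added to the base register. A self-contained sketch of that computation (the sign-extension helper and names are illustrative):

    #include <cstdint>

    inline uint64_t LSL(uint64_t x, unsigned shift) {
      return shift == 0 ? x : x << shift;
    }

    // Replicate bit 6 of a 7-bit field upward via an arithmetic right shift.
    inline int64_t signExtend7(uint64_t imm7) {
      return int64_t(imm7 << 57) >> 57;
    }

    // For a 64-bit LDP (scale = 3), imm7 = 0x7c (-4) yields a byte offset of
    // -32: signExtend7(0x7c) == -4 and LSL(-4, 3) == -32.
    inline int64_t pairOffset(uint64_t imm7, unsigned scale) {
      return int64_t(LSL(uint64_t(signExtend7(imm7)), scale));
    }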
/freebsd/contrib/llvm-project/llvm/lib/Target/AVR/
AVRISelLowering.h
38 LSL, ///< Logical shift left. (enumerator)
AVRInstrInfo.td
58 def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>;
1380 // 8-bit LSL is an alias of ADD Rd, Rd
1641 // LSL Rd
1645 def LSL : InstAlias<"lsl\t$rd", (ADDRdRr GPR8 : $rd, GPR8 : $rd)>;
2080 // Lowering of 'lsl' node to 'LSL' instruction.
2081 // LSL is an alias of 'ADD Rd, Rd'
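The AVR hits document an encoding trick rather than a distinct opcode: the ISA has no dedicated one-bit logical shift left, so lsl Rd is an assembler alias for add Rd, Rd, relying on x << 1 == x + x. The identity checked exhaustively in eight bits (a plain C++ check, not AVR code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // lsl Rd == add Rd, Rd: doubling and a one-bit left shift agree in
      // modular 8-bit arithmetic for every input.
      for (unsigned x = 0; x < 256; ++x)
        assert(uint8_t(x + x) == uint8_t(x << 1));
    }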
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/
AArch64AsmParser.cpp
1413 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL && in isGPR64WithShiftExtend()
1494 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || in isShifter()
1541 ET == AArch64_AM::LSL) && in isExtend()
1560 ET == AArch64_AM::LSL) && in isExtendLSL64()
1568 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7; in isLSLImm3Shift()
1575 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) && in isMemXExtend()
1596 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || in isArithmeticShifter()
1607 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || in isLogicalShifter()
1618 if (ST != AArch64_AM::LSL) in isMovImm32Shifter()
1630 if (ST != AArch64_AM::LSL) in isMovImm64Shifter()
[all …]
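The parser hits are operand-legality predicates: each shift or extend form is accepted only where the instruction encoding has bits for it. For example, a shifted move-wide immediate must use LSL with a hardware-supported amount; a hedged model of the 32-bit case, assuming isMovImm32Shifter permits LSL #0 and LSL #16 only:

    enum class Shift { LSL, LSR, ASR, ROR, MSL };

    // A 32-bit MOVZ/MOVK immediate lands in one of two 16-bit halves, so the
    // only legal shifts are LSL #0 and LSL #16.
    bool isValidMovImm32Shift(Shift ST, unsigned Amount) {
      return ST == Shift::LSL && (Amount == 0 || Amount == 16);
    }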
/freebsd/contrib/llvm-project/lldb/source/Plugins/Process/Utility/
ARMUtils.h
102 static inline uint32_t LSL(const uint32_t value, const uint32_t amount, in LSL() (function)
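Like the ARM64 emulator above, this helper implements the LSL pseudocode from the ARM ARM; the truncated signature suggests an extra out-parameter (likely the carry), which the portable sketch below omits:

    #include <cstdint>

    // Logical shift left with an explicit guard: shifting a 32-bit value by
    // 32 or more is undefined behavior in C++, while the pseudocode gives 0.
    inline uint32_t lsl32(uint32_t value, uint32_t amount) {
      return amount >= 32 ? 0 : value << amount;
    }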
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/
HexagonConstPropagation.cpp
2518 LatticeCell LSL, LSH; in evaluateHexRSEQ32() (local)
2519 if (!getCell(RL, Inputs, LSL) || !getCell(RH, Inputs, LSH)) in evaluateHexRSEQ32()
2521 if (LSL.isProperty() || LSH.isProperty()) in evaluateHexRSEQ32()
2524 unsigned LN = LSL.size(), HN = LSH.size(); in evaluateHexRSEQ32()
2527 bool Eval = constToInt(LSL.Values[i], LoVs[i]); in evaluateHexRSEQ32()
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/
AArch64BaseInfo.h
605 LSL, (enumerator)
