/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/ |
H A D | AArch64ExpandImm.cpp |
    81   AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });   in tryToreplicateChunks()
    96   AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });   in tryToreplicateChunks()
   227   AArch64_AM::getShifterImm(AArch64_AM::LSL,   in trySequenceOfOnes()
   236   AArch64_AM::getShifterImm(AArch64_AM::LSL,   in trySequenceOfOnes()
   501   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });   in expandMOVImmSimple()
   519   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });   in expandMOVImmSimple()
   600   AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });   in expandMOVImm()
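The matches above sit in the MOV-immediate expander, which splits a constant into 16-bit chunks and emits MOVZ/MOVK instructions, each chunk carried at an LSL shifter amount of 0, 16, 32 or 48. The following is only a minimal standalone sketch of that chunking idea, not the LLVM API; the in-tree expander also handles MOVN, ORR-with-logical-immediate and replicated-chunk forms.

    // Hypothetical sketch: print a MOVZ/MOVK sequence for a 64-bit value,
    // one 16-bit chunk per instruction, each at an LSL shift of 0/16/32/48.
    #include <cstdint>
    #include <cstdio>

    static void expandMovImmSketch(uint64_t Imm) {
      bool First = true;
      for (unsigned Shift = 0; Shift < 64; Shift += 16) {
        uint64_t Chunk = (Imm >> Shift) & 0xffff;
        if (Chunk == 0)
          continue;                                 // nothing to insert at this shift
        std::printf("%s x0, #0x%llx, lsl #%u\n",
                    First ? "movz" : "movk",
                    (unsigned long long)Chunk, Shift);
        First = false;                              // MOVZ zeroes the rest, MOVK keeps it
      }
      if (First)
        std::puts("movz x0, #0x0");                 // the all-zero immediate
    }

    int main() { expandMovImmSketch(0x0001234500006789ULL); }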
|
H A D | AArch64SchedPredicates.td |
    54   def CheckShiftLSL : CheckImmOperand_s<3, "AArch64_AM::LSL">;
   268   // Check for LSL shift <= 4
   299   // ORR Rd, ZR, Rm, LSL #0
   331   // MOVI Vd, #0, LSL #0
|
H A D | AArch64SchedPredNeoverse.td | 16 // Check for LSL shift == 0
|
H A D | AArch64RegisterInfo.td |
  1573   // LSL(8|16|32|64)
  1574   def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  1575   def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  1576   def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  1577   def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  1578   def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  1579   def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  1580   def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  1581   def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
|
H A D | AArch64SchedA510.td |
   683   (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
   684   "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
   685   "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
   686   "^(ASR|LSL|LSR)_ZPZI_[BHSD]",
   687   "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
   688   "^(ASR|LSL|LSR)_ZPZZ_[BHSD]",
   689   "^(ASR|LSL|LSR)_ZZI_[BHSD]",
|
H A D | AArch64SchedNeoverseV1.td |
   524   // Arithmetic, LSL shift, shift <= 4
   525   // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
   531   // Arithmetic, flagset, LSL shift, shift <= 4
   532   // Arithmetic, flagset, LSR/ASR/ROR shift or LSL shift > 4
  1390   (instregex "^(ASR|LSL|LSR)_WIDE_Z(Pm|Z)Z_[BHS]",
  1391   "^(ASR|LSL|LSR)_ZPm[IZ]_[BHSD]",
  1392   "^(ASR|LSL|LSR)_ZZI_[BHSD]",
  1393   "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",
|
H A D | AArch64SchedNeoverseN2.td |
   651   // Arithmetic, LSL shift, shift <= 4
   652   // Arithmetic, flagset, LSL shift, shift <= 4
   653   // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
  1625   (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
  1626   "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
  1627   "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
  1628   "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
  1629   "^(ASR|LSL|LSR)_ZZI_[BHSD]",
  1630   "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",
|
H A D | AArch64SchedNeoverseV2.td |
  1122   // Arithmetic, LSL shift, shift <= 4
  1123   // Arithmetic, flagset, LSL shift, shift <= 4
  1124   // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
  2135   (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
  2136   "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
  2137   "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
  2138   "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
  2139   "^(ASR|LSL|LSR)_ZZI_[BHSD]",
  2140   "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",
|
H A D | AArch64FastISel.cpp |
   748   Addr.setExtendType(AArch64_AM::LSL);   in computeAddress()
   830   Addr.setExtendType(AArch64_AM::LSL);   in computeAddress()
   875   Addr.setExtendType(AArch64_AM::LSL);   in computeAddress()
  1085   Addr.getOffsetReg(), AArch64_AM::LSL,   in simplifyAddress()
  1257   ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,   in emitAddSub()
  1271   case Instruction::Shl: ShiftType = AArch64_AM::LSL; break;   in emitAddSub()
  1377   .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));   in emitAddSub_ri()
  1739   AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));   in emitLogicalOp_rs()
|
H A D | AArch64ISelDAGToDAG.cpp |
   472   bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
   613   unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);   in SelectArithImmed()
   660   return AArch64_AM::LSL;   in getShiftTypeForNode()
   797   unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, LowZBits);   in SelectShiftedRegisterFromAnd()
   860   bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const {   in isWorthFoldingALU()
   868   if (LSL && Subtarget->hasALULSLFast() && V.getOpcode() == ISD::SHL &&   in isWorthFoldingALU()
  2854   if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {   in getUsefulBitsFromOrWithShiftedReg()
  3379   AArch64_AM::LSL, NumTrailingZeroInShiftedMask);   in isWorthFoldingIntoOrrWithShift()
  3388   EncodedShiftImm = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm);   in isWorthFoldingIntoOrrWithShift()
  3454   AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm), DL, VT)};   in tryOrrWithShift()
  [all …]
|
H A D | AArch64SchedNeoverseN1.td |
   308   // Arithmetic, LSL shift, shift <= 4
   309   // Arithmetic, flagset, LSL shift, shift <= 4
   310   // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
|
H A D | AArch64ExpandPseudoInsts.cpp |
  1287   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));   in expandMI()
  1487   AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),   in expandMI()
  1492   AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),   in expandMI()
|
H A D | AArch64SchedAmpere1.td |
   569   // For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
   994   (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;
|
H A D | AArch64SchedAmpere1B.td |
   525   // For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
   976   (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;
|
H A D | AArch64InstrInfo.cpp |
   943   return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5;   in isFalkorShiftExtFast()
  3015   if (AArch64_AM::getShiftType(Shift) != AArch64_AM::ShiftExtendType::LSL)   in canFoldIntoAddrMode()
  4474   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))   in copyPhysReg()
  4480   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));   in copyPhysReg()
  4485   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));   in copyPhysReg()
  4602   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));   in copyPhysReg()
  4606   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));   in copyPhysReg()
  5393   AArch64_AM::getShifterImm(AArch64_AM::LSL, LocalShiftSize));   in emitFrameOffsetAdj()
  9598   .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))   in probedStackAlloc()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/ |
H A D | AArch64AddressingModes.h |
    34   LSL = 0,
    55   case AArch64_AM::LSL: return "lsl";   in getShiftExtendName()
    76   case 0: return AArch64_AM::LSL;   in getShiftType()
   104   case AArch64_AM::LSL: STEnc = 0; break;   in getShifterImm()
    35   LSL = 0,   global() enumerator
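This header defines the shift/extend encoding the rest of the backend relies on: LSL is enumerator value 0, and getShifterImm packs a shift type and amount into a single operand immediate (amount in the low 6 bits, type above it), which getShiftType and getShiftValue unpack. The sketch below paraphrases those helpers from the matched lines only; the in-tree version has more enum values, asserts, and error handling.

    #include <cassert>

    // Paraphrase of the AArch64_AM helpers matched above; not a drop-in copy.
    enum ShiftExtendType { LSL = 0, LSR, ASR, ROR, MSL };

    static inline unsigned getShifterImm(ShiftExtendType ST, unsigned Imm) {
      assert((Imm & 0x3f) == Imm && "shift amount must fit in 6 bits");
      unsigned STEnc = 0;
      switch (ST) {
      case LSL: STEnc = 0; break;   // per the matched line: LSL encodes as 0
      case LSR: STEnc = 1; break;
      case ASR: STEnc = 2; break;
      case ROR: STEnc = 3; break;
      case MSL: STEnc = 4; break;
      }
      return (STEnc << 6) | (Imm & 0x3f);   // {type : bits 8..6, amount : bits 5..0}
    }

    static inline ShiftExtendType getShiftType(unsigned Imm) {
      return static_cast<ShiftExtendType>((Imm >> 6) & 0x7);
    }

    static inline unsigned getShiftValue(unsigned Imm) { return Imm & 0x3f; }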
|
H A D | AArch64MCCodeEmitter.cpp |
   287   assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&   in getAddSubImmOpValue()
   626   assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&   in getMoveVecShifterOpValue()
|
H A D | AArch64InstPrinter.cpp |
  1249   // LSL #0 should not be printed.   in printShiftedRegister()
  1250   if (AArch64_AM::getShiftType(Val) == AArch64_AM::LSL &&   in printShiftedRegister()
  1280   // UXTW/UXTX as LSL, and if the shift amount is also zero, print nothing at   in printArithExtend()
  2041   assert(AArch64_AM::getShiftType(Shift) == AArch64_AM::LSL &&   in printImm8OptLsl()
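The printer comments above encode a small convention: a shifted-register operand only gets a ", <shift> #<amount>" suffix when it is not the trivial LSL #0. An illustrative sketch of that decision follows; the names are invented for the example and are not the LLVM printer API.

    #include <cstdio>
    #include <string>

    enum ShiftExtendType { LSL = 0, LSR, ASR, ROR };

    // Return the textual suffix for a shifted register; "LSL #0 should not be printed".
    static std::string shiftSuffix(ShiftExtendType Ty, unsigned Amount) {
      if (Ty == LSL && Amount == 0)
        return "";
      static const char *Names[] = {"lsl", "lsr", "asr", "ror"};
      char Buf[32];
      std::snprintf(Buf, sizeof(Buf), ", %s #%u", Names[Ty], Amount);
      return Buf;
    }

    int main() {
      std::printf("add x0, x1, x2%s\n", shiftSuffix(LSL, 0).c_str()); // add x0, x1, x2
      std::printf("add x0, x1, x2%s\n", shiftSuffix(LSL, 3).c_str()); // add x0, x1, x2, lsl #3
    }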
|
/freebsd/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM64/ |
H A D | EmulateInstructionARM64.cpp |
    84   static inline uint64_t LSL(uint64_t x, integer shift) {   in LSL() function
   768   idx = LSL(llvm::SignExtend64<7>(imm7), scale);   in EmulateLDPSTP()
   954   offset = LSL(Bits32(opcode, 21, 10), size);   in EmulateLDRSTRImm()
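The lldb emulator keeps its own LSL() helper and uses it to scale immediate fields, e.g. the signed 7-bit imm7 of LDP/STP is sign-extended and shifted left by the access-size scale to get a byte offset. A self-contained sketch of that pattern (the helper names here are chosen for illustration):

    #include <cstdint>
    #include <cstdio>

    // Logical shift left that tolerates a zero shift, mirroring the helper's intent.
    static inline uint64_t LSL(uint64_t x, int64_t shift) {
      return shift <= 0 ? x : x << shift;
    }

    // Sign-extend the low 7 bits of a field (stand-in for llvm::SignExtend64<7>).
    static inline int64_t signExtend7(uint32_t imm7) {
      return (int64_t)((uint64_t)imm7 << 57) >> 57;
    }

    int main() {
      // 64-bit LDP uses scale == 3, so imm7 == 0x7f (i.e. -1) is a byte offset of -8.
      int64_t idx = (int64_t)LSL((uint64_t)signExtend7(0x7f), 3);
      std::printf("byte offset = %lld\n", (long long)idx);   // prints -8
    }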
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AVR/ |
H A D | AVRISelLowering.h | 38 LSL, ///< Logical shift left. enumerator
|
H A D | AVRInstrInfo.td |
    58   def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>;
  1380   // 8-bit LSL is an alias of ADD Rd, Rd
  1641   // LSL Rd
  1645   def LSL : InstAlias<"lsl\t$rd", (ADDRdRr GPR8 : $rd, GPR8 : $rd)>;
  2080   // Lowering of 'lsl' node to 'LSL' instruction.
  2081   // LSL is an alias of 'ADD Rd, Rd'
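The AVR entries spell out why the backend needs no dedicated LSL encoding: lsl Rd is an assembler alias for add Rd, Rd, and the AVRISD::LSL node is lowered to that add. On the hardware the two also set the carry flag identically, which matters when wider shifts are chained through ROL. A tiny check that the 8-bit value identity holds:

    #include <cassert>
    #include <cstdint>

    int main() {
      // For every 8-bit value, shifting left by one equals adding the value to
      // itself, so "lsl Rd" and "add Rd, Rd" compute the same register result.
      for (unsigned v = 0; v < 256; ++v) {
        uint8_t x = static_cast<uint8_t>(v);
        assert(static_cast<uint8_t>(x << 1) == static_cast<uint8_t>(x + x));
      }
      return 0;
    }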
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/ |
H A D | AArch64AsmParser.cpp |
  1413   if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&   in isGPR64WithShiftExtend()
  1494   return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||   in isShifter()
  1541   ET == AArch64_AM::LSL) &&   in isExtend()
  1560   ET == AArch64_AM::LSL) &&   in isExtendLSL64()
  1568   return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;   in isLSLImm3Shift()
  1575   return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&   in isMemXExtend()
  1596   return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||   in isArithmeticShifter()
  1607   return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||   in isLogicalShifter()
  1618   if (ST != AArch64_AM::LSL)   in isMovImm32Shifter()
  1630   if (ST != AArch64_AM::LSL)   in isMovImm64Shifter()
  [all …]
|
/freebsd/contrib/llvm-project/lldb/source/Plugins/Process/Utility/ |
H A D | ARMUtils.h | 102 static inline uint32_t LSL(const uint32_t value, const uint32_t amount, in LSL() function
|
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/ |
H A D | HexagonConstPropagation.cpp |
  2518   LatticeCell LSL, LSH;   in evaluateHexRSEQ32() local
  2519   if (!getCell(RL, Inputs, LSL) || !getCell(RH, Inputs, LSH))   in evaluateHexRSEQ32()
  2521   if (LSL.isProperty() || LSH.isProperty())   in evaluateHexRSEQ32()
  2524   unsigned LN = LSL.size(), HN = LSH.size();   in evaluateHexRSEQ32()
  2527   bool Eval = constToInt(LSL.Values[i], LoVs[i]);   in evaluateHexRSEQ32()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/ |
H A D | AArch64BaseInfo.h | 605 LSL, enumerator
|