/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64ExpandImm.cpp
     81  AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });    in tryToreplicateChunks()
     96  AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt) });    in tryToreplicateChunks()
    227  AArch64_AM::getShifterImm(AArch64_AM::LSL,    in trySequenceOfOnes()
    236  AArch64_AM::getShifterImm(AArch64_AM::LSL,    in trySequenceOfOnes()
    501  AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });    in expandMOVImmSimple()
    519  AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });    in expandMOVImmSimple()
    600  AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift) });    in expandMOVImm()
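Note: these hits sit in LLVM's MOV-immediate expansion, where a 64-bit constant with no single-instruction encoding is materialized as a MOVZ plus up to three MOVKs, one 16-bit chunk per instruction at LSL #0/#16/#32/#48. The sketch below illustrates only that simple path; the real code also tries ORR with a logical immediate, replicated chunks, and MOVN-based forms, and the constant here is just an example.

```cpp
// Minimal sketch of the MOVZ/MOVK strategy behind expandMOVImmSimple():
// place one 16-bit chunk per instruction, each at LSL #0/#16/#32/#48.
// Zero chunks after the first instruction are skipped, since MOVZ
// already clears the rest of the register.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Imm = 0x12345678DEADBEEFULL; // example constant
  bool First = true;
  for (unsigned Shift = 0; Shift < 64; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & 0xFFFF;
    if (Chunk == 0 && !First)
      continue; // a MOVK of zero would be a no-op
    std::printf("%s x0, #0x%x, lsl #%u\n",
                First ? "movz" : "movk", Chunk, Shift);
    First = false;
  }
  return 0;
}
```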
AArch64SchedPredicates.td
     54  def CheckShiftLSL : CheckImmOperand_s<3, "AArch64_AM::LSL">;
    268  // Check for LSL shift <= 4
    299  // ORR Rd, ZR, Rm, LSL #0
    331  // MOVI Vd, #0, LSL #0

AArch64SchedPredNeoverse.td
     16  // Check for LSL shift == 0

AArch64RegisterInfo.td
   1723  // LSL(8|16|32|64)
   1724  def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
   1725  def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
   1726  def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
   1727  def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
   1728  def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
   1729  def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
   1730  def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
   1731  def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;

AArch64SchedA510.td
    683  (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
    684  "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
    685  "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
    686  "^(ASR|LSL|LSR)_ZPZI_[BHSD]",
    687  "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
    688  "^(ASR|LSL|LSR)_ZPZZ_[BHSD]",
    689  "^(ASR|LSL|LSR)_ZZI_[BHSD]",

AArch64SchedA320.td
    705  (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
    706  "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
    707  "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
    708  "^(ASR|LSL|LSR)_ZPZI_[BHSD]",
    709  "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
    710  "^(ASR|LSL|LSR)_ZPZZ_[BHSD]",
    711  "^(ASR|LSL|LSR)_ZZI_[BHSD]",

AArch64SchedNeoverseN2.td
    687  // Arithmetic, LSL shift, shift <= 4
    688  // Arithmetic, flagset, LSL shift, shift <= 4
    689  // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
   1666  (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
   1667  "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
   1668  "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
   1669  "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
   1670  "^(ASR|LSL|LSR)_ZZI_[BHSD]",
   1671  "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",

AArch64SchedNeoverseN3.td
    592  // Arithmetic, LSL shift, shift <= 4
    593  // Arithmetic, flagset, LSL shift, shift <= 4
    594  // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
   1636  (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
   1637  "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
   1638  "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
   1639  "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
   1640  "^(ASR|LSL|LSR)_ZZI_[BHSD]",
   1641  "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",

AArch64SchedNeoverseV1.td
    608  // Arithmetic, LSL shift, shift <= 4
    609  // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
    615  // Arithmetic, flagset, LSL shift, shift <= 4
    616  // Arithmetic, flagset, LSR/ASR/ROR shift or LSL shift > 4
   1503  (instregex "^(ASR|LSL|LSR)_WIDE_Z(Pm|Z)Z_[BHS]",
   1504  "^(ASR|LSL|LSR)_ZPm[IZ]_[BHSD]",
   1505  "^(ASR|LSL|LSR)_ZZI_[BHSD]",
   1506  "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",

AArch64SchedNeoverseV2.td
   1128  // Arithmetic, LSL shift, shift <= 4
   1129  // Arithmetic, flagset, LSL shift, shift <= 4
   1130  // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4
   2152  (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]",
   2153  "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]",
   2154  "^(ASR|LSL|LSR)_ZPmI_[BHSD]",
   2155  "^(ASR|LSL|LSR)_ZPmZ_[BHSD]",
   2156  "^(ASR|LSL|LSR)_ZZI_[BHSD]",
   2157  "^(ASR|LSL|LSR)_ZPZ[IZ]_[BHSD]",

AArch64FastISel.cpp
    743  Addr.setExtendType(AArch64_AM::LSL);    in computeAddress()
    825  Addr.setExtendType(AArch64_AM::LSL);    in computeAddress()
    870  Addr.setExtendType(AArch64_AM::LSL);    in computeAddress()
   1080  Addr.getOffsetReg(), AArch64_AM::LSL,    in simplifyAddress()
   1252  ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,    in emitAddSub()
   1266  case Instruction::Shl: ShiftType = AArch64_AM::LSL; break;    in emitAddSub()
   1372  .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));    in emitAddSub_ri()
   1736  AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));    in emitLogicalOp_rs()

AArch64ISelDAGToDAG.cpp
    470  bool isWorthFoldingALU(SDValue V, bool LSL = false) const;
    613  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);    in SelectArithImmed()
    660  return AArch64_AM::LSL;    in getShiftTypeForNode()
    801  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, LowZBits);    in SelectShiftedRegisterFromAnd()
    864  bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const {    in isWorthFoldingALU()
    872  if (LSL && Subtarget->hasALULSLFast() && V.getOpcode() == ISD::SHL &&    in isWorthFoldingALU()
   2956  if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {    in getUsefulBitsFromOrWithShiftedReg()
   3481  AArch64_AM::LSL, NumTrailingZeroInShiftedMask);    in isWorthFoldingIntoOrrWithShift()
   3490  EncodedShiftImm = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm);    in isWorthFoldingIntoOrrWithShift()
   3556  AArch64_AM::getShifterImm(AArch64_AM::LSL, ShlImm), DL, VT)};    in tryOrrWithShift()
  [all …]

AArch64SchedNeoverseN1.td
    308  // Arithmetic, LSL shift, shift <= 4
    309  // Arithmetic, flagset, LSL shift, shift <= 4
    310  // Arithmetic, LSR/ASR/ROR shift or LSL shift > 4

AArch64ExpandPseudoInsts.cpp
   1315  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));    in expandMI()
   1520  AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),    in expandMI()
   1525  AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),    in expandMI()

AArch64SchedAmpere1.td
    569  // For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
    994  (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;

AArch64SchedAmpere1B.td
    525  // For basic arithmetic, we have more flexibility for short shifts (LSL shift <= 4),
    976  (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/
AArch64AddressingModes.h
     34  LSL = 0,    (enumerator)
     55  case AArch64_AM::LSL: return "lsl";    in getShiftExtendName()
     76  case 0: return AArch64_AM::LSL;    in getShiftType()
    104  case AArch64_AM::LSL: STEnc = 0; break;    in getShifterImm()
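Note: these hits show both halves of the packed "shifter immediate" the other files pass around: the shift kind sits above the 6-bit amount, with LSL encoding as 0. A small re-derivation from the snippets at lines 76 and 104 (field layout inferred, so check the header for the authoritative definition):

```cpp
// Packs a shift kind and amount the way AArch64_AM::getShifterImm()
// appears to: kind in the bits above the amount (LSL encodes as 0),
// amount in the low 6 bits. Field layout inferred from the hits above.
#include <cassert>
#include <cstdio>

enum ShiftKind { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };

unsigned packShifterImm(ShiftKind Kind, unsigned Amount) {
  assert(Amount <= 63 && "shift amount must fit in 6 bits");
  return (unsigned(Kind) << 6) | Amount;
}

int main() {
  const unsigned Enc = packShifterImm(LSL, 12);
  // Unpacking mirrors getShiftType() / getShiftValue().
  std::printf("kind=%u amount=%u\n", (Enc >> 6) & 0x7, Enc & 0x3F);
  return 0;
}
```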
AArch64MCCodeEmitter.cpp
    295  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&    in getAddSubImmOpValue()
    659  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&    in getImm8OptLsl()

/freebsd/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM64/
EmulateInstructionARM64.cpp
     84  static inline uint64_t LSL(uint64_t x, integer shift) {    (function LSL())
    768  idx = LSL(llvm::SignExtend64<7>(imm7), scale);    in EmulateLDPSTP()
    954  offset = LSL(Bits32(opcode, 21, 10), size);    in EmulateLDRSTRImm()
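Note: lldb's ARM64 emulator keeps a tiny LSL() helper matching the Arm ARM pseudocode and uses it to scale immediate fields into byte offsets; LDP/STP's signed imm7 counts register-sized slots and is shifted by log2 of the access size. A self-contained sketch of that use (signExtend7 below is a stand-in for llvm::SignExtend64<7>):

```cpp
// Sketch of how the emulator scales LDP/STP's imm7 field: sign-extend
// the 7-bit count of register-sized slots, then LSL by log2(access size)
// to get a byte offset.
#include <cstdint>
#include <cstdio>

static inline uint64_t LSL(uint64_t x, unsigned shift) {
  return shift == 0 ? x : x << shift;
}

static inline int64_t signExtend7(uint64_t x) {
  return static_cast<int64_t>(x << 57) >> 57; // replicate bit 6 upward
}

int main() {
  const uint64_t imm7 = 0x7F; // all-ones field: -1 slots
  const unsigned scale = 3;   // 8-byte X registers -> shift by 3
  const int64_t idx = static_cast<int64_t>(
      LSL(static_cast<uint64_t>(signExtend7(imm7)), scale));
  std::printf("byte offset = %lld\n", static_cast<long long>(idx)); // -8
  return 0;
}
```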
/freebsd/contrib/llvm-project/lldb/source/Plugins/Process/Utility/

ARMUtils.h
    102  static inline uint32_t LSL(const uint32_t value, const uint32_t amount,    (function LSL())

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/
AArch64AsmParser.cpp
   1437  if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&    in isGPR64WithShiftExtend()
   1520  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||    in isShifter()
   1567  ET == AArch64_AM::LSL) &&    in isExtend()
   1586  ET == AArch64_AM::LSL) &&    in isExtendLSL64()
   1594  return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;    in isLSLImm3Shift()
   1601  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&    in isMemXExtend()
   1622  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||    in isArithmeticShifter()
   1633  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||    in isLogicalShifter()
   1644  if (ST != AArch64_AM::LSL)    in isMovImm32Shifter()
   1656  if (ST != AArch64_AM::LSL)    in isMovImm64Shifter()
  [all …]
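Note: these are operand-class predicates the assembler uses to accept or reject a parsed shift or extend. As a rough sketch of their shape (the names mirror the hits, but the bodies below are reconstructed, not the parser's exact code): a MOVZ/MOVK shifter must be LSL with the amount on a 16-bit chunk boundary inside the register width.

```cpp
// Reconstructed sketch (not the parser's exact code) of the kind of
// check isMovImm32Shifter()/isMovImm64Shifter() perform: MOVZ/MOVK
// shifters must be LSL, and the amount must select a 16-bit chunk.
#include <cassert>

enum ShiftKind { LSL, LSR, ASR, ROR };

struct ShiftExtendOp {
  ShiftKind Kind;
  unsigned Amount;
};

// 32-bit form: lsl #0 or lsl #16.
bool isMovImm32Shifter(const ShiftExtendOp &Op) {
  return Op.Kind == LSL && (Op.Amount == 0 || Op.Amount == 16);
}

// 64-bit form: lsl #0, #16, #32, or #48.
bool isMovImm64Shifter(const ShiftExtendOp &Op) {
  return Op.Kind == LSL && Op.Amount <= 48 && Op.Amount % 16 == 0;
}

int main() {
  assert(isMovImm32Shifter({LSL, 16}));
  assert(!isMovImm32Shifter({LSR, 16})); // wrong shift kind
  assert(!isMovImm64Shifter({LSL, 8}));  // not a chunk boundary
  return 0;
}
```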
/freebsd/contrib/llvm-project/llvm/lib/Target/AVR/

AVRInstrInfo.td
     79  def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>; // Logical shift left.
   1293  // 8-bit LSL is an alias of ADD Rd, Rd
   1404  // LSL Rd
   1408  def LSL : InstAlias<"lsl\t$rd", (ADDRdRr GPR8 : $rd, GPR8 : $rd)>;
   1693  // Lowering of 'lsl' node to 'LSL' instruction.
   1694  // LSL is an alias of 'ADD Rd, Rd'
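Note: the AVR hits document that there is no separate LSL encoding; lsl Rd is parsed and printed as an alias of add Rd, Rd, which is sound because an 8-bit left shift by one and self-addition agree bit-for-bit, including the carry out of bit 7. A quick exhaustive check of that identity:

```cpp
// Exhaustive check of the identity behind AVR's "lsl Rd" == "add Rd, Rd"
// alias: for every 8-bit value, shifting left by one and adding the value
// to itself give the same result and the same carry-out.
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned v = 0; v < 256; ++v) {
    assert(static_cast<uint8_t>(v << 1) == static_cast<uint8_t>(v + v));
    assert(((v << 1) >> 8) == ((v + v) >> 8)); // carry-out matches
  }
  return 0;
}
```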
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/

HexagonConstPropagation.cpp
   2519  LatticeCell LSL, LSH;    in evaluateHexRSEQ32() (local)
   2520  if (!getCell(RL, Inputs, LSL) || !getCell(RH, Inputs, LSH))    in evaluateHexRSEQ32()
   2522  if (LSL.isProperty() || LSH.isProperty())    in evaluateHexRSEQ32()
   2525  unsigned LN = LSL.size(), HN = LSH.size();    in evaluateHexRSEQ32()
   2528  bool Eval = constToInt(LSL.Values[i], LoVs[i]);    in evaluateHexRSEQ32()

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/Utils/

AArch64BaseInfo.h
    691  LSL,    (enumerator)

/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/

ARMScheduleM85.td
    417  // CortexM85 treats LSL #0 as needing a shifter. In practice the throughput
    439  (instregex "(t|t2)(LSL|LSR|ASR|ROR|SBFX|UBFX)")>;