/freebsd/contrib/llvm-project/llvm/lib/Analysis/
IVDescriptors.cpp
    252  InstDesc ReduxDesc(false, nullptr);  in AddReductionVar()
    471  InstDesc IgnoredVal(false, nullptr);  in AddReductionVar()
    627  RecurrenceDescriptor::InstDesc
    629  Instruction *I, InstDesc &Prev) {  in isAnyOfPattern()
    635  return InstDesc(Select, Prev.getRecKind());  in isAnyOfPattern()
    640  return InstDesc(false, I);  in isAnyOfPattern()
    650  return InstDesc(false, I);  in isAnyOfPattern()
    656  return InstDesc(false, I);  in isAnyOfPattern()
    658  return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::IAnyOf  in isAnyOfPattern()
    662  RecurrenceDescriptor::InstDesc
    [all …]
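The isAnyOfPattern() hits above show the convention the recurrence matcher follows: a rejected instruction comes back as InstDesc(false, I), while a recognized select comes back wrapped with RecurKind::IAnyOf or RecurKind::FAnyOf depending on the compare feeding it. Below is a minimal sketch of that return convention only; the loop/PHI legality checks and the Prev descriptor threading of the real function are omitted.

// Sketch only: mirrors the InstDesc return convention visible in the hits above.
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static RecurrenceDescriptor::InstDesc matchAnyOfLike(Instruction *I) {
  // Not a select at all: report "not part of the recurrence", carrying the
  // offending instruction, as on lines 640/650/656 above.
  if (!isa<SelectInst>(I))
    return RecurrenceDescriptor::InstDesc(false, I);

  // Accepted: the recurrence kind depends on whether the select's condition
  // is an integer or a floating-point comparison (line 658 above).
  return RecurrenceDescriptor::InstDesc(
      I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::IAnyOf
                                         : RecurKind::FAnyOf);
}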
/freebsd/contrib/llvm-project/llvm/include/llvm/Analysis/
IVDescriptors.h
     88  class InstDesc {
     90  InstDesc(bool IsRecur, Instruction *I, Instruction *ExactFP = nullptr)
     94  InstDesc(Instruction *I, RecurKind K, Instruction *ExactFP = nullptr)
    126  static InstDesc isRecurrenceInstr(Loop *L, PHINode *Phi, Instruction *I,
    127  RecurKind Kind, InstDesc &Prev,
    143  static InstDesc isMinMaxPattern(Instruction *I, RecurKind Kind,
    144  const InstDesc &Prev);
    152  static InstDesc isAnyOfPattern(Loop *Loop, PHINode *OrigPhi, Instruction *I,
    153  InstDesc &Prev);
    157  static InstDesc isConditionalRdxPatter  [all...]
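The header hits show the two InstDesc constructor forms: one taking an explicit is-recurrence flag and one taking a RecurKind, both with an optional trailing instruction pointer that defaults to nullptr. A small sketch of how each form is invoked, assuming an LLVM build tree; RecurKind::SMin is only an illustrative kind.

// Sketch: the two constructor forms declared on lines 90 and 94 above.
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

void classifySketch(Instruction *I) {
  // Form 1: explicit flag; false marks I as not belonging to the recurrence
  // chain, and the optional ExactFP instruction defaults to nullptr.
  RecurrenceDescriptor::InstDesc NotARecurrence(false, I);

  // Form 2: I was recognized as part of a recurrence of a particular kind
  // (a signed-minimum reduction is used here purely as an example).
  RecurrenceDescriptor::InstDesc MinPattern(I, RecurKind::SMin);

  (void)NotARecurrence;
  (void)MinPattern;
}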
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/ |
AArch64SIMDInstrOpt.cpp
    158  /// to replace the instruction InstDesc by the instructions stored in the
    161  bool shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
    215  /// to replace the instruction InstDesc by the instructions stored in the
    219  shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,  in shouldReplaceInst()
    224  auto InstID = std::make_pair(InstDesc->getOpcode(), Subtarget);  in shouldReplaceInst()
    229  unsigned SCIdx = InstDesc->getSchedClass();  in shouldReplaceInst()
    257  if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost)  in shouldReplaceInst()
    218  shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc, SmallVectorImpl<const MCInstrDesc *> &InstDescRepl)  shouldReplaceInst() argument
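Taken together, the shouldReplaceInst() hits suggest a latency comparison: the pass asks the scheduling model for the latency of the original instruction (by opcode) and weighs it against the candidate replacement sequence. A hedged sketch of that test, assuming an initialized llvm::TargetSchedModel and omitting the per-(opcode, subtarget) caching the real pass keys on InstID.

// Sketch of the cost test suggested by line 257 above.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetSchedModel.h"
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;

static bool worthReplacing(const TargetSchedModel &SchedModel,
                           const MCInstrDesc *InstDesc,
                           const SmallVectorImpl<const MCInstrDesc *> &Repl) {
  unsigned ReplCost = 0;
  for (const MCInstrDesc *Desc : Repl)
    ReplCost += SchedModel.computeInstrLatency(Desc->getOpcode());
  // Profitable only if the original instruction is strictly slower than the
  // whole replacement sequence.
  return SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost;
}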
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ |
ARMMCTargetDesc.h
     58  uint64_t evaluateBranchTarget(const MCInstrDesc &InstDesc, uint64_t Addr,
ARMMCTargetDesc.cpp
    186  uint64_t ARM_MC::evaluateBranchTarget(const MCInstrDesc &InstDesc,  in evaluateBranchTarget() argument
    191  ((InstDesc.TSFlags & ARMII::FormMask) == ARMII::ThumbFrm) ? 4 : 8;  in evaluateBranchTarget()
    198  if (InstDesc.getOpcode() == ARM::tBLXi)  in evaluateBranchTarget()
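Line 191 picks the PC bias from the descriptor's target-specific flags: in ARM state the program counter reads as the instruction address plus 8, in Thumb state plus 4. A sketch of that selection follows; the final Addr + bias + Imm arithmetic and the header location of the ARMII flags are assumptions for illustration, and the ARM::tBLXi special case on line 198 is not shown.

// Sketch of the PC-bias selection visible on line 191 above.
#include "MCTargetDesc/ARMBaseInfo.h" // assumed location of ARMII::FormMask / ThumbFrm
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;

static uint64_t branchTargetSketch(const MCInstrDesc &InstDesc, uint64_t Addr,
                                   int64_t Imm) {
  // Thumb encodings see PC as address + 4, ARM encodings as address + 8.
  uint64_t PCBias =
      ((InstDesc.TSFlags & ARMII::FormMask) == ARMII::ThumbFrm) ? 4 : 8;
  return Addr + PCBias + Imm; // illustrative target computation
}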
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
SIFoldOperands.cpp
    629  const MCInstrDesc &InstDesc = MI->getDesc();  in tryAddToFoldList() local
    630  const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];  in tryAddToFoldList()
    635  for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {  in tryAddToFoldList()
    638  !TII->isInlineConstant(Op, InstDesc.operands()[i]))  in tryAddToFoldList()
   1813  const MCInstrDesc &InstDesc = UseMI->getDesc();  in tryFoldRegSequence() local
   1815  TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF());  in tryFoldRegSequence()
SIInstrInfo.cpp
   4357  const MCInstrDesc &InstDesc = MI.getDesc();  in isImmOperandLegal() local
   4358  const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];  in isImmOperandLegal()
   4379  if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))  in isImmOperandLegal()
   5733  const MCInstrDesc &InstDesc = MI.getDesc();  in isOperandLegal() local
   5734  const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx];  in isOperandLegal()
   5758  usesConstantBus(MRI, Op, InstDesc.operands().begin()[i])) {  in isOperandLegal()
   5763  } else if (AMDGPU::isSISrcOperand(InstDesc, i) &&  in isOperandLegal()
   5764  !isInlineConstant(Op, InstDesc.operands()[i])) {  in isOperandLegal()
   7787  const MCInstrDesc &InstDesc = get(Opcode);  in splitScalar64BitUnaryOp() local
   7804  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);  in splitScalar64BitUnaryOp()
    [all …]
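Two uses of MCInstrDesc recur in the SIInstrInfo.cpp hits (and in the SIFoldOperands.cpp and GCNHazardRecognizer.cpp hits nearby): indexing operands() by operand number to get the static MCOperandInfo for a legality check, and fetching a descriptor by opcode with get(Opcode) to build a fresh MachineInstr. Below is a sketch of both using only generic CodeGen/MC APIs; the opcode, registers, and the OPERAND_UNKNOWN check are illustrative stand-ins for the AMDGPU-specific logic.

// Sketch of the two MCInstrDesc usage patterns in the hits above.
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;

// (1) Per-operand metadata: operands() is indexed by operand number, just
// like lines 4357-4358 and 5733-5734 above.
static bool operandHasKnownType(const MachineInstr &MI, unsigned OpNo) {
  const MCInstrDesc &InstDesc = MI.getDesc();
  if (OpNo >= InstDesc.getNumOperands())
    return false; // no static info for variadic tails
  const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
  return OpInfo.OperandType != MCOI::OPERAND_UNKNOWN;
}

// (2) Building a new instruction from a descriptor fetched by opcode, as on
// lines 7787/7804 above; opcode and registers are caller-supplied here.
static MachineInstr &emitFromDesc(const TargetInstrInfo &TII,
                                  MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MII,
                                  const DebugLoc &DL, unsigned Opcode,
                                  Register Dest, Register Src) {
  const MCInstrDesc &InstDesc = TII.get(Opcode);
  return *BuildMI(MBB, MII, DL, InstDesc, Dest).addReg(Src);
}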
GCNHazardRecognizer.cpp
   2864  const MCInstrDesc &InstDesc = I.getDesc();  in fixVALUMaskWriteHazard() local
   2865  const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];  in fixVALUMaskWriteHazard()
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/ |
X86AsmBackend.cpp
    350  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());  in isMacroFused() local
    351  if (!InstDesc.isConditionalBranch())  in isMacroFused()
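Lines 350-351 show the MC-layer form of the lookup: with only an MCInst in hand, the backend fetches the MCInstrDesc through MCInstrInfo::get() and asks whether the opcode is a conditional branch before considering it for macro-fusion. A small sketch of that gate; the cmp/test pairing check that follows in the real code is omitted.

// Sketch of the descriptor query on lines 350-351 above.
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
using namespace llvm;

static bool mightMacroFuse(const MCInstrInfo &MCII, const MCInst &Jcc) {
  const MCInstrDesc &InstDesc = MCII.get(Jcc.getOpcode());
  if (!InstDesc.isConditionalBranch())
    return false; // only cmp/test + Jcc pairs are fusion candidates
  return true;    // further checks on the first instruction omitted
}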
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/ |
AMDGPUAsmParser.cpp
   2226  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());  in addLiteralImmOperand() local
   2229  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));  in addLiteralImmOperand()
   2232  assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));  in addLiteralImmOperand()
   2233  const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);  in addLiteralImmOperand()
   2238  uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;  in addLiteralImmOperand()
   2255  if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand  in addLiteralImmOperand()
   2384  Val = AMDGPU::isSISrcFPOperand(InstDesc, OpNum) ? (uint64_t)Val << 32  in addLiteralImmOperand()
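In addLiteralImmOperand() the parser sizes and encodes a literal immediate by interrogating the descriptor: operands()[OpNum].OperandType gives the declared operand type (line 2238), and AMDGPU helpers (isSISrcOperand, isSISrcFPOperand, getOperandSize) classify it further. The sketch below keeps only the generic MC part; the fixed 4-byte fallback stands in for the AMDGPU getOperandSize() helper.

// Sketch of the descriptor-driven sizing on lines 2233/2238 above.
#include "llvm/MC/MCInstrDesc.h"
using namespace llvm;

static unsigned literalSizeInBytes(const MCInstrDesc &InstDesc, unsigned OpNum,
                                   bool IsFPImm) {
  // Per-operand type tag; the real parser switches on this value to pick an
  // encoding, here it is only read to show where the information lives.
  uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
  (void)OpTy;
  // FP literals are parsed as doubles; otherwise the size would come from the
  // AMDGPU getOperandSize() helper, replaced here by a fixed 4-byte guess.
  return IsFPImm ? sizeof(double) : 4u;
}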