/freebsd/contrib/llvm-project/clang/lib/CodeGen/

CGNonTrivialStruct.cpp
    33  enum { DstIdx = 0, SrcIdx = 1 };  [enumerator]
   361  Address DstAddr = StartAddrs[DstIdx];  [in visitArray()]
   389  CGF.Builder.CreateICmpEQ(PHIs[DstIdx], DstArrayEnd, "done");  [in visitArray()]
   517  Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], this->Start);  [in flushTrivialFields()]
   552  Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);  [in visitVolatileTrivial()]
   562  Address DstAddr = Addrs[DstIdx].withElementType(Ty);  [in visitVolatileTrivial()]
   593  *CGF, getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);  [in visitARCStrong()]
   599  *CGF, getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);  [in visitARCWeak()]
   605  CGF->MakeAddrLValue(getAddrWithOffset(Addrs[DstIdx], Offset), FT));  [in callSpecialFunction()]
   634  getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD), QT);  [in visitARCStrong()]
   [all …]
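In the CGNonTrivialStruct.cpp hits above, DstIdx and SrcIdx are simply the fixed positions of the destination and source operands in the address array that the struct copy/move helpers walk, so one field visitor can serve both roles. A minimal sketch of that indexing convention, using raw byte pointers instead of clang::CodeGen::Address (the helper below is invented for the example, not Clang's API):

```cpp
#include <array>
#include <cstddef>
#include <cstdio>
#include <cstring>

// Illustrative stand-ins; the real code indexes clang::CodeGen::Address values.
enum { DstIdx = 0, SrcIdx = 1 };
using Addr = unsigned char *;

// Copy `Size` trivial bytes at `Offset`, picking the operands by role.
static void copyTrivialField(std::array<Addr, 2> Addrs, size_t Offset, size_t Size) {
  std::memcpy(Addrs[DstIdx] + Offset, Addrs[SrcIdx] + Offset, Size);
}

int main() {
  unsigned char Src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  unsigned char Dst[8] = {};
  copyTrivialField({Dst, Src}, /*Offset=*/2, /*Size=*/4);
  std::printf("%d %d\n", Dst[2], Dst[5]); // prints 3 6
  return 0;
}
```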
CGBuiltin.cpp
 21592  Value *DstIdx = EmitScalarExpr(E->getArg(2));  [in EmitWebAssemblyBuiltinExpr(), local]
 21598  return Builder.CreateCall(Callee, {TableX, TableY, SrcIdx, DstIdx, NElems});  [in EmitWebAssemblyBuiltinExpr()]
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/

RegisterCoalescer.h
    39  unsigned DstIdx = 0;  [variable]
   103  unsigned getDstIdx() const { return DstIdx; }  [in getDstIdx()]
TwoAddressInstructionPass.cpp
   153  bool commuteInstruction(MachineInstr *MI, unsigned DstIdx,
   171  unsigned SrcIdx, unsigned DstIdx,
   739  unsigned DstIdx,  [in commuteInstruction(), argument]
   760  Register RegA = MI->getOperand(DstIdx).getReg();  [in commuteInstruction()]
  1314  unsigned SrcIdx, unsigned DstIdx, unsigned &Dist, bool shouldOnlyCommute) {  [in tryInstructionTransform(), argument]
  1319  Register regA = MI.getOperand(DstIdx).getReg();  [in tryInstructionTransform()]
  1328  bool Commuted = tryInstructionCommute(&MI, DstIdx, SrcIdx, regBKilled, Dist);  [in tryInstructionTransform()]
  1520  unsigned DstIdx = 0;  [in collectTiedOperands(), local]
  1521  if (!MI->isRegTiedToDefOperand(SrcIdx, &DstIdx))  [in collectTiedOperands()]
  1525  MachineOperand &DstMO = MI->getOperand(DstIdx);  [in collectTiedOperands()]
   [all …]
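The collectTiedOperands() hits show the two-address pass asking, for each use operand SrcIdx, which def operand it is tied to (isRegTiedToDefOperand fills in DstIdx). A rough stand-alone model of that pairing, with a toy operand record in place of MachineInstr/MachineOperand (everything here is a simplification for illustration, not the real pass):

```cpp
#include <cstdio>
#include <optional>
#include <utility>
#include <vector>

// Toy stand-in for a machine operand: a register plus an optional tied-def index.
struct Operand {
  unsigned Reg = 0;
  bool IsDef = false;
  std::optional<unsigned> TiedDefIdx; // set on a use that must reuse a def's register
};

// Collect (SrcIdx, DstIdx) pairs: each tied use and the def operand it is constrained to.
static std::vector<std::pair<unsigned, unsigned>>
collectTiedOperands(const std::vector<Operand> &Ops) {
  std::vector<std::pair<unsigned, unsigned>> Tied;
  for (unsigned SrcIdx = 0; SrcIdx != Ops.size(); ++SrcIdx) {
    const Operand &MO = Ops[SrcIdx];
    if (MO.IsDef || !MO.TiedDefIdx)
      continue; // only uses that are tied to a def matter here
    Tied.emplace_back(SrcIdx, *MO.TiedDefIdx);
  }
  return Tied;
}

int main() {
  // Something like "%0 = ADD %0(tied), %1": operand 0 is the def, operand 1 is tied to it.
  std::vector<Operand> Ops = {{10, true, {}}, {10, false, 0u}, {11, false, {}}};
  for (auto [Src, Dst] : collectTiedOperands(Ops))
    std::printf("use #%u tied to def #%u\n", Src, Dst);
  return 0;
}
```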
RegisterCoalescer.cpp
   265  unsigned DstIdx);
   454  SrcIdx = DstIdx = 0;  [in setRegisters()]
   502  SrcIdx, DstIdx);  [in setRegisters()]
   511  DstIdx = SrcSub;  [in setRegisters()]
   524  if (DstIdx && !SrcIdx) {  [in setRegisters()]
   526  std::swap(SrcIdx, DstIdx);  [in setRegisters()]
   544  std::swap(SrcIdx, DstIdx);  [in flip()]
   569  assert(!DstIdx && !SrcIdx && "Inconsistent CoalescerPair state.");  [in isCoalescable()]
   584  TRI.composeSubRegIndices(DstIdx, DstSub);  [in isCoalescable()]
  1294  unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx();  [in reMaterializeTrivialDef(), local]
   [all …]
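The setRegisters()/flip() hits show CoalescerPair swapping both the registers and the SrcIdx/DstIdx sub-register indices to reach a canonical orientation. A reduced sketch of just that swap step, with plain unsigned values in place of Register and sub-register indices (this paraphrases only the visible lines, not the full coalescer logic):

```cpp
#include <cstdio>
#include <utility>

// Toy pair of registers plus sub-register indices, loosely modeled on CoalescerPair.
struct CoalescePair {
  unsigned SrcReg = 0, DstReg = 0;
  unsigned SrcIdx = 0, DstIdx = 0; // 0 means "no sub-register index"

  // Mirrors the visible `if (DstIdx && !SrcIdx) { ... std::swap(SrcIdx, DstIdx); }`
  // pattern: when only one side carries an index, flip the pair so the indices end
  // up in the orientation the rest of the pass expects.
  void canonicalize() {
    if (DstIdx && !SrcIdx) {
      std::swap(SrcReg, DstReg);
      std::swap(SrcIdx, DstIdx);
    }
  }

  // Same swap, used when the copy direction is reversed (cf. flip()).
  void flip() {
    std::swap(SrcReg, DstReg);
    std::swap(SrcIdx, DstIdx);
  }
};

int main() {
  CoalescePair CP{/*SrcReg=*/5, /*DstReg=*/7, /*SrcIdx=*/0, /*DstIdx=*/3};
  CP.canonicalize();
  std::printf("Src=%u(sub%u) Dst=%u(sub%u)\n", CP.SrcReg, CP.SrcIdx, CP.DstReg, CP.DstIdx);
  return 0;
}
```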
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

R600ExpandSpecialInstrs.cpp
    86  int DstIdx = TII->getOperandIdx(MI.getOpcode(), R600::OpName::dst);  [in runOnMachineFunction(), local]
    87  assert(DstIdx != -1);  [in runOnMachineFunction()]
    88  MachineOperand &DstOp = MI.getOperand(DstIdx);  [in runOnMachineFunction()]
R600Packetizer.cpp
    84  int DstIdx = TII->getOperandIdx(BI->getOpcode(), R600::OpName::dst);  [in getPreviousVector(), local]
    85  if (DstIdx == -1) {  [in getPreviousVector()]
    88  Register Dst = BI->getOperand(DstIdx).getReg();  [in getPreviousVector()]
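Most of the AMDGPU hits in this directory share one idiom: getOperandIdx()/getNamedOperandIdx() returns the position of a named operand, or -1 when the opcode has no such operand, and the caller either asserts or bails out before indexing. A tiny self-contained illustration of that convention (a toy lookup table here, not the generated AMDGPU operand tables):

```cpp
#include <cstdio>
#include <map>
#include <string>

// Toy stand-in for the generated named-operand tables: map an operand name to
// its position for one opcode, or return -1 when the instruction lacks it.
static int getNamedOperandIdx(const std::map<std::string, int> &Operands,
                              const std::string &Name) {
  auto It = Operands.find(Name);
  return It == Operands.end() ? -1 : It->second;
}

int main() {
  std::map<std::string, int> MovOperands = {{"dst", 0}, {"src0", 1}};
  std::map<std::string, int> NopOperands = {};

  // Same shape as the R600/SI hits: check for -1 before touching the operand.
  int DstIdx = getNamedOperandIdx(MovOperands, "dst");
  if (DstIdx != -1)
    std::printf("dst is operand #%d\n", DstIdx);

  if (getNamedOperandIdx(NopOperands, "dst") == -1)
    std::printf("opcode has no dst operand, skip it\n");
  return 0;
}
```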
SIPeepholeSDWA.cpp
   420  auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),  [in convertToSDWA(), local]
   422  auto TiedIdx = MI.findTiedOperandIdx(DstIdx);  [in convertToSDWA()]
R600ISelLowering.cpp
   224  int DstIdx = TII->getOperandIdx(MI.getOpcode(), R600::OpName::dst);  [in EmitInstrWithCustomInserter(), local]
   225  assert(DstIdx != -1);  [in EmitInstrWithCustomInserter()]
   229  if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||  [in EmitInstrWithCustomInserter()]
SIInstrInfo.cpp
  4774  int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);  [in verifyInstruction(), local]
  4776  for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) {  [in verifyInstruction()]
  4823  if (!ST.hasSDWASdst() && DstIdx != -1) {  [in verifyInstruction()]
  4825  const MachineOperand &Dst = MI.getOperand(DstIdx);  [in verifyInstruction()]
  4850  const MachineOperand &Dst = MI.getOperand(DstIdx);  [in verifyInstruction()]
  4857  MI.getOperand(MI.findTiedOperandIdx(DstIdx));  [in verifyInstruction()]
  4891  const uint32_t DstIdx =  [in verifyInstruction(), local]
  4893  const MachineOperand &Dst = MI.getOperand(DstIdx);  [in verifyInstruction()]
  4895  const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);  [in verifyInstruction()]
SIISelLowering.cpp
 15125  int DstIdx =  [in AddMemOpInit(), local]
 15167  TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;  [in AddMemOpInit()]
 15171  InitIdx = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;  [in AddMemOpInit()]
 15179  Register PrevDst = MRI.cloneVirtualRegister(MI.getOperand(DstIdx).getReg());  [in AddMemOpInit()]
 15190  NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));  [in AddMemOpInit()]
 15208  MI.tieOperands(DstIdx, MI.getNumOperands() - 1);  [in AddMemOpInit()]
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/

GISelKnownBits.cpp
   519  unsigned DstIdx = 0;  [in computeKnownBitsImpl(), local]
   520  for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;  [in computeKnownBitsImpl()]
   521  ++DstIdx)  [in computeKnownBitsImpl()]
   524  Known = SrcOpKnown.extractBits(BitWidth, BitWidth * DstIdx);  [in computeKnownBitsImpl()]
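The GISelKnownBits.cpp hits handle G_UNMERGE_VALUES: scan the results to find which destination index the queried register R occupies, then take that BitWidth-wide slice out of the source's known bits. A scalar model of that extraction, using a plain 64-bit value instead of llvm::KnownBits (the helper name is made up for the example):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Extract the DstIdx-th BitWidth-wide chunk of `SrcKnown`, mirroring
// Known = SrcOpKnown.extractBits(BitWidth, BitWidth * DstIdx);
static uint64_t extractKnownChunk(uint64_t SrcKnown, unsigned BitWidth, unsigned DstIdx) {
  uint64_t Mask = (BitWidth == 64) ? ~0ull : ((1ull << BitWidth) - 1);
  return (SrcKnown >> (BitWidth * DstIdx)) & Mask;
}

int main() {
  // Pretend a 64-bit value with known bits 0x1122334455667788 is unmerged into
  // four 16-bit pieces; result registers R0..R3 take the chunks from low to high.
  uint64_t SrcKnown = 0x1122334455667788ull;
  std::vector<unsigned> Results = {100, 101, 102, 103}; // toy register numbers
  unsigned R = 102;

  // Same scan as the snippet: walk the results until we hit R.
  unsigned DstIdx = 0;
  while (DstIdx != Results.size() - 1 && Results[DstIdx] != R)
    ++DstIdx;

  std::printf("chunk for R: 0x%04llx\n",
              (unsigned long long)extractKnownChunk(SrcKnown, 16, DstIdx)); // 0x3344
  return 0;
}
```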
LegalizerHelper.cpp
  5817  unsigned DstIdx = 0; // Low bits of the result.  [in multiplyRegisters(), local]
  5819  B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);  [in multiplyRegisters()]
  5820  DstRegs[DstIdx] = FactorSum;  [in multiplyRegisters()]
  5825  for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {  [in multiplyRegisters()]
  5827  for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1;  [in multiplyRegisters()]
  5828  i <= std::min(DstIdx, SrcParts - 1); ++i) {  [in multiplyRegisters()]
  5830  B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]);  [in multiplyRegisters()]
  5834  for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts;  [in multiplyRegisters()]
  5835  i <= std::min(DstIdx - 1, SrcParts - 1); ++i) {  [in multiplyRegisters()]
  5837  B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]);  [in multiplyRegisters()]
   [all …]
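multiplyRegisters() narrows a wide G_MUL into NarrowTy parts: destination part DstIdx sums the low halves (G_MUL) of products Src1[DstIdx - i] * Src2[i] and the high halves (G_UMULH) of products from one column lower, i.e. ordinary schoolbook long multiplication over limbs. A plain-integer sketch of the same idea with 32-bit limbs and explicit carries (the real code emits generic MIR instructions instead of doing the arithmetic directly):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Schoolbook multiplication over 32-bit limbs, least-significant limb first.
// Each destination limb accumulates the low halves of the products that land in
// its column plus the high halves carried up from the column below -- the same
// shape as the multiplyRegisters() loops above.
static std::vector<uint32_t> mulLimbs(const std::vector<uint32_t> &A,
                                      const std::vector<uint32_t> &B) {
  std::vector<uint32_t> Dst(A.size() + B.size(), 0);
  for (size_t i = 0; i < A.size(); ++i) {
    uint64_t Carry = 0;
    for (size_t j = 0; j < B.size(); ++j) {
      uint64_t Cur = (uint64_t)Dst[i + j] + (uint64_t)A[i] * B[j] + Carry;
      Dst[i + j] = (uint32_t)Cur; // low half stays in this limb
      Carry = Cur >> 32;          // high half moves one limb up
    }
    Dst[i + B.size()] += (uint32_t)Carry;
  }
  return Dst;
}

int main() {
  // 0x1'0000'0001 * 3 = 0x3'0000'0003, spread over 32-bit limbs (low limb first).
  std::vector<uint32_t> A = {1, 1}, B = {3, 0};
  std::vector<uint32_t> R = mulLimbs(A, B);
  std::printf("%08x %08x %08x %08x\n",
              (unsigned)R[3], (unsigned)R[2], (unsigned)R[1], (unsigned)R[0]);
  return 0;
}
```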
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/

LegalizationArtifactCombiner.h
  1011  unsigned DstIdx = (Elt0UnmergeIdx * EltSize) / DstTy.getSizeInBits();  [in tryCombineMergeLike(), local]
  1012  replaceRegOrBuildCopy(Dst, NewUnmerge.getReg(DstIdx), MRI, MIB,  [in tryCombineMergeLike()]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Disassembler/

AMDGPUDisassembler.cpp
   409  int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);  [in decodeAVLdSt(), local]
   410  if (IsAGPROperand(Inst, DstIdx, MRI))  [in decodeAVLdSt()]
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/

X86InstrInfo.cpp
  2432  unsigned DstIdx = (Imm >> 4) & 3;  [in commuteInstructionImpl(), local]
  2437  if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&  [in commuteInstructionImpl()]
  2439  unsigned AltIdx = llvm::countr_zero((ZMask | (1 << DstIdx)) ^ 15);  [in commuteInstructionImpl()]
  7229  unsigned DstIdx = (Imm >> 4) & 3;  [in foldMemoryOperandCustom(), local]
  7238  unsigned NewImm = (DstIdx << 4) | ZMask;  [in foldMemoryOperandCustom()]
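The X86InstrInfo.cpp hits (and the X86ISelLowering.cpp ones below) decode the INSERTPS control byte the same way: bits [7:6] select the source lane, bits [5:4] are the destination lane (DstIdx here), and bits [3:0] are a zero mask, so `(Imm >> 4) & 3` and `(DstIdx << 4) | ZMask` are just field extraction and re-packing. A small sketch of that encoding (the struct and helper names below are invented for the example):

```cpp
#include <cstdint>
#include <cstdio>

// The INSERTPS immediate packs three fields:
//   bits [7:6] source lane, bits [5:4] destination lane, bits [3:0] zero mask.
struct InsertPSImm {
  unsigned SrcIdx; // which lane of the source operand to take
  unsigned DstIdx; // which lane of the destination to overwrite
  unsigned ZMask;  // lanes forced to zero afterwards
};

static InsertPSImm decodeInsertPS(uint8_t Imm) {
  return {(Imm >> 6) & 3u, (Imm >> 4) & 3u, Imm & 0xFu};
}

static uint8_t encodeInsertPS(const InsertPSImm &F) {
  return (uint8_t)((F.SrcIdx << 6) | (F.DstIdx << 4) | F.ZMask);
}

int main() {
  // Example: take source lane 2, write it into destination lane 1, zero lane 3.
  uint8_t Imm = encodeInsertPS({2, 1, 0b1000});
  InsertPSImm F = decodeInsertPS(Imm);
  std::printf("imm=0x%02x src=%u dst=%u zmask=0x%x\n",
              (unsigned)Imm, F.SrcIdx, F.DstIdx, F.ZMask); // imm=0x98 src=2 dst=1 zmask=0x8
  return 0;
}
```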
X86ISelLowering.cpp
  6009  unsigned DstIdx = 0;  [in getFauxShuffleMask(), local]
  6015  DstIdx = N.getConstantOperandVal(2);  [in getFauxShuffleMask()]
  6021  Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);  [in getFauxShuffleMask()]
  6060  unsigned DstByte = DstIdx * NumBytesPerElt;  [in getFauxShuffleMask()]
 41289  unsigned DstIdx = (InsertPSMask >> 4) & 0x3;  [in combineTargetShuffle(), local]
 41293  if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())  [in combineTargetShuffle()]
 41298  if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())  [in combineTargetShuffle()]
 41310  InsertPSMask |= (1u << DstIdx);  [in combineTargetShuffle()]
 41333  if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {  [in combineTargetShuffle()]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/

AMDGPUAsmParser.cpp
  8495  int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);  [in cvtVOP3DstOpSelOnly(), local]
  8496  if (DstIdx == -1)  [in cvtVOP3DstOpSelOnly()]
  8499  const MCOperand &DstOp = Inst.getOperand(DstIdx);  [in cvtVOP3DstOpSelOnly()]
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/

RISCVISelLowering.cpp
  5028  for (unsigned DstIdx = 0; DstIdx < Mask.size(); DstIdx++) {  [in lowerShuffleViaVRegSplitting(), local]
  5029  int DstVecIdx = DstIdx / ElemsPerVReg;  [in lowerShuffleViaVRegSplitting()]
  5030  int DstSubIdx = DstIdx % ElemsPerVReg;  [in lowerShuffleViaVRegSplitting()]
  5031  int SrcIdx = Mask[DstIdx];  [in lowerShuffleViaVRegSplitting()]
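The RISC-V hits split one large shuffle across several vector registers: each flat destination position DstIdx maps to register group DstIdx / ElemsPerVReg and lane DstIdx % ElemsPerVReg, and the source index Mask[DstIdx] decomposes the same way. A minimal sketch of that index arithmetic over a plain mask (the values and the printed naming are chosen just for the example):

```cpp
#include <cstdio>
#include <vector>

int main() {
  // An 8-element shuffle mask split across vector registers holding 4 elements each.
  const unsigned ElemsPerVReg = 4;
  std::vector<int> Mask = {0, 5, 2, 7, 4, 1, 6, 3};

  for (unsigned DstIdx = 0; DstIdx < Mask.size(); ++DstIdx) {
    int SrcIdx = Mask[DstIdx];
    if (SrcIdx < 0)
      continue; // undef lane, nothing to copy

    // Same decomposition as lowerShuffleViaVRegSplitting(): which register of the
    // group, and which lane inside it, on both the destination and source side.
    unsigned DstVecIdx = DstIdx / ElemsPerVReg;
    unsigned DstSubIdx = DstIdx % ElemsPerVReg;
    unsigned SrcVecIdx = (unsigned)SrcIdx / ElemsPerVReg;
    unsigned SrcSubIdx = (unsigned)SrcIdx % ElemsPerVReg;
    std::printf("dst v%u[%u] <- src v%u[%u]\n", DstVecIdx, DstSubIdx, SrcVecIdx, SrcSubIdx);
  }
  return 0;
}
```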