//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "gi-combiner"

using namespace llvm;

// Option to allow testing of the combiner while no targets know about indexed
// addressing.
static cl::opt<bool>
    ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));


CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B, GISelKnownBits *KB,
                               MachineDominatorTree *MDT)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
      KB(KB), MDT(MDT) {
  (void)this->KB;
}

void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                                    Register ToReg) const {
  Observer.changingAllUsesOfReg(MRI, FromReg);

  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(ToReg, FromReg);

  Observer.finishedChangingAllUsesOfReg();
}

void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      Register ToReg) const {
  assert(FromRegOp.getParent() && "Expected an operand in an MI");
  Observer.changingInstr(*FromRegOp.getParent());

  FromRegOp.setReg(ToReg);

  Observer.changedInstr(*FromRegOp.getParent());
}

bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (matchCombineCopy(MI)) {
    applyCombineCopy(MI);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  // Give up if either DstReg or SrcReg is a physical register.
  if (Register::isPhysicalRegister(DstReg) ||
      Register::isPhysicalRegister(SrcReg))
    return false;

  // Give up if the types don't match.
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);
  // Give up if one has a valid LLT, but the other doesn't.
  if (DstTy.isValid() != SrcTy.isValid())
    return false;
  // Give up if the types don't match.
  if (DstTy.isValid() && SrcTy.isValid() && DstTy != SrcTy)
    return false;

  // Get the register banks and classes.
  const RegisterBank *DstBank = MRI.getRegBankOrNull(DstReg);
  const RegisterBank *SrcBank = MRI.getRegBankOrNull(SrcReg);
  const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
  const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);

  // Replace if the register constraints match.
  if ((SrcRC == DstRC) && (SrcBank == DstBank))
    return true;
  // Replace if DstReg has no constraints.
  if (!DstBank && !DstRC)
    return true;

  return false;
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, SrcReg);
}

bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
  bool IsUndef = false;
  SmallVector<Register, 4> Ops;
  if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
    applyCombineConcatVectors(MI, IsUndef, Ops);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                               SmallVectorImpl<Register> &Ops) {
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");
  IsUndef = true;
  MachineInstr *Undef = nullptr;

  // Walk over all the operands of concat vectors and check if they are
  // build_vector themselves or undef.
  // Then collect their operands in Ops.
  for (const MachineOperand &MO : MI.uses()) {
    Register Reg = MO.getReg();
    MachineInstr *Def = MRI.getVRegDef(Reg);
    assert(Def && "Operand not defined");
    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
      IsUndef = false;
      // Remember the operands of the build_vector to fold
      // them into the yet-to-build flattened concat vectors.
      for (const MachineOperand &BuildVecMO : Def->uses())
        Ops.push_back(BuildVecMO.getReg());
      break;
    case TargetOpcode::G_IMPLICIT_DEF: {
      LLT OpType = MRI.getType(Reg);
      // Keep one undef value for all the undef operands.
      if (!Undef) {
        Builder.setInsertPt(*MI.getParent(), MI);
        Undef = Builder.buildUndef(OpType.getScalarType());
      }
      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
                 OpType.getScalarType() &&
             "All undefs should have the same type");
      // Break the undef vector in as many scalar elements as needed
      // for the flattening.
      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());
      break;
    }
    default:
      return false;
    }
  }
  return true;
}
void CombinerHelper::applyCombineConcatVectors(
    MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
  // Generate the flattened build_vector.
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
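  //
  // For illustration (hypothetical MIR, not taken from an existing test):
  //   %x:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
  //   %y:_(<2 x s32>) = G_IMPLICIT_DEF
  //   %d:_(<4 x s32>) = G_CONCAT_VECTORS %x(<2 x s32>), %y(<2 x s32>)
  // is flattened into
  //   %u:_(s32) = G_IMPLICIT_DEF
  //   %d:_(<4 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32), %u(s32), %u(s32)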
  if (IsUndef)
    Builder.buildUndef(NewDstReg);
  else
    Builder.buildBuildVector(NewDstReg, Ops);
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, NewDstReg);
}

bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
  SmallVector<Register, 4> Ops;
  if (matchCombineShuffleVector(MI, Ops)) {
    applyCombineShuffleVector(MI, Ops);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
                                               SmallVectorImpl<Register> &Ops) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Src1 = MI.getOperand(1).getReg();
  LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce a
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
  unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
  unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;

  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector with a concat_vectors.
  //
  // Note: We may still be able to produce a concat_vectors fed by
  // extract_vector_elt and so on. It is less clear that would
  // be better though, so don't bother for now.
  //
  // If the destination is a scalar, the size of the sources doesn't
  // matter. We will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match,
  // we could still emit an extract vector element in that case.
  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
    return false;

  // Check that the shuffle mask can be broken evenly between the
  // different sources.
  if (DstNumElts % SrcNumElts != 0)
    return false;

  // Mask length is a multiple of the source vector length.
  // Check if the shuffle is some kind of concatenation of the input
  // vectors.
  unsigned NumConcat = DstNumElts / SrcNumElts;
  SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  for (unsigned i = 0; i != DstNumElts; ++i) {
    int Idx = Mask[i];
    // Undef value.
    if (Idx < 0)
      continue;
    // Ensure the indices in each SrcType sized piece are sequential and that
    // the same source is used for the whole piece.
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
      return false;
    // Remember which source this index came from.
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }

  // The shuffle is concatenating multiple vectors together.
  // Collect the different operands for that.
  Register UndefReg;
  Register Src2 = MI.getOperand(2).getReg();
  for (auto Src : ConcatSrcs) {
    if (Src < 0) {
      if (!UndefReg) {
        Builder.setInsertPt(*MI.getParent(), MI);
        UndefReg = Builder.buildUndef(SrcType).getReg(0);
      }
      Ops.push_back(UndefReg);
    } else if (Src == 0)
      Ops.push_back(Src1);
    else
      Ops.push_back(Src2);
  }
  return true;
}

void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
                                               const ArrayRef<Register> Ops) {
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  if (Ops.size() == 1)
    Builder.buildCopy(NewDstReg, Ops[0]);
  else
    Builder.buildMerge(NewDstReg, Ops);

  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, NewDstReg);
}

namespace {

/// Select a preference between two uses. CurrentUse is the current preference
/// while the *ForCandidate arguments describe the candidate under
/// consideration.
PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
                                  const LLT &TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }

  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive.
  if (CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}

/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHI's is to duplicate the
/// instructions for each predecessor.
/// However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases may
/// want to try harder to find a dominating block.
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just after
  // the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt), UseMO);
    return;
  }

  // Otherwise we want the start of the BB.
  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
}
} // end anonymous namespace

bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
  PreferredTuple Preferred;
  if (matchCombineExtendingLoads(MI, Preferred)) {
    applyCombineExtendingLoads(MI, Preferred);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or just
  // for performance.

  if (MI.getOpcode() != TargetOpcode::G_LOAD &&
      MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
      MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
    return false;

  auto &LoadValue = MI.getOperand(0);
  assert(LoadValue.isReg() && "Result wasn't a register?");

  LLT LoadValueTy = MRI.getType(LoadValue.getReg());
  if (!LoadValueTy.isScalar())
    return false;

  // Most architectures are going to legalize <s8 loads into at least a 1 byte
  // load, and the MMOs can only describe memory accesses in multiples of bytes.
  // If we try to perform extload combining on those, we can end up with
  // %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
  if (LoadValueTy.getSizeInBits() < 8)
    return false;

  // For non power-of-2 types, they will very likely be legalized into multiple
  // loads. Don't bother trying to match them into extending loads.
  if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
    return false;

  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
  unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
                                 ? TargetOpcode::G_ANYEXT
                                 : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
                                       ? TargetOpcode::G_SEXT
                                       : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
      Preferred = ChoosePreferredUse(Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends.
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
  return true;
}

void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // Rewrite the load to the chosen extending load.
  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();

  // Inserter to insert a truncate back to the original type at a given point
  // with some basic CSE to limit truncate duplication to one per BB.
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
                           MachineBasicBlock::iterator InsertBefore,
                           MachineOperand &UseMO) {
    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      Observer.changingInstr(*UseMO.getParent());
      UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO.getParent());
      return;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, UseMO, NewDstReg);
  };

  Observer.changingInstr(MI);
  MI.setDesc(
      Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
                               ? TargetOpcode::G_SEXTLOAD
                               : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
                                     ? TargetOpcode::G_ZEXTLOAD
                                     : TargetOpcode::G_LOAD));

  // Rewrite all the uses to fix up the types.
  auto &LoadValue = MI.getOperand(0);
  SmallVector<MachineOperand *, 4> Uses;
  for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
    Uses.push_back(&UseMO);

  for (auto *UseMO : Uses) {
    MachineInstr *UseMI = UseMO->getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      Register UseDstReg = UseMI->getOperand(0).getReg();
      MachineOperand &UseSrcMO = UseMI->getOperand(1);
      const LLT &UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ANYEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   ... = ... %2(s32)
          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
          Observer.erasingInstr(*UseMO->getParent());
          UseMO->getParent()->eraseFromParent();
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s64) = G_ANYEXT %1(s8)
          //   ... = ... %3(s64)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   %3:_(s64) = G_ANYEXT %2:_(s32)
          //   ... = ... %3(s64)
          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s64) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ZEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s64) = G_SEXTLOAD ...
          //   %4:_(s8) = G_TRUNC %2:_(s64)
          //   %3:_(s32) = G_ZEXT %4:_(s8)
          //   ... = ... %3(s32)
          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
                                                 InsertTruncAt);
        }
        continue;
      }
      // The use is (one of) the uses of the preferred use we chose earlier.
      // We're going to update the load to def this value later so just erase
      // the old extend.
      Observer.erasingInstr(*UseMO->getParent());
      UseMO->getParent()->eraseFromParent();
      continue;
    }

    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
  }

  MI.getOperand(0).setReg(ChosenDstReg);
  Observer.changedInstr(MI);
}

bool CombinerHelper::isPredecessor(MachineInstr &DefMI, MachineInstr &UseMI) {
  assert(DefMI.getParent() == UseMI.getParent());
  if (&DefMI == &UseMI)
    return false;

  // Loop through the basic block until we find one of the instructions.
  MachineBasicBlock::const_iterator I = DefMI.getParent()->begin();
  for (; I != DefMI.getParent()->end(); ++I)
    if (&*I == &DefMI || &*I == &UseMI)
      return &*I == &DefMI;

  llvm_unreachable("Block must contain instructions");
}

bool CombinerHelper::dominates(MachineInstr &DefMI, MachineInstr &UseMI) {
  if (MDT)
    return MDT->dominates(&DefMI, &UseMI);
  else if (DefMI.getParent() != UseMI.getParent())
    return false;

  return isPredecessor(DefMI, UseMI);
}

bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
                                            Register &Base, Register &Offset) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

#ifndef NDEBUG
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
#endif

  Base = MI.getOperand(1).getReg();
  MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
    return false;

  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);

  for (auto &Use : MRI.use_instructions(Base)) {
    if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
      continue;

    Offset = Use.getOperand(2).getReg();
    if (!ForceLegalIndexing &&
        !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
      LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "
                        << Use);
      continue;
    }

    // Make sure the offset calculation is before the potentially indexed op.
    // FIXME: we really care about dependency here.
    // The offset calculation might be movable.
    MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
    if (!OffsetDef || !dominates(*OffsetDef, MI)) {
      LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "
                        << Use);
      continue;
    }

    // FIXME: check whether all uses of Base are load/store with foldable
    // addressing modes. If so, using the normal addr-modes is better than
    // forming an indexed one.

    bool MemOpDominatesAddrUses = true;
    for (auto &PtrAddUse : MRI.use_instructions(Use.getOperand(0).getReg())) {
      if (!dominates(MI, PtrAddUse)) {
        MemOpDominatesAddrUses = false;
        break;
      }
    }

    if (!MemOpDominatesAddrUses) {
      LLVM_DEBUG(
          dbgs() << " Ignoring candidate as memop does not dominate uses: "
                 << Use);
      continue;
    }

    LLVM_DEBUG(dbgs() << " Found match: " << Use);
    Addr = Use.getOperand(0).getReg();
    return true;
  }

  return false;
}

bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
                                           Register &Base, Register &Offset) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

#ifndef NDEBUG
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
#endif

  Addr = MI.getOperand(1).getReg();
  MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
  if (!AddrDef || MRI.hasOneUse(Addr))
    return false;

  Base = AddrDef->getOperand(1).getReg();
  Offset = AddrDef->getOperand(2).getReg();

  LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);

  if (!ForceLegalIndexing &&
      !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
    LLVM_DEBUG(dbgs() << " Skipping, not legal for target");
    return false;
  }

  MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
  if (BaseDef->getOpcode() ==
      TargetOpcode::G_FRAME_INDEX) {
    LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::G_STORE) {
    // Would require a copy.
    if (Base == MI.getOperand(0).getReg()) {
      LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");
      return false;
    }

    // We're expecting one use of Addr in MI, but it could also be the
    // value stored, which isn't actually dominated by the instruction.
    if (MI.getOperand(0).getReg() == Addr) {
      LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");
      return false;
    }
  }

  // FIXME: check whether all uses of the base pointer are constant PtrAdds.
  // That might allow us to end base's liveness here by adjusting the constant.

  for (auto &UseMI : MRI.use_instructions(Addr)) {
    if (!dominates(MI, UseMI)) {
      LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
      return false;
    }
  }

  return true;
}

bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
  IndexedLoadStoreMatchInfo MatchInfo;
  if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
    applyCombineIndexedLoadStore(MI, MatchInfo);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
      Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
    return false;

  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                                          MatchInfo.Offset);
  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                              MatchInfo.Offset))
    return false;

  return true;
}

void CombinerHelper::applyCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
  MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
  MachineIRBuilder MIRBuilder(MI);
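  // Pick the pre/post-indexed variant corresponding to the original
  // load/store opcode.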
  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;
  unsigned NewOpcode;
  switch (Opcode) {
  case TargetOpcode::G_LOAD:
    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
    break;
  case TargetOpcode::G_SEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
    break;
  case TargetOpcode::G_STORE:
    NewOpcode = TargetOpcode::G_INDEXED_STORE;
    break;
  default:
    llvm_unreachable("Unknown load/store opcode");
  }

  auto MIB = MIRBuilder.buildInstr(NewOpcode);
  if (IsStore) {
    MIB.addDef(MatchInfo.Addr);
    MIB.addUse(MI.getOperand(0).getReg());
  } else {
    MIB.addDef(MI.getOperand(0).getReg());
    MIB.addDef(MatchInfo.Addr);
  }

  MIB.addUse(MatchInfo.Base);
  MIB.addUse(MatchInfo.Offset);
  MIB.addImm(MatchInfo.IsPre);
  MI.eraseFromParent();
  AddrDef.eraseFromParent();

  LLVM_DEBUG(dbgs() << " Combined to indexed operation");
}

bool CombinerHelper::matchElideBrByInvertingCond(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::G_BR)
    return false;

  // Try to match the following:
  // bb1:
  //   %c(s32) = G_ICMP pred, %a, %b
  //   %c1(s1) = G_TRUNC %c(s32)
  //   G_BRCOND %c1, %bb2
  //   G_BR %bb3
  // bb2:
  //   ...
  // bb3:

  // The above pattern does not have a fall through to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with a conditional branch to bb3 and otherwise
  // a fallthrough to bb2.
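  //
  // That is, the (illustrative) rewritten form is the inverted compare
  // branching to bb3, with bb2 reached by fallthrough:
  // bb1:
  //   %c(s32) = G_ICMP <inverted pred>, %a, %b
  //   %c1(s1) = G_TRUNC %c(s32)
  //   G_BRCOND %c1, %bb3
  // bb2:
  //   ...
  // bb3: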

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator BrIt(MI);
  if (BrIt == MBB->begin())
    return false;
  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  MachineInstr *BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  // Check that the next block is the conditional branch target.
  if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
    return false;

  MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
  if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
      !MRI.hasOneUse(CmpMI->getOperand(0).getReg()))
    return false;
  return true;
}

bool CombinerHelper::tryElideBrByInvertingCond(MachineInstr &MI) {
  if (!matchElideBrByInvertingCond(MI))
    return false;
  applyElideBrByInvertingCond(MI);
  return true;
}

void CombinerHelper::applyElideBrByInvertingCond(MachineInstr &MI) {
  MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
  MachineBasicBlock::iterator BrIt(MI);
  MachineInstr *BrCond = &*std::prev(BrIt);
  MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());

  CmpInst::Predicate InversePred = CmpInst::getInversePredicate(
      (CmpInst::Predicate)CmpMI->getOperand(1).getPredicate());

  // Invert the G_ICMP condition.
  Observer.changingInstr(*CmpMI);
  CmpMI->getOperand(1).setPredicate(InversePred);
  Observer.changedInstr(*CmpMI);

  // Change the conditional branch target.
  Observer.changingInstr(*BrCond);
  BrCond->getOperand(1).setMBB(BrTarget);
  Observer.changedInstr(*BrCond);
  MI.eraseFromParent();
}

static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
  // On Darwin, -Os means optimize for size without hurting performance, so
  // only really optimize for size when -Oz (MinSize) is used.
  if (MF.getTarget().getTargetTriple().isOSDarwin())
    return MF.getFunction().hasMinSize();
  return MF.getFunction().hasOptSize();
}

// Returns a list of types to use for memory op lowering in MemOps.
// A partial port of findOptimalMemOpLowering in TargetLowering.
static bool findGISelOptimalMemOpLowering(
    std::vector<LLT> &MemOps, unsigned Limit, uint64_t Size, unsigned DstAlign,
    unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
    bool AllowOverlap, unsigned DstAS, unsigned SrcAS,
    const AttributeList &FuncAttributes, const TargetLowering &TLI) {
  // If 'SrcAlign' is zero, that means the memory operation does not need to
  // load the value, i.e. memset or memcpy from constant string. Otherwise,
  // it's the inferred alignment of the source. 'DstAlign', on the other hand,
  // is the specified alignment of the memory operation. If it is zero, that
  // means it's possible to change the alignment of the destination.
  // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
  // not need to be loaded.
  if (SrcAlign != 0 && SrcAlign < DstAlign)
    return false;

  LLT Ty = TLI.getOptimalMemOpLLT(Size, DstAlign, SrcAlign, IsMemset,
                                  ZeroMemset, MemcpyStrSrc, FuncAttributes);

  if (Ty == LLT()) {
    // Use the largest scalar type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    Ty = LLT::scalar(64);
    while (DstAlign && DstAlign < Ty.getSizeInBytes() &&
           !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, DstAlign))
      Ty = LLT::scalar(Ty.getSizeInBytes());
    assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
    // FIXME: check for the largest legal type we can load/store to.
  }

  unsigned NumMemOps = 0;
  while (Size != 0) {
    unsigned TySize = Ty.getSizeInBytes();
    while (TySize > Size) {
      // For now, only use non-vector load / store's for the left-over pieces.
      LLT NewTy = Ty;
      // FIXME: check for mem op safety and legality of the types. Not all of
      // SDAGisms map cleanly to GISel concepts.
      if (NewTy.isVector())
        NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
      NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
      unsigned NewTySize = NewTy.getSizeInBytes();
      assert(NewTySize > 0 && "Could not find appropriate type");

      // If the new LLT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      // Need to get a VT equivalent for allowMisalignedMemoryAccesses().
      MVT VT = getMVTForLLT(Ty);
      if (NumMemOps && AllowOverlap && NewTySize < Size &&
          TLI.allowsMisalignedMemoryAccesses(
              VT, DstAS, DstAlign, MachineMemOperand::MONone, &Fast) &&
          Fast)
        TySize = Size;
      else {
        Ty = NewTy;
        TySize = NewTySize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(Ty);
    Size -= TySize;
  }

  return true;
}

static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                           Ty.getNumElements());
  return IntegerType::get(C, Ty.getSizeInBits());
}

// Get a vectorized representation of the memset value operand, GISel edition.
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
  MachineRegisterInfo &MRI = *MIB.getMRI();
  unsigned NumBits = Ty.getScalarSizeInBits();
  auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
  if (!Ty.isVector() && ValVRegAndVal) {
    unsigned KnownVal = ValVRegAndVal->Value;
    APInt Scalar = APInt(8, KnownVal);
    APInt SplatVal = APInt::getSplat(NumBits, Scalar);
    return MIB.buildConstant(Ty, SplatVal).getReg(0);
  }
  // FIXME: for vector types create a G_BUILD_VECTOR.
  if (Ty.isVector())
    return Register();

  // Extend the byte value to the larger type, and then multiply by a magic
  // value 0x010101... in order to replicate it across every byte.
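  // For example, splatting the byte 0xAB into an s32 value computes
  //   0x000000AB * 0x01010101 = 0xABABABAB.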
  LLT ExtType = Ty.getScalarType();
  auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
  if (NumBits > 8) {
    APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
    auto MagicMI = MIB.buildConstant(ExtType, Magic);
    Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
  }

  assert(ExtType == Ty && "Vector memset value type not supported yet");
  return Val;
}

bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
                                    Register Val, unsigned KnownLen,
                                    unsigned Align, bool IsVolatile) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  auto &DL = MF.getDataLayout();
  LLVMContext &C = MF.getFunction().getContext();

  assert(KnownLen != 0 && "Have a zero length memset length!");

  bool DstAlignCanChange = false;
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);

  MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
  if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
    DstAlignCanChange = true;

  unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
  std::vector<LLT> MemOps;

  const auto &DstMMO = **MI.memoperands_begin();
  MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();

  auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
  bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;

  if (!findGISelOptimalMemOpLowering(
          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), 0,
          /*IsMemset=*/true,
          /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
          /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(), ~0u,
          MF.getFunction().getAttributes(), TLI))
    return false;

  if (DstAlignCanChange) {
    // Get an estimate of the type from the LLT.
    Type *IRTy = getTypeForLLT(MemOps[0], C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
    if (NewAlign > Align) {
      Align = NewAlign;
      unsigned FI = FIDef->getOperand(1).getIndex();
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI) < Align)
        MFI.setObjectAlignment(FI, Align);
    }
  }

  MachineIRBuilder MIB(MI);
  // Find the largest store and generate the bit pattern for it.
  LLT LargestTy = MemOps[0];
  for (unsigned i = 1; i < MemOps.size(); i++)
    if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
      LargestTy = MemOps[i];

  // The memset stored value is always defined as an s8, so in order to make it
  // work with larger store types we need to repeat the bit pattern across the
  // wider type.
  Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);

  if (!MemSetValue)
    return false;

  // Generate the stores. For each store type in the list, we generate the
  // matching store of that type to the destination address.
  LLT PtrTy = MRI.getType(Dst);
  unsigned DstOff = 0;
  unsigned Size = KnownLen;
  for (unsigned I = 0; I < MemOps.size(); I++) {
    LLT Ty = MemOps[I];
    unsigned TySize = Ty.getSizeInBytes();
    if (TySize > Size) {
      // Issuing an unaligned store that overlaps with the previous store.
      // Adjust the offset accordingly.
      assert(I == MemOps.size() - 1 && I != 0);
      DstOff -= TySize - Size;
    }

    // If this store is smaller than the largest store, see whether we can get
    // the smaller value for free with a truncate.
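    // For instance (illustrative, not in the original comments): if MemOps is
    // {s64, s16} and the target reports that truncating s64 to s16 is free,
    // the s16 store below reuses a G_TRUNC of the s64 splat value instead of
    // recomputing the pattern from the original s8 operand.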
    Register Value = MemSetValue;
    if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
      MVT VT = getMVTForLLT(Ty);
      MVT LargestVT = getMVTForLLT(LargestTy);
      if (!LargestTy.isVector() && !Ty.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
      else
        Value = getMemsetValue(Val, Ty, MIB);
      if (!Value)
        return false;
    }

    auto *StoreMMO =
        MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());

    Register Ptr = Dst;
    if (DstOff != 0) {
      auto Offset =
          MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
      Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
    }

    MIB.buildStore(Value, Ptr, *StoreMMO);
    DstOff += Ty.getSizeInBytes();
    Size -= TySize;
  }

  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
                                    Register Src, unsigned KnownLen,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsVolatile) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  auto &DL = MF.getDataLayout();
  LLVMContext &C = MF.getFunction().getContext();

  assert(KnownLen != 0 && "Have a zero length memcpy length!");

  bool DstAlignCanChange = false;
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  unsigned Alignment = MinAlign(DstAlign, SrcAlign);

  MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
  if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
    DstAlignCanChange = true;

  // FIXME: infer better src pointer alignment like SelectionDAG does here.
  // FIXME: also use the equivalent of isMemSrcFromConstant and always
  // inlining if the memcpy is in a tail call position.
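
  // Sketch of the resulting expansion (illustrative, not from the original
  // source): assuming the target's getOptimalMemOpLLT returns s64, a 16-byte
  // memcpy is expanded by the code below into two G_LOAD/G_STORE pairs at
  // offsets 0 and 8, after which the original intrinsic is erased.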

  unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
  std::vector<LLT> MemOps;

  const auto &DstMMO = **MI.memoperands_begin();
  const auto &SrcMMO = **std::next(MI.memoperands_begin());
  MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
  MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();

  if (!findGISelOptimalMemOpLowering(
          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
          SrcAlign,
          /*IsMemset=*/false,
          /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
          /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
    return false;

  if (DstAlignCanChange) {
    // Get an estimate of the type from the LLT.
    Type *IRTy = getTypeForLLT(MemOps[0], C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Alignment &&
             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
        NewAlign /= 2;

    if (NewAlign > Alignment) {
      Alignment = NewAlign;
      unsigned FI = FIDef->getOperand(1).getIndex();
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI) < Alignment)
        MFI.setObjectAlignment(FI, Alignment);
    }
  }

  LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");

  MachineIRBuilder MIB(MI);
  // Now we need to emit a load / store pair for each of the types we've
  // collected. I.e. for each type, generate a load from the source pointer of
  // that type width, and then generate a corresponding store to the dest
  // buffer of that loaded value. This can result in a sequence of loads and
  // stores of mixed types, depending on what the target specifies as good
  // types to use.
  unsigned CurrOffset = 0;
  LLT PtrTy = MRI.getType(Src);
  unsigned Size = KnownLen;
  for (auto CopyTy : MemOps) {
    // Issuing an unaligned load / store pair that overlaps with the previous
    // pair. Adjust the offset accordingly.
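    // Overlap example (added for illustration): with KnownLen == 7 and
    // MemOps == {s32, s32}, the second iteration has only 3 bytes left, so
    // CurrOffset is pulled back from 4 to 3 and the final 4-byte copy
    // re-covers one byte already written by the first pair.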
    if (CopyTy.getSizeInBytes() > Size)
      CurrOffset -= CopyTy.getSizeInBytes() - Size;

    // Construct MMOs for the accesses.
    auto *LoadMMO =
        MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
    auto *StoreMMO =
        MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());

    // Create the load.
    Register LoadPtr = Src;
    Register Offset;
    if (CurrOffset != 0) {
      Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
                   .getReg(0);
      LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
    }
    auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);

    // Create the store.
    Register StorePtr =
        CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
    MIB.buildStore(LdVal, StorePtr, *StoreMMO);
    CurrOffset += CopyTy.getSizeInBytes();
    Size -= CopyTy.getSizeInBytes();
  }

  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
                                     Register Src, unsigned KnownLen,
                                     unsigned DstAlign, unsigned SrcAlign,
                                     bool IsVolatile) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  auto &DL = MF.getDataLayout();
  LLVMContext &C = MF.getFunction().getContext();

  assert(KnownLen != 0 && "Have a zero length memmove length!");

  bool DstAlignCanChange = false;
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF);
  unsigned Alignment = MinAlign(DstAlign, SrcAlign);

  MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
  if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
    DstAlignCanChange = true;

  unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
  std::vector<LLT> MemOps;

  const auto &DstMMO = **MI.memoperands_begin();
  const auto &SrcMMO = **std::next(MI.memoperands_begin());
  MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
  MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();

  // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently
  // due to a bug in its findOptimalMemOpLowering implementation. For now do
  // the same thing here.
  if (!findGISelOptimalMemOpLowering(
          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
          SrcAlign,
          /*IsMemset=*/false,
          /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
          /*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), TLI))
    return false;

  if (DstAlignCanChange) {
    // Get an estimate of the type from the LLT.
    Type *IRTy = getTypeForLLT(MemOps[0], C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Alignment &&
             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
        NewAlign /= 2;

    if (NewAlign > Alignment) {
      Alignment = NewAlign;
      unsigned FI = FIDef->getOperand(1).getIndex();
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI) < Alignment)
        MFI.setObjectAlignment(FI, Alignment);
    }
  }

  LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");

  MachineIRBuilder MIB(MI);
  // Memmove requires that we perform all the loads before issuing the stores,
  // since the source and destination ranges may overlap. Apart from that,
  // this loop is pretty much doing the same thing as the memcpy codegen
  // function.
  unsigned CurrOffset = 0;
  LLT PtrTy = MRI.getType(Src);
  SmallVector<Register, 16> LoadVals;
  for (auto CopyTy : MemOps) {
    // Construct MMO for the load.
    auto *LoadMMO =
        MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());

    // Create the load.
    Register LoadPtr = Src;
    if (CurrOffset != 0) {
      auto Offset =
          MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
      LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
    }
    LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
    CurrOffset += CopyTy.getSizeInBytes();
  }

  CurrOffset = 0;
  for (unsigned I = 0; I < MemOps.size(); ++I) {
    LLT CopyTy = MemOps[I];
    // Now store the values loaded.
    auto *StoreMMO =
        MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());

    Register StorePtr = Dst;
    if (CurrOffset != 0) {
      auto Offset =
          MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
      StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
    }
    MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
    CurrOffset += CopyTy.getSizeInBytes();
  }
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
  // This combine is fairly complex so it's not written with a separate
  // matcher function.
  assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
  Intrinsic::ID ID = (Intrinsic::ID)MI.getIntrinsicID();
  assert((ID == Intrinsic::memcpy || ID == Intrinsic::memmove ||
          ID == Intrinsic::memset) &&
         "Expected a memcpy like intrinsic");

  auto MMOIt = MI.memoperands_begin();
  const MachineMemOperand *MemOp = *MMOIt;
  bool IsVolatile = MemOp->isVolatile();
  // Don't try to optimize volatile.
  if (IsVolatile)
    return false;

  unsigned DstAlign = MemOp->getBaseAlignment();
  unsigned SrcAlign = 0;
  Register Dst = MI.getOperand(1).getReg();
  Register Src = MI.getOperand(2).getReg();
  Register Len = MI.getOperand(3).getReg();

  if (ID != Intrinsic::memset) {
    assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
    MemOp = *(++MMOIt);
    SrcAlign = MemOp->getBaseAlignment();
  }

  // See if this is a constant length copy.
  auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
  if (!LenVRegAndVal)
    return false; // Leave it to the legalizer to lower it to a libcall.
  unsigned KnownLen = LenVRegAndVal->Value;

  if (KnownLen == 0) {
    MI.eraseFromParent();
    return true;
  }

  if (MaxLen && KnownLen > MaxLen)
    return false;

  if (ID == Intrinsic::memcpy)
    return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
                          IsVolatile);
  if (ID == Intrinsic::memmove)
    return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign,
                           IsVolatile);
  if (ID == Intrinsic::memset)
    return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
  return false;
}

bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
                                           PtrAddChain &MatchInfo) {
  // We're trying to match the following pattern:
  //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
  //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
  // -->
  //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)

  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Register Add2 = MI.getOperand(1).getReg();
  Register Imm1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
  if (!MaybeImmVal)
    return false;

  MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Register Base = Add2Def->getOperand(1).getReg();
  Register Imm2 = Add2Def->getOperand(2).getReg();
  auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
  if (!MaybeImm2Val)
    return false;

  // Pass the combined immediate to the apply function.
  MatchInfo.Imm = MaybeImmVal->Value + MaybeImm2Val->Value;
  MatchInfo.Base = Base;
  return true;
}

bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
                                           PtrAddChain &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
  MachineIRBuilder MIB(MI);
  LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
  auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
  Observer.changingInstr(MI);
  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
  Observer.changedInstr(MI);
  return true;
}

bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  if (tryCombineExtendingLoads(MI))
    return true;
  if (tryCombineIndexedLoadStore(MI))
    return true;
  return false;
}
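
// Usage sketch (added for illustration; the class name below is hypothetical
// and not part of LLVM): a target's CombinerInfo typically constructs a
// CombinerHelper from its change observer and builder, then forwards each
// instruction to tryCombine alongside any target-specific combines, e.g.:
//
//   bool MyTargetCombinerInfo::combine(GISelChangeObserver &Observer,
//                                      MachineInstr &MI,
//                                      MachineIRBuilder &B) const {
//     CombinerHelper Helper(Observer, B);
//     return Helper.tryCombine(MI);
//   }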