//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
  const char *Str,
  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

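// For example, with MaxInstLength == 4 and a target whose comment string is
// "#", the string
//   "add x0, x0, x1\n.space 8\n# just a comment"
// measures 4 + 8 + 0 = 12 bytes: one ordinary instruction, an 8-byte .space
// directive, and a comment that contributes nothing.
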
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

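// For example, commuting a two-address instruction "%x = op %x, %y", where
// source operand 1 is tied to the def, must also rewrite the def: the result
// is "%y = op %y, %x", and the kill flag previously on the %y use is cleared
// because %y is now redefined by the instruction.
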
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

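// For example, with commutable operand indices {1, 2}: the query
// (CommuteAnyOperandIndex, 2) is fixed up to {1, 2}; the query
// (2, CommuteAnyOperandIndex) to {2, 1}; and the fully specified query
// (1, 3) is rejected because operand 3 is not commutable.
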
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

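// For example, for a typical three-address instruction "v0 = op v1, v2"
// (one def, two sources), the default implementation above reports the
// commutable pair as operand indices {1, 2}.
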
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

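// For example, on a target with ARM-style predication, where the predicate is
// a (condition-code immediate, flags-register) operand pair, this rewrites
// those two operands in place from Pred and returns true. An instruction
// whose descriptor flags no predicate operands is left unchanged and the
// call returns false.
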
bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

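// For example, for a 32-bit subregister at bit offset 32 of a register class
// with an 8-byte spill slot, this computes Size = 4 and Offset = 4 on
// little-endian targets; on big-endian targets the offset is mirrored to
// 8 - (4 + 4) = 0, since the high part sits at the start of the slot.
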
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &
TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertBefore,
                           const MachineInstr &Orig) const {
  MachineFunction &MF = *MBB.getParent();
  // CFI instructions are marked as non-duplicable, because Darwin compact
  // unwind info emission can't handle multiple prologue setups.
  assert((!Orig.isNotDuplicable() ||
          (!MF.getTarget().getTargetTriple().isOSDarwin() &&
           Orig.isCFIInstruction())) &&
         "Instruction cannot be duplicated");

  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // stackmap, patchpoint, or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function
  // arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

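// Each folded register operand is thus replaced in the stackmap operand list
// by the four operands <StackMaps::IndirectMemRefOp, SpillSize, FrameIndex,
// SpillOffset>; e.g. an 8-byte value spilled to frame index 2 becomes
// "IndirectMemRefOp, 8, <fi#2>, 0".
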
static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  // If the machine operand is tied, untie it first.
  if (MI->getOperand(OpNo).isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion!
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  SmallVector<MachineOperand, 5> NewOps;
  TII.getFrameIndexOperands(NewOps, FI);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->removeOperand(OpNo);
  MI->insert(MI->operands_begin() + OpNo, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. Its operand
  // count is the per-target number of operands that now represent the memory
  // reference, i.e. the number of entries in NewOps.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);
}

// Returns nullptr if not possible to fold.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");
  if (Ops.size() > 1)
    return nullptr;
  unsigned Op = Ops[0];
  assert(Op && "should never be first operand");
  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");

  if (!MI.mayFoldInlineAsmRegOp(Op))
    return nullptr;

  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);

  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);

  // Update mayload/maystore metadata, and memoperands.
  const VirtRegInfo &RI =
      AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
  MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
  MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
  if (RI.Reads) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
    Flags |= MachineMemOperand::MOLoad;
  }
  if (RI.Writes) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
    Flags |= MachineMemOperand::MOStore;
  }
  MachineFunction *MF = NewMI.getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI));
  NewMI.addMemOperand(*MF, MMO);

  return &NewMI;
}

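// For example, an inline-asm register operand whose constraint also allows
// memory (e.g. "rm") can have its register replaced by the frame-index
// operands of a stack slot: the InlineAsm::Flag word preceding it is
// rewritten to Kind::Mem with constraint "m", and MayLoad/MayStore bits plus
// a fixed-stack memory operand are attached according to how the asm reads
// and writes the register.
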
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    return foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}

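// For example, folding the def of "%x = COPY %y" into frame index FI lowers
// the copy to a storeRegToStackSlot of %y into FI, while folding the use
// instead reloads %x from FI via loadRegFromStackSlot.
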
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it.  Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would kill the subregisters that previous copies
    // defined.

    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill());

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

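// For example, "$r0 = COPY $r0" with no implicit operands is simply erased,
// "$r0 = COPY undef $r1" is turned into a KILL, and "$r0 = COPY killed $r1"
// is expanded to the target's register-move sequence via copyPhysReg.
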
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool TargetInstrInfo::isThroughputPattern(unsigned Pattern) const {
  return false;
}

CombinerObjective
TargetInstrInfo::getCombinerObjective(unsigned Pattern) const {
  return CombinerObjective::Default;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(unsigned Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the operand order. In this case the
  // opcodes are not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that the
  // instructions' opcodes are equal or one of them is the inversion of the
  // other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See the visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(unsigned Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

void TargetInstrInfo::getReassociateOperandIndices(
    const MachineInstr &Root, unsigned Pattern,
    std::array<unsigned, 5> &OperandIndices) const {
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    OperandIndices = {1, 1, 1, 2, 2};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    OperandIndices = {2, 1, 2, 2, 1};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    OperandIndices = {1, 2, 1, 1, 2};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    OperandIndices = {2, 2, 2, 1, 1};
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }
}

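// OperandIndices[0] is the operand of Root defined by Prev; [1] and [3]
// locate A and X within Prev; [2] and [4] locate B and Y within Root (names
// as in the transformation comment above getMachineCombinerPatterns). For
// example, for REASSOC_AX_BY, Prev is "B = A op X" and Root is "C = B op Y",
// so A, B, X and Y all sit at their natural positions and the array is
// {1, 1, 1, 2, 2}.
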
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    ArrayRef<unsigned> OperandIndices,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
  MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
  MachineOperand &OpX = Prev.getOperand(OperandIndices[3]);
  MachineOperand &OpY = Root.getOperand(OperandIndices[4]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  unsigned PrevFirstOpIdx, PrevSecondOpIdx;
  unsigned RootFirstOpIdx, RootSecondOpIdx;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    PrevFirstOpIdx = OperandIndices[1];
    PrevSecondOpIdx = OperandIndices[3];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[2];
    RootSecondOpIdx = OperandIndices[4];
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    PrevFirstOpIdx = OperandIndices[3];
    PrevSecondOpIdx = OperandIndices[1];
    RootFirstOpIdx = OperandIndices[4];
    RootSecondOpIdx = OperandIndices[2];
    break;
  default:
    llvm_unreachable("unexpected MachineCombinerPattern");
  }

  // Basically BuildMI but doesn't add implicit operands by default.
  auto buildMINoImplicit = [](MachineFunction &MF, const MIMetadata &MIMD,
                              const MCInstrDesc &MCID, Register DestReg) {
    return MachineInstrBuilder(
               MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
        .setPCSections(MIMD.getPCSections())
        .addReg(DestReg, RegState::Define);
  };

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      buildMINoImplicit(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR);
  for (const auto &MO : Prev.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand; we already added it.
    if (Idx == 0)
      continue;
    if (Idx == PrevFirstOpIdx)
      MIB1.addReg(RegX, getKillRegState(KillX));
    else if (Idx == PrevSecondOpIdx)
      MIB1.addReg(RegY, getKillRegState(KillY));
    else
      MIB1.add(MO);
  }
  MIB1.copyImplicitOps(Prev);

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      buildMINoImplicit(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC);
  for (const auto &MO : Root.explicit_operands()) {
    unsigned Idx = MO.getOperandNo();
    // Skip the result operand.
    if (Idx == 0)
      continue;
    if (Idx == RootFirstOpIdx)
      MIB2 = MIB2.addReg(RegA, getKillRegState(KillA));
    else if (Idx == RootSecondOpIdx)
      MIB2 = MIB2.addReg(NewVR, getKillRegState(KillNewVR));
    else
      MIB2 = MIB2.add(MO);
  }
  MIB2.copyImplicitOps(Root);

  // Propagate FP flags from the original instructions.
  // But clear poison-generating flags because those may not be valid now.
  // TODO: There should be a helper function for copying only fast-math-flags.
  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
  MIB1->setFlags(IntersectedFlags);
  MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::IsExact);

  MIB2->setFlags(IntersectedFlags);
  MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::IsExact);

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  std::array<unsigned, 5> OperandIndices;
  getReassociateOperandIndices(Root, Pattern, OperandIndices);
  MachineInstr *Prev =
      MRI.getUniqueVRegDef(Root.getOperand(OperandIndices[0]).getReg());

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, OperandIndices,
                 InstIdxForVirtReg);
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

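// Conservative target-independent check for "trivial" rematerialization: the
// instruction must define a single virtual register without reading it, have
// no side effects, read only invariant memory, and use no registers other
// than constant physical registers. Targets may override this to accept more
// instructions.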
bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register.  Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def.  There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

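// Return the change in the logical stack-pointer adjustment caused by a call
// frame pseudo: the aligned frame size, negated for a frame-destroy on
// stack-grows-down targets (and for a frame-setup on stack-grows-up targets).
// Illustrative sketch on a stack-grows-down target (the pseudo opcode names
// are target-specific; these are only examples):
//   ADJCALLSTACKDOWN 16, ...   ; getSPAdjust() == 16
//   ADJCALLSTACKUP 16, ...     ; getSPAdjust() == -16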
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
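// Delegates to getMemOperandsWithOffsetWidth and succeeds only when the
// target reports exactly one base operand for the access.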
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  LocationSize Width = 0;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

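// Look up the def-to-use operand latency in the itineraries, if available.
// Returns std::nullopt when there is no itinerary or the def is not a machine
// opcode; falls back to the def's operand cycle when only the use is unknown.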
std::optional<unsigned>
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return std::nullopt;

  if (!DefNode->isMachineOpcode())
    return std::nullopt;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

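// Returns true when the itineraries report an operand latency of at most one
// cycle for the given def operand; without itineraries this conservatively
// answers false.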
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  std::optional<unsigned> DefCycle =
      ItinData->getOperandCycle(DefClass, DefIdx);
  return DefCycle && DefCycle <= 1U;
}

bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
  // TODO: We don't split functions where a section attribute has been set
  // since the split part may not be placed in a contiguous region. It may also
  // be more beneficial to augment the linker to ensure contiguous layout of
  // split functions within the same section as specified by the attribute.
  if (MF.getFunction().hasSection())
    return false;

  // We don't want to proceed further for cold functions
  // or functions of unknown hotness. Lukewarm functions have no prefix.
  std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
  if (SectionPrefix &&
      (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
    return false;
  }

  return true;
}

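// Describe where the value loaded into Reg by MI can be found after the
// instruction, for call-site parameter debug info. The default implementation
// handles three cases: a copy (described by its source), an add-immediate
// (source register plus an offset expression), and a load from non-escaping
// memory (a dereference of the base address plus offset).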
std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // If the target's hook couldn't describe this copy, give up.
    return std::nullopt;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return std::nullopt;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return std::nullopt;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return std::nullopt;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //    ...
    //    DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
    //    ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return std::nullopt;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize().hasValue() ? MMO->getSize().getValue()
                                            : ~UINT64_C(0));
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return std::nullopt;
}

// Get the call frame size just before MI.
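// Scans backwards: the nearest preceding frame-setup gives the open frame's
// size, a frame-destroy means no frame is open, and if neither is found the
// block's incoming call frame size applies.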
unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
  // Search backwards from MI for the most recent call frame instruction.
  MachineBasicBlock *MBB = MI.getParent();
  for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
    if (AdjI.getOpcode() == getCallFrameSetupOpcode())
      return getFrameTotalSize(AdjI);
    if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
      return 0;
  }

  // If none was found, use the call frame size from the start of the basic
  // block.
  return MBB->getCallFrameSize();
}

/// Both DefMI and UseMI must be valid.  By default, call directly to the
/// itinerary. This may be overridden by the target.
std::optional<unsigned> TargetInstrInfo::getOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

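// Decompose a REG_SEQUENCE (or target-specific equivalent) into its input
// register/subregister-index pairs. Undef inputs are skipped rather than
// reported.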
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return Flags;
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  const InlineAsm::Flag F(Flag);
  OS << F.getKindName();

  unsigned RCID;
  if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (F.isMemKind()) {
    InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo;
  if (F.isUseOperandTiedToDef(TiedTo))
    OS << " tiedto:$" << TiedTo;

  if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
      F.getRegMayBeFolded())
    OS << " foldable";

  return Flags;
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));
  if (ParentFn.hasFnAttribute("target-cpu"))
    F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}

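// Target-independent filtering for the outliner: classify an instruction as
// illegal or invisible where that can be decided generically, and defer to
// the target's getOutliningTypeImpl hook for anything not ruled out here.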
outliner::InstrType TargetInstrInfo::getOutliningType(
    MachineBasicBlock::iterator &MIT, unsigned Flags) const {
  MachineInstr &MI = *MIT;

  // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
  // have support for outlining those. Special-case that here.
  if (MI.isCFIInstruction())
    // Just go right to the target implementation.
    return getOutliningTypeImpl(MIT, Flags);

  // Be conservative about inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // Labels generally can't safely be outlined.
  if (MI.isLabel())
    return outliner::InstrType::Illegal;

  // Don't let debug instructions impact analysis.
  if (MI.isDebugInstr())
    return outliner::InstrType::Invisible;

  // Some other special cases.
  switch (MI.getOpcode()) {
    case TargetOpcode::IMPLICIT_DEF:
    case TargetOpcode::KILL:
    case TargetOpcode::LIFETIME_START:
    case TargetOpcode::LIFETIME_END:
      return outliner::InstrType::Invisible;
    default:
      break;
  }

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // If this is a branch to another block, we can't outline it.
    if (!MI.getParent()->succ_empty())
      return outliner::InstrType::Illegal;

    // Don't outline if the branch is not unconditional.
    if (isPredicated(MI))
      return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands of this instruction do anything that
  // might break if they're moved outside their current function.
  // This includes MachineBasicBlock references, BlockAddresses,
  // constant pool indices and jump table indices.
  //
  // A quick note on MO_TargetIndex:
  // This doesn't seem to be used in any of the architectures that the
  // MachineOutliner supports, but it was still filtered out in all of them.
  // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
  // As such, this check is removed both here and in the target-specific
  // implementations. Instead, we assert to make sure this doesn't
  // catch anyone off-guard somewhere down the line.
  for (const MachineOperand &MOP : MI.operands()) {
    // If you hit this assertion, please remove it and adjust
    // `getOutliningTypeImpl` for your target appropriately if necessary.
    // Adding the assertion back to other supported architectures
    // would be nice too :)
    assert(!MOP.isTargetIndex() && "This isn't used quite yet!");

    // CFI instructions should already have been filtered out at this point.
    assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");

    // PrologEpilogInserter should've already run at this point.
    assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");

    if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
      return outliner::InstrType::Illegal;
  }

  // If we don't know, delegate to the target-specific hook.
  return getOutliningTypeImpl(MIT, Flags);
}

bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentations create special TargetOpcodes at the start of the
  // block, which expand to special code sequences that must be present.
  auto First = MBB.getFirstNonDebugInstr();
  if (First == MBB.end())
    return true;

  if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
      First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
    return false;

  // Some instrumentations create special pseudo-instructions at or just before
  // the end that must be present.
  auto Last = MBB.getLastNonDebugInstr();
  if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
      Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
    return false;

  if (Last != First && Last->isReturn()) {
    --Last;
    if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
        Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
      return false;
  }
  return true;
}