xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/MachineInstr.cpp (revision 770cf0a5f02dc8983a89c6568d741fbc25baa999)
1 //===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Methods common to all machine instructions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/MachineInstr.h"
14 #include "llvm/ADT/ArrayRef.h"
15 #include "llvm/ADT/Hashing.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallBitVector.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/MemoryLocation.h"
21 #include "llvm/CodeGen/LiveRegUnits.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineInstrBundle.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/PseudoSourceValue.h"
32 #include "llvm/CodeGen/Register.h"
33 #include "llvm/CodeGen/StackMaps.h"
34 #include "llvm/CodeGen/TargetInstrInfo.h"
35 #include "llvm/CodeGen/TargetRegisterInfo.h"
36 #include "llvm/CodeGen/TargetSubtargetInfo.h"
37 #include "llvm/CodeGenTypes/LowLevelType.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DebugInfoMetadata.h"
40 #include "llvm/IR/DebugLoc.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/InlineAsm.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/LLVMContext.h"
45 #include "llvm/IR/Metadata.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/ModuleSlotTracker.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/MC/MCInstrDesc.h"
50 #include "llvm/MC/MCRegisterInfo.h"
51 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/FormattedStream.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Target/TargetMachine.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <cstring>
62 #include <utility>
63 
64 using namespace llvm;
65 
// Hidden debugging flag: when enabled, instruction dumps also include the
// MachineInstr's heap address (consulted by the printing code in this file).
static cl::opt<bool>
    PrintMIAddrs("print-mi-addrs", cl::Hidden,
                 cl::desc("Print addresses of MachineInstrs when dumping"));
69 
70 static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
71   if (const MachineBasicBlock *MBB = MI.getParent())
72     if (const MachineFunction *MF = MBB->getParent())
73       return MF;
74   return nullptr;
75 }
76 
77 // Try to crawl up to the machine function and get TRI/MRI/TII from it.
78 static void tryToGetTargetInfo(const MachineInstr &MI,
79                                const TargetRegisterInfo *&TRI,
80                                const MachineRegisterInfo *&MRI,
81                                const TargetInstrInfo *&TII) {
82 
83   if (const MachineFunction *MF = getMFIfAvailable(MI)) {
84     TRI = MF->getSubtarget().getRegisterInfo();
85     MRI = &MF->getRegInfo();
86     TII = MF->getSubtarget().getInstrInfo();
87   }
88 }
89 
90 void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
91   for (MCPhysReg ImpDef : MCID->implicit_defs())
92     addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
93   for (MCPhysReg ImpUse : MCID->implicit_uses())
94     addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
95 }
96 
/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
                           DebugLoc DL, bool NoImp)
    : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
  // The stored DebugLoc must not require cleanup.
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands: the declared explicit
  // operands plus every implicit def and use from the descriptor.
  if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
                        MCID->implicit_uses().size()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  // NoImp lets callers suppress the automatic implicit operands (they may
  // add their own).
  if (!NoImp)
    addImplicitDefUseOperands(MF);
}
116 
/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Does not copy the number from debug instruction numbering, to preserve
/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
      Opcode(MI.getOpcode()) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Allocate exactly as many operand slots as the source instruction uses.
  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Replicate ties between the operands, which addOperand was not
  // able to do reliably (it resets TiedTo on every copied operand).
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    MachineOperand &NewMO = getOperand(i);
    const MachineOperand &OrigMO = MI.getOperand(i);
    NewMO.TiedTo = OrigMO.TiedTo;
  }

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}
144 
/// Replace this instruction's descriptor. If the instruction is inserted in
/// a function, notify the function first so it can update any
/// descriptor-dependent bookkeeping.
void MachineInstr::setDesc(const MCInstrDesc &TID) {
  if (getParent())
    getMF()->handleChangeDesc(*this, TID);
  MCID = &TID;
  Opcode = TID.Opcode;
}
151 
/// Move this instruction so it immediately precedes MovePos, splicing it out
/// of its current block and into MovePos's block.
void MachineInstr::moveBefore(MachineInstr *MovePos) {
  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
}
155 
156 /// getRegInfo - If this instruction is embedded into a MachineFunction,
157 /// return the MachineRegisterInfo object for the current function, otherwise
158 /// return null.
159 MachineRegisterInfo *MachineInstr::getRegInfo() {
160   if (MachineBasicBlock *MBB = getParent())
161     return &MBB->getParent()->getRegInfo();
162   return nullptr;
163 }
164 
165 const MachineRegisterInfo *MachineInstr::getRegInfo() const {
166   if (const MachineBasicBlock *MBB = getParent())
167     return &MBB->getParent()->getRegInfo();
168   return nullptr;
169 }
170 
171 void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
172   for (MachineOperand &MO : operands())
173     if (MO.isReg())
174       MRI.removeRegOperandFromUseList(&MO);
175 }
176 
177 void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
178   for (MachineOperand &MO : operands())
179     if (MO.isReg())
180       MRI.addRegOperandToUseList(&MO);
181 }
182 
183 void MachineInstr::addOperand(const MachineOperand &Op) {
184   MachineBasicBlock *MBB = getParent();
185   assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
186   MachineFunction *MF = MBB->getParent();
187   assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
188   addOperand(*MF, Op);
189 }
190 
/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  // MRI knows how to relocate operands while keeping register use lists
  // consistent.
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);
  // MachineOperand is a trivially copyable type so we can just use memmove,
  // which also handles the overlapping-range case.
  assert(Dst && Src && "Unknown operands");
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}
201 
/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
         "Cannot add more operands.");
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand.  Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around.  See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    // Walk back over the trailing implicit register operands to find where
    // the explicit operand list ends.
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

  // OpNo now points as the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point, opening a one-slot gap
  // at OpNo. (OldOperands == Operands when no reallocation happened.)
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
    // Ensure debug instructions set debug flag on register uses.
    if (NewMO->isUse() && isDebugInstr())
      NewMO->setIsDebug();
  }
}
295 
/// Remove the operand at index OpNo, shifting all following operands down by
/// one slot and updating MRI use lists when the instruction is in a function.
void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  // Any tie involving OpNo must be dissolved before operands are moved.
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  // Close the gap by sliding the tail of the operand array down one slot.
  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}
319 
/// Rebuild this instruction's extra info (memory operands, pre-/post-
/// instruction symbols, heap alloc marker, PC sections, CFI type, MMRA
/// metadata). Depending on how many pieces are present, the data is stored
/// nowhere, inline as a single tagged pointer, or out of line in
/// MF-allocated storage.
void MachineInstr::setExtraInfo(MachineFunction &MF,
                                ArrayRef<MachineMemOperand *> MMOs,
                                MCSymbol *PreInstrSymbol,
                                MCSymbol *PostInstrSymbol,
                                MDNode *HeapAllocMarker, MDNode *PCSections,
                                uint32_t CFIType, MDNode *MMRAs) {
  bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
  bool HasPCSections = PCSections != nullptr;
  bool HasCFIType = CFIType != 0;
  bool HasMMRAs = MMRAs != nullptr;
  int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
                    HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;

  // Drop all extra info if there is none.
  if (NumPointers <= 0) {
    Info.clear();
    return;
  }

  // If more than one pointer, then store out of line. Store heap alloc markers
  // out of line because PointerSumType cannot hold more than 4 tag types with
  // 32-bit pointers.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
           HasCFIType) {
    Info.set<EIIK_OutOfLine>(
        MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
                             HeapAllocMarker, PCSections, CFIType, MMRAs));
    return;
  }

  // Otherwise store the single pointer inline.
  if (HasPreInstrSymbol)
    Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
  else if (HasPostInstrSymbol)
    Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
  else
    Info.set<EIIK_MMO>(MMOs[0]);
}
361 
362 void MachineInstr::dropMemRefs(MachineFunction &MF) {
363   if (memoperands_empty())
364     return;
365 
366   setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
367                getHeapAllocMarker(), getPCSections(), getCFIType(),
368                getMMRAMetadata());
369 }
370 
371 void MachineInstr::setMemRefs(MachineFunction &MF,
372                               ArrayRef<MachineMemOperand *> MMOs) {
373   if (MMOs.empty()) {
374     dropMemRefs(MF);
375     return;
376   }
377 
378   setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
379                getHeapAllocMarker(), getPCSections(), getCFIType(),
380                getMMRAMetadata());
381 }
382 
383 void MachineInstr::addMemOperand(MachineFunction &MF,
384                                  MachineMemOperand *MO) {
385   SmallVector<MachineMemOperand *, 2> MMOs;
386   MMOs.append(memoperands_begin(), memoperands_end());
387   MMOs.push_back(MO);
388   setMemRefs(MF, MMOs);
389 }
390 
391 void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
392   if (this == &MI)
393     // Nothing to do for a self-clone!
394     return;
395 
396   assert(&MF == MI.getMF() &&
397          "Invalid machine functions when cloning memory refrences!");
398   // See if we can just steal the extra info already allocated for the
399   // instruction. We can do this whenever the pre- and post-instruction symbols
400   // are the same (including null).
401   if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
402       getPostInstrSymbol() == MI.getPostInstrSymbol() &&
403       getHeapAllocMarker() == MI.getHeapAllocMarker() &&
404       getPCSections() == MI.getPCSections() && getMMRAMetadata() &&
405       MI.getMMRAMetadata()) {
406     Info = MI.Info;
407     return;
408   }
409 
410   // Otherwise, fall back on a copy-based clone.
411   setMemRefs(MF, MI.memoperands());
412 }
413 
414 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
415 /// identical.
416 static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
417                              ArrayRef<MachineMemOperand *> RHS) {
418   if (LHS.size() != RHS.size())
419     return false;
420 
421   auto LHSPointees = make_pointee_range(LHS);
422   auto RHSPointees = make_pointee_range(RHS);
423   return std::equal(LHSPointees.begin(), LHSPointees.end(),
424                     RHSPointees.begin());
425 }
426 
/// Merge the memory references of every instruction in MIs onto this one.
/// An instruction with an *empty* memoperand list conveys no information and
/// must be treated conservatively, so encountering one forces this
/// instruction's list to be dropped entirely.
void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}
477 
478 void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
479   // Do nothing if old and new symbols are the same.
480   if (Symbol == getPreInstrSymbol())
481     return;
482 
483   // If there was only one symbol and we're removing it, just clear info.
484   if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
485     Info.clear();
486     return;
487   }
488 
489   setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
490                getHeapAllocMarker(), getPCSections(), getCFIType(),
491                getMMRAMetadata());
492 }
493 
494 void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
495   // Do nothing if old and new symbols are the same.
496   if (Symbol == getPostInstrSymbol())
497     return;
498 
499   // If there was only one symbol and we're removing it, just clear info.
500   if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
501     Info.clear();
502     return;
503   }
504 
505   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
506                getHeapAllocMarker(), getPCSections(), getCFIType(),
507                getMMRAMetadata());
508 }
509 
510 void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
511   // Do nothing if old and new symbols are the same.
512   if (Marker == getHeapAllocMarker())
513     return;
514 
515   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
516                Marker, getPCSections(), getCFIType(), getMMRAMetadata());
517 }
518 
519 void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
520   // Do nothing if old and new symbols are the same.
521   if (PCSections == getPCSections())
522     return;
523 
524   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
525                getHeapAllocMarker(), PCSections, getCFIType(),
526                getMMRAMetadata());
527 }
528 
529 void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
530   // Do nothing if old and new types are the same.
531   if (Type == getCFIType())
532     return;
533 
534   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
535                getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata());
536 }
537 
538 void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
539   // Do nothing if old and new symbols are the same.
540   if (MMRAs == getMMRAMetadata())
541     return;
542 
543   setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
544                getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs);
545 }
546 
/// Copy all instruction symbols and markers (pre/post symbols, heap alloc
/// marker, PC sections, MMRA metadata) from MI onto this instruction.
void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
                                     const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning instruction symbols!");

  setPreInstrSymbol(MF, MI.getPreInstrSymbol());
  setPostInstrSymbol(MF, MI.getPostInstrSymbol());
  setHeapAllocMarker(MF, MI.getHeapAllocMarker());
  setPCSections(MF, MI.getPCSections());
  setMMRAMetadata(MF, MI.getMMRAMetadata());
}
562 
563 uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
564   // For now, the just return the union of the flags. If the flags get more
565   // complicated over time, we might need more logic here.
566   return getFlags() | Other.getFlags();
567 }
568 
/// Translate IR-level flags on I (nsw/nuw, nusw, exact, disjoint, nneg,
/// samesign, fast-math flags, and !unpredictable metadata) into the
/// corresponding MachineInstr MIFlag bits and return the bit mask.
uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
  uint32_t MIFlags = 0;
  // Copy the wrapping flags.
  if (const OverflowingBinaryOperator *OB =
          dyn_cast<OverflowingBinaryOperator>(&I)) {
    if (OB->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (OB->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
    if (TI->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (TI->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (GEP->hasNoUnsignedSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUSWrap;
    if (GEP->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  }

  // Copy the nonneg flag.
  if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
    if (PNI->hasNonNeg())
      MIFlags |= MachineInstr::MIFlag::NonNeg;
    // Copy the disjoint flag.
  } else if (const PossiblyDisjointInst *PD =
                 dyn_cast<PossiblyDisjointInst>(&I)) {
    if (PD->isDisjoint())
      MIFlags |= MachineInstr::MIFlag::Disjoint;
  }

  // Copy the samesign flag.
  if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
    if (ICmp->hasSameSign())
      MIFlags |= MachineInstr::MIFlag::SameSign;

  // Copy the exact flag.
  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
    if (PE->isExact())
      MIFlags |= MachineInstr::MIFlag::IsExact;

  // Copy the fast-math flags.
  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
    const FastMathFlags Flags = FP->getFastMathFlags();
    if (Flags.noNaNs())
      MIFlags |= MachineInstr::MIFlag::FmNoNans;
    if (Flags.noInfs())
      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
    if (Flags.noSignedZeros())
      MIFlags |= MachineInstr::MIFlag::FmNsz;
    if (Flags.allowReciprocal())
      MIFlags |= MachineInstr::MIFlag::FmArcp;
    if (Flags.allowContract())
      MIFlags |= MachineInstr::MIFlag::FmContract;
    if (Flags.approxFunc())
      MIFlags |= MachineInstr::MIFlag::FmAfn;
    if (Flags.allowReassoc())
      MIFlags |= MachineInstr::MIFlag::FmReassoc;
  }

  // Map !unpredictable metadata onto the Unpredictable flag.
  if (I.getMetadata(LLVMContext::MD_unpredictable))
    MIFlags |= MachineInstr::MIFlag::Unpredictable;

  return MIFlags;
}
635 
/// Replace this instruction's flags wholesale with the MIFlag bits derived
/// from the IR instruction I.
void MachineInstr::copyIRFlags(const Instruction &I) {
  Flags = copyFlagsFromInstruction(I);
}
639 
/// Scan the bundle headed by this instruction for descriptor flags matching
/// Mask. With AnyInBundle the first matching instruction wins; with
/// AllInBundle every non-BUNDLE instruction must match.
bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      // Don't let the flagless BUNDLE header itself defeat an AllInBundle
      // query.
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}
655 
/// Compare this instruction to Other for equivalence under the given Check
/// mode: operand-by-operand comparison (recursing into bundles), plus debug
/// locations, pre-/post-instruction symbols and call CFI types.
bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we analysed the last instruction inside at least one of the
    // bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  // If pre- or post-instruction symbols do not match then the two instructions
  // are not identical.
  if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
      getPostInstrSymbol() != Other.getPostInstrSymbol())
    return false;
  // Call instructions with different CFI types are not identical.
  if (isCall() && getCFIType() != Other.getCFIType())
    return false;

  return true;
}
734 
/// Return true if this and Other are debug-value-like instructions that
/// describe the same debug location, variable, operands and expression.
bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
  if (!isDebugValueLike() || !Other.isDebugValueLike())
    return false;
  if (getDebugLoc() != Other.getDebugLoc())
    return false;
  if (getDebugVariable() != Other.getDebugVariable())
    return false;
  if (getNumDebugOperands() != Other.getNumDebugOperands())
    return false;
  // All debug operands must match pairwise.
  for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
    if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
      return false;
  // Expressions are compared taking direct/indirect encoding into account.
  if (!DIExpression::isEqualExpression(
          getDebugExpression(), isIndirectDebugValue(),
          Other.getDebugExpression(), Other.isIndirectDebugValue()))
    return false;
  return true;
}
753 
754 const MachineFunction *MachineInstr::getMF() const {
755   return getParent()->getParent();
756 }
757 
758 MachineInstr *MachineInstr::removeFromParent() {
759   assert(getParent() && "Not embedded in a basic block!");
760   return getParent()->remove(this);
761 }
762 
763 MachineInstr *MachineInstr::removeFromBundle() {
764   assert(getParent() && "Not embedded in a basic block!");
765   return getParent()->remove_instr(this);
766 }
767 
768 void MachineInstr::eraseFromParent() {
769   assert(getParent() && "Not embedded in a basic block!");
770   getParent()->erase(this);
771 }
772 
773 void MachineInstr::eraseFromBundle() {
774   assert(getParent() && "Not embedded in a basic block!");
775   getParent()->erase_instr(this);
776 }
777 
778 bool MachineInstr::isCandidateForAdditionalCallInfo(QueryType Type) const {
779   if (!isCall(Type))
780     return false;
781   switch (getOpcode()) {
782   case TargetOpcode::PATCHPOINT:
783   case TargetOpcode::STACKMAP:
784   case TargetOpcode::STATEPOINT:
785   case TargetOpcode::FENTRY_CALL:
786     return false;
787   }
788   return true;
789 }
790 
791 bool MachineInstr::shouldUpdateAdditionalCallInfo() const {
792   if (isBundle())
793     return isCandidateForAdditionalCallInfo(MachineInstr::AnyInBundle);
794   return isCandidateForAdditionalCallInfo();
795 }
796 
// Shared implementation for the const/non-const getDebugOperandsForReg
// overloads: filter this instruction's debug operands down to register
// operands that name exactly Reg. The predicate is wrapped in std::function
// so both overloads share one iterator type.
template <typename Operand, typename Instruction>
static iterator_range<
    filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
getDebugOperandsForRegHelper(Instruction *MI, Register Reg) {
  std::function<bool(Operand & Op)> OpUsesReg(
      [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
  return make_filter_range(MI->debug_operands(), OpUsesReg);
}
805 
// Const overload: range over this instruction's debug operands that refer to
// register Reg.
iterator_range<filter_iterator<const MachineOperand *,
                               std::function<bool(const MachineOperand &Op)>>>
MachineInstr::getDebugOperandsForReg(Register Reg) const {
  return getDebugOperandsForRegHelper<const MachineOperand, const MachineInstr>(
      this, Reg);
}
812 
// Mutable overload: range over this instruction's debug operands that refer
// to register Reg.
iterator_range<
    filter_iterator<MachineOperand *, std::function<bool(MachineOperand &Op)>>>
MachineInstr::getDebugOperandsForReg(Register Reg) {
  return getDebugOperandsForRegHelper<MachineOperand, MachineInstr>(this, Reg);
}
818 
819 unsigned MachineInstr::getNumExplicitOperands() const {
820   unsigned NumOperands = MCID->getNumOperands();
821   if (!MCID->isVariadic())
822     return NumOperands;
823 
824   for (const MachineOperand &MO : operands_impl().drop_front(NumOperands)) {
825     // The operands must always be in the following order:
826     // - explicit reg defs,
827     // - other explicit operands (reg uses, immediates, etc.),
828     // - implicit reg defs
829     // - implicit reg uses
830     if (MO.isReg() && MO.isImplicit())
831       break;
832     ++NumOperands;
833   }
834   return NumOperands;
835 }
836 
837 unsigned MachineInstr::getNumExplicitDefs() const {
838   unsigned NumDefs = MCID->getNumDefs();
839   if (!MCID->isVariadic())
840     return NumDefs;
841 
842   for (const MachineOperand &MO : operands_impl().drop_front(NumDefs)) {
843     if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
844       break;
845     ++NumDefs;
846   }
847   return NumDefs;
848 }
849 
850 void MachineInstr::bundleWithPred() {
851   assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
852   setFlag(BundledPred);
853   MachineBasicBlock::instr_iterator Pred = getIterator();
854   --Pred;
855   assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
856   Pred->setFlag(BundledSucc);
857 }
858 
859 void MachineInstr::bundleWithSucc() {
860   assert(!isBundledWithSucc() && "MI is already bundled with its successor");
861   setFlag(BundledSucc);
862   MachineBasicBlock::instr_iterator Succ = getIterator();
863   ++Succ;
864   assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
865   Succ->setFlag(BundledPred);
866 }
867 
868 void MachineInstr::unbundleFromPred() {
869   assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
870   clearFlag(BundledPred);
871   MachineBasicBlock::instr_iterator Pred = getIterator();
872   --Pred;
873   assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
874   Pred->clearFlag(BundledSucc);
875 }
876 
877 void MachineInstr::unbundleFromSucc() {
878   assert(isBundledWithSucc() && "MI isn't bundled with its successor");
879   clearFlag(BundledSucc);
880   MachineBasicBlock::instr_iterator Succ = getIterator();
881   ++Succ;
882   assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
883   Succ->clearFlag(BundledPred);
884 }
885 
886 bool MachineInstr::isStackAligningInlineAsm() const {
887   if (isInlineAsm()) {
888     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
889     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
890       return true;
891   }
892   return false;
893 }
894 
// Decode the assembler dialect of this inline asm from the dialect bit in the
// extra-info immediate operand.
InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  // A set dialect bit selects the second enumerator, a clear bit the first.
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}
900 
/// Find the index of the flag (group descriptor) operand whose operand group
/// contains operand OpIdx of this INLINEASM instruction. Returns -1 if OpIdx
/// names one of the fixed leading operands or a trailing implicit register
/// operand. If GroupNo is non-null, it receives the zero-based group number.
int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  // Each operand group starts with an immediate flag word encoding how many
  // register operands follow it; step from group to group.
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // The group occupies operands [i, i + NumOps); check if OpIdx is inside.
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
929 
// Return the DILabel metadata attached to a DBG_LABEL instruction (stored as
// its sole operand).
const DILabel *MachineInstr::getDebugLabel() const {
  assert(isDebugLabel() && "not a DBG_LABEL");
  return cast<DILabel>(getOperand(0).getMetadata());
}
934 
935 const MachineOperand &MachineInstr::getDebugVariableOp() const {
936   assert((isDebugValueLike()) && "not a DBG_VALUE*");
937   unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
938   return getOperand(VariableOp);
939 }
940 
941 MachineOperand &MachineInstr::getDebugVariableOp() {
942   assert((isDebugValueLike()) && "not a DBG_VALUE*");
943   unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
944   return getOperand(VariableOp);
945 }
946 
// Return the DILocalVariable metadata carried by this DBG_VALUE-like
// instruction's variable operand.
const DILocalVariable *MachineInstr::getDebugVariable() const {
  return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
}
950 
951 const MachineOperand &MachineInstr::getDebugExpressionOp() const {
952   assert((isDebugValueLike()) && "not a DBG_VALUE*");
953   unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
954   return getOperand(ExpressionOp);
955 }
956 
957 MachineOperand &MachineInstr::getDebugExpressionOp() {
958   assert((isDebugValueLike()) && "not a DBG_VALUE*");
959   unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
960   return getOperand(ExpressionOp);
961 }
962 
// Return the DIExpression metadata carried by this DBG_VALUE-like
// instruction's expression operand.
const DIExpression *MachineInstr::getDebugExpression() const {
  return cast<DIExpression>(getDebugExpressionOp().getMetadata());
}
966 
// Return true if this is a DBG_VALUE whose expression marks an entry-value
// location (value of the variable at function entry).
bool MachineInstr::isDebugEntryValue() const {
  return isDebugValue() && getDebugExpression()->isEntryValue();
}
970 
/// Compute the register class constraint for operand OpIdx. Normal
/// instructions take it from the MCInstrDesc via the target; inline asm
/// decodes it from the operand-group flag word. Returns null when no
/// constraint applies.
const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  // Inline asm: only register operands can carry a class constraint.
  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
  unsigned RCID;
  if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
      F.hasRegClassConstraint(RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (F.isMemKind())
    return TRI->getPointerRegClass(MF);

  return nullptr;
}
1008 
/// Accumulate into CurRC the register-class constraints that this
/// instruction's operands place on virtual register Reg. When ExploreBundle
/// is set and this is a bundle, every operand in the bundle is considered.
/// Returns the narrowed class, or null if the constraints are incompatible.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operands inside the bundle if we have
  // been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}
1025 
/// Per-operand helper: if operand OpIdx refers to Reg, fold its register
/// class constraint into CurRC; otherwise return CurRC unchanged.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}
1037 
/// Narrow CurRC by the constraint of register operand OpIdx, taking any
/// sub-register index on the operand into account. Returns null when the
/// constraints cannot be satisfied simultaneously.
const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    // With a sub-register index, the constraint applies to the sub-register,
    // so lift it back to a class of the full register.
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}
1055 
1056 /// Return the number of instructions inside the MI bundle, not counting the
1057 /// header instruction.
1058 unsigned MachineInstr::getBundleSize() const {
1059   MachineBasicBlock::const_instr_iterator I = getIterator();
1060   unsigned Size = 0;
1061   while (I->isBundledWithSucc()) {
1062     ++Size;
1063     ++I;
1064   }
1065   return Size;
1066 }
1067 
1068 /// Returns true if the MachineInstr has an implicit-use operand of exactly
1069 /// the given register (not considering sub/super-registers).
1070 bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
1071   for (const MachineOperand &MO : implicit_operands()) {
1072     if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1073       return true;
1074   }
1075   return false;
1076 }
1077 
1078 /// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
1079 /// the specific register or -1 if it is not found. It further tightens
1080 /// the search criteria to a use that kills the register if isKill is true.
1081 int MachineInstr::findRegisterUseOperandIdx(Register Reg,
1082                                             const TargetRegisterInfo *TRI,
1083                                             bool isKill) const {
1084   for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1085     const MachineOperand &MO = getOperand(i);
1086     if (!MO.isReg() || !MO.isUse())
1087       continue;
1088     Register MOReg = MO.getReg();
1089     if (!MOReg)
1090       continue;
1091     if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1092       if (!isKill || MO.isKill())
1093         return i;
1094   }
1095   return -1;
1096 }
1097 
/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines. If Ops is non-null, the index of every operand that
/// mentions Reg is appended to it.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(Register Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      // An undef use does not actually read the register.
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
1125 
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register. With Overlap set, any
/// overlapping physical def (or clobbering regmask) matches as well.
int MachineInstr::findRegisterDefOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isDead, bool Overlap) const {
  bool isPhys = Reg.isPhysical();
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys && MOReg.isPhysical()) {
      // For physical registers, also match overlapping or covering defs
      // depending on the query mode.
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}
1155 
/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    // Scan only the operands actually present on this instruction.
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.operands()[i].isPredicate())
        return i;
  }

  return -1;
}
1173 
// MachineOperand::TiedTo is 4 bits wide, so 15 is the largest encodable
// value; it doubles as the sentinel meaning "tied operand index is out of
// range, search for it" (see tieOperands / findTiedOperandIdx below).
const unsigned TiedMax = 15;
1176 
/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax) {
    // Encode the def index directly (shifted by one so 0 means "untied").
    UseMO.TiedTo = DefIdx + 1;
  } else {
    // Inline asm can use the group descriptors to find tied operands,
    // statepoint tied operands are trivial to match (1-1 reg def with reg use),
    // but on normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
           "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
1212 
/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // TiedTo == TiedMax: the partner's index was too large to encode; it must
  // be recovered by searching, with strategy depending on the opcode.
  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    // March the def index and the GC-pointer use index in lockstep, skipping
    // non-register meta arguments, until one of them matches OpIdx.
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!F.isUseOperandTiedToDef(TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}
1290 
1291 /// clearKillInfo - Clears kill flags on all operands.
1292 ///
1293 void MachineInstr::clearKillInfo() {
1294   for (MachineOperand &MO : operands()) {
1295     if (MO.isReg() && MO.isUse())
1296       MO.setIsKill(false);
1297   }
1298 }
1299 
1300 void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1301                                       unsigned SubIdx,
1302                                       const TargetRegisterInfo &RegInfo) {
1303   if (ToReg.isPhysical()) {
1304     if (SubIdx)
1305       ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1306     for (MachineOperand &MO : operands()) {
1307       if (!MO.isReg() || MO.getReg() != FromReg)
1308         continue;
1309       MO.substPhysReg(ToReg, RegInfo);
1310     }
1311   } else {
1312     for (MachineOperand &MO : operands()) {
1313       if (!MO.isReg() || MO.getReg() != FromReg)
1314         continue;
1315       MO.substVirtReg(ToReg, SubIdx, RegInfo);
1316     }
1317   }
1318 }
1319 
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination. SawStore is also
/// an output: it is set when this instruction itself acts as a store barrier.
bool MachineInstr::isSafeToMove(bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    // Record the barrier for the caller before refusing to move.
    SawStore = true;
    return false;
  }

  // Don't touch instructions that have non-trivial invariants.  For example,
  // terminators have to be at the end of a basic block.
  if (isPosition() || isDebugInstr() || isTerminator() ||
      isJumpTableDebugInfo())
    return false;

  // Don't touch instructions which can have non-load/store effects.
  //
  // Inline asm has a "sideeffect" marker to indicate whether the asm has
  // intentional side-effects. Even if an inline asm is not "sideeffect",
  // though, it still can't be speculatively executed: the operation might
  // not be valid on the current target, or for some combinations of operands.
  // (Some transforms that move an instruction don't speculatively execute it;
  // we currently don't try to handle that distinction here.)
  //
  // Other instructions handled here include those that can raise FP
  // exceptions, x86 "DIV" instructions which trap on divide by zero, and
  // stack adjustments.
  if (mayRaiseFPException() || hasProperty(MCID::UnmodeledSideEffects) ||
      isInlineAsm())
    return false;

  // See if this instruction does a load.  If so, we have to guarantee that the
  // loaded value doesn't change between the load and the its intended
  // destination. The check for isInvariantLoad gives the target the chance to
  // classify the load as always returning a constant, e.g. a constant pool
  // load.
  if (mayLoad() && !isDereferenceableInvariantLoad())
    // Otherwise, this is a real load.  If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
1369 
// Return true if deleting this instruction would be safe assuming none of its
// results are used: it has no side effects beyond defining its outputs, and
// it is not one of the special marker opcodes that must be preserved.
bool MachineInstr::wouldBeTriviallyDead() const {
  // Don't delete frame allocation labels.
  // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
  if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // Don't delete FAKE_USE.
  // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
  if (isFakeUse())
    return false;

  // LIFETIME markers should be preserved.
  // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
  if (isLifetimeMarker())
    return false;

  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  return isPHI() || isSafeToMove(SawStore);
}
1391 
// Return true if this instruction is dead: it has no side effects and every
// register it defines is unused (virtual defs have no non-debug users;
// physical defs are dead per LivePhysRegs, which must be provided for any
// instruction with physreg defs to be considered dead).
bool MachineInstr::isDead(const MachineRegisterInfo &MRI,
                          LiveRegUnits *LivePhysRegs) const {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const MachineOperand &MO : all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      // Don't delete live physreg defs, or any reserved register defs.
      if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
        return false;
    } else {
      // A def already flagged dead needs no use scan.
      if (MO.isDead())
        continue;
      for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
        if (&Use != this)
          // This def has a non-debug use. Don't delete the instruction!
          return false;
      }
    }
  }

  // Technically speaking inline asm without side effects and no defs can still
  // be deleted. But there is so much bad inline asm code out there, we should
  // let them be.
  if (isInlineAsm())
    return false;

  // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
  if (isLifetimeMarker())
    return true;

  // If there are no defs with uses, then we call the instruction dead so long
  // as we do not suspect it may have sideeffects.
  return wouldBeTriviallyDead();
}
1428 
/// Conservatively decide whether the memory accesses described by MMOa and
/// MMOb may alias. Returns false only when the two accesses are provably
/// disjoint; a null AA makes the answer purely structural.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI,
                                 BatchAAResults *AA, bool UseTBAA,
                                 const MachineMemOperand *MMOa,
                                 const MachineMemOperand *MMOb) {
  // The following interface to AA is fashioned after DAGCombiner::isAlias and
  // operates with MachineMemOperand offset with some important assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and cannot
  //     affect queries other than the trivial case of overlap checking.
  //   - These offsets never wrap and never step outside of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some memory objects. It
  // can save compile time, and possibly catch some corner cases not currently
  // covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  LocationSize WidthA = MMOa->getSize();
  LocationSize WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA.hasValue();
  bool KnownWidthB = WidthB.hasValue();
  bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    // Pseudo source values (stack slots, constant pools, ...) can prove
    // disjointness against IR values or against each other.
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal && BothMMONonScalable) {
    // Same base object and fixed sizes: alias iff the intervals overlap.
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA)
                           ? WidthA.getValue().getKnownMinValue()
                           : WidthB.getValue().getKnownMinValue();
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // If Scalable Location Size has non-zero offset, Width + Offset does not work
  // at the moment
  if ((WidthA.isScalable() && OffsetA > 0) ||
      (WidthB.isScalable() && OffsetB > 0))
    return true;

  int64_t OverlapA =
      KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
                  : MemoryLocation::UnknownSize;
  int64_t OverlapB =
      KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
                  : MemoryLocation::UnknownSize;

  LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
                          ? WidthA
                          : LocationSize::precise(OverlapA);
  LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
                          ? WidthB
                          : LocationSize::precise(OverlapB);

  // Defer to the IR-level alias analysis, optionally attaching TBAA info.
  return !AA->isNoAlias(
      MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
}
1513 
// Return true if this instruction's memory accesses may alias Other's.
// Conservative: any case that cannot be analyzed (calls, missing memory
// operands, too many operand pairs) answers true.
bool MachineInstr::mayAlias(BatchAAResults *AA, const MachineInstr &Other,
                            bool UseTBAA) const {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // Exclude call instruction which may alter the memory but can not be handled
  // by this function.
  if (isCall() || Other.isCall())
    return true;

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Both instructions must be memory operations to be able to alias.
  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
    return false;

  // Memory operations without memory operands may access anything. Be
  // conservative and assume `MayAlias`.
  if (memoperands_empty() || Other.memoperands_empty())
    return true;

  // Skip if there are too many memory operands.
  auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
  if (NumChecks > TII->getMemOperandAACheckLimit())
    return true;

  // Check each pair of memory operands from both instructions, which can't
  // alias only if all pairs won't alias.
  for (auto *MMOa : memoperands())
    for (auto *MMOb : Other.memoperands())
      if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
        return true;

  return false;
}
1557 
1558 bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1559                             bool UseTBAA) const {
1560   if (AA) {
1561     BatchAAResults BAA(*AA);
1562     return mayAlias(&BAA, Other, UseTBAA);
1563   }
1564   return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1565 }
1566 
1567 /// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1568 /// or volatile memory reference, or if the information describing the memory
1569 /// reference is not available. Return false if it is known to have no ordered
1570 /// memory references.
1571 bool MachineInstr::hasOrderedMemoryRef() const {
1572   // An instruction known never to access memory won't have a volatile access.
1573   if (!mayStore() &&
1574       !mayLoad() &&
1575       !isCall() &&
1576       !hasUnmodeledSideEffects())
1577     return false;
1578 
1579   // Otherwise, if the instruction has no memory reference information,
1580   // conservatively assume it wasn't preserved.
1581   if (memoperands_empty())
1582     return true;
1583 
1584   // Check if any of our memory operands are ordered.
1585   return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1586     return !MMO->isUnordered();
1587   });
1588 }
1589 
1590 /// isDereferenceableInvariantLoad - Return true if this instruction will never
1591 /// trap and is loading from a location whose value is invariant across a run of
1592 /// this function.
1593 bool MachineInstr::isDereferenceableInvariantLoad() const {
1594   // If the instruction doesn't load at all, it isn't an invariant load.
1595   if (!mayLoad())
1596     return false;
1597 
1598   // If the instruction has lost its memoperands, conservatively assume that
1599   // it may not be an invariant load.
1600   if (memoperands_empty())
1601     return false;
1602 
1603   const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1604 
1605   for (MachineMemOperand *MMO : memoperands()) {
1606     if (!MMO->isUnordered())
1607       // If the memory operand has ordering side effects, we can't move the
1608       // instruction.  Such an instruction is technically an invariant load,
1609       // but the caller code would need updated to expect that.
1610       return false;
1611     if (MMO->isStore()) return false;
1612     if (MMO->isInvariant() && MMO->isDereferenceable())
1613       continue;
1614 
1615     // A load from a constant PseudoSourceValue is invariant.
1616     if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1617       if (PSV->isConstant(&MFI))
1618         continue;
1619     }
1620 
1621     // Otherwise assume conservatively.
1622     return false;
1623   }
1624 
1625   // Everything checks out.
1626   return true;
1627 }
1628 
1629 Register MachineInstr::isConstantValuePHI() const {
1630   if (!isPHI())
1631     return {};
1632   assert(getNumOperands() >= 3 &&
1633          "It's illegal to have a PHI without source operands");
1634 
1635   Register Reg = getOperand(1).getReg();
1636   for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1637     if (getOperand(i).getReg() != Reg)
1638       return {};
1639   return Reg;
1640 }
1641 
1642 bool MachineInstr::hasUnmodeledSideEffects() const {
1643   if (hasProperty(MCID::UnmodeledSideEffects))
1644     return true;
1645   if (isInlineAsm()) {
1646     unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1647     if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1648       return true;
1649   }
1650 
1651   return false;
1652 }
1653 
1654 bool MachineInstr::isLoadFoldBarrier() const {
1655   return mayStore() || isCall() ||
1656          (hasUnmodeledSideEffects() && !isPseudoProbe());
1657 }
1658 
1659 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1660 ///
1661 bool MachineInstr::allDefsAreDead() const {
1662   for (const MachineOperand &MO : operands()) {
1663     if (!MO.isReg() || MO.isUse())
1664       continue;
1665     if (!MO.isDead())
1666       return false;
1667   }
1668   return true;
1669 }
1670 
1671 bool MachineInstr::allImplicitDefsAreDead() const {
1672   for (const MachineOperand &MO : implicit_operands()) {
1673     if (!MO.isReg() || MO.isUse())
1674       continue;
1675     if (!MO.isDead())
1676       return false;
1677   }
1678   return true;
1679 }
1680 
1681 /// copyImplicitOps - Copy implicit register operands from specified
1682 /// instruction to this instruction.
1683 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1684                                    const MachineInstr &MI) {
1685   for (const MachineOperand &MO :
1686        llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1687     if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1688       addOperand(MF, MO);
1689 }
1690 
1691 bool MachineInstr::hasComplexRegisterTies() const {
1692   const MCInstrDesc &MCID = getDesc();
1693   if (MCID.Opcode == TargetOpcode::STATEPOINT)
1694     return true;
1695   for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1696     const auto &Operand = getOperand(I);
1697     if (!Operand.isReg() || Operand.isDef())
1698       // Ignore the defined registers as MCID marks only the uses as tied.
1699       continue;
1700     int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1701     int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1702     if (ExpectedTiedIdx != TiedIdx)
1703       return true;
1704   }
1705   return false;
1706 }
1707 
1708 LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1709                                  const MachineRegisterInfo &MRI) const {
1710   const MachineOperand &Op = getOperand(OpIdx);
1711   if (!Op.isReg())
1712     return LLT{};
1713 
1714   if (isVariadic() || OpIdx >= getNumExplicitOperands())
1715     return MRI.getType(Op.getReg());
1716 
1717   auto &OpInfo = getDesc().operands()[OpIdx];
1718   if (!OpInfo.isGenericType())
1719     return MRI.getType(Op.getReg());
1720 
1721   if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1722     return LLT{};
1723 
1724   LLT TypeToPrint = MRI.getType(Op.getReg());
1725   // Don't mark the type index printed if it wasn't actually printed: maybe
1726   // another operand with the same type index has an actual type attached:
1727   if (TypeToPrint.isValid())
1728     PrintedTypes.set(OpInfo.getGenericTypeIndex());
1729   return TypeToPrint;
1730 }
1731 
1732 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1733 LLVM_DUMP_METHOD void MachineInstr::dump() const {
1734   dbgs() << "  ";
1735   print(dbgs());
1736 }
1737 
1738 LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1739     const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1740     SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1741   if (Depth >= MaxDepth)
1742     return;
1743   if (!AlreadySeenInstrs.insert(this).second)
1744     return;
1745   // PadToColumn always inserts at least one space.
1746   // Don't mess up the alignment if we don't want any space.
1747   if (Depth)
1748     fdbgs().PadToColumn(Depth * 2);
1749   print(fdbgs());
1750   for (const MachineOperand &MO : operands()) {
1751     if (!MO.isReg() || MO.isDef())
1752       continue;
1753     Register Reg = MO.getReg();
1754     if (Reg.isPhysical())
1755       continue;
1756     const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1757     if (NewMI == nullptr)
1758       continue;
1759     NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1760   }
1761 }
1762 
1763 LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1764                                           unsigned MaxDepth) const {
1765   SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1766   dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1767 }
1768 #endif
1769 
1770 void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1771                          bool SkipDebugLoc, bool AddNewLine,
1772                          const TargetInstrInfo *TII) const {
1773   const Module *M = nullptr;
1774   const Function *F = nullptr;
1775   if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1776     F = &MF->getFunction();
1777     M = F->getParent();
1778     if (!TII)
1779       TII = MF->getSubtarget().getInstrInfo();
1780   }
1781 
1782   ModuleSlotTracker MST(M);
1783   if (F)
1784     MST.incorporateFunction(*F);
1785   print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1786 }
1787 
// Print this MachineInstr in MIR-like syntax: explicit defs, flags, opcode,
// remaining operands, attached symbols/metadata, memory operands, and debug
// location. Output ordering is significant and relied upon by the MIR
// printer/parser round-trip.
void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
                         bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
                         bool AddNewLine, const TargetInstrInfo *TII) const {
  // We can be a bit tidier if we know the MachineFunction.
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  tryToGetTargetInfo(*this, TRI, MRI, TII);

  if (isCFIInstruction())
    assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");

  SmallBitVector PrintedTypes(8);
  bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
  // Returns the tied operand index to print for OpIdx, or 0 when ties are
  // not printed or the operand is not a tied register use.
  auto getTiedOperandIdx = [&](unsigned OpIdx) {
    if (!ShouldPrintRegisterTies)
      return 0U;
    const MachineOperand &MO = getOperand(OpIdx);
    if (MO.isReg() && MO.isTied() && !MO.isDef())
      return findTiedOperandIdx(OpIdx);
    return 0U;
  };
  unsigned StartOp = 0;
  unsigned e = getNumOperands();

  // Print explicitly defined operands on the left of an assignment syntax.
  while (StartOp < e) {
    const MachineOperand &MO = getOperand(StartOp);
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;

    if (StartOp != 0)
      OS << ", ";

    LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
    unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
    MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
             ShouldPrintRegisterTies, TiedOperandIdx, TRI);
    ++StartOp;
  }

  if (StartOp != 0)
    OS << " = ";

  // Print instruction flags (frame, fast-math, wrap, etc.) before the opcode.
  if (getFlag(MachineInstr::FrameSetup))
    OS << "frame-setup ";
  if (getFlag(MachineInstr::FrameDestroy))
    OS << "frame-destroy ";
  if (getFlag(MachineInstr::FmNoNans))
    OS << "nnan ";
  if (getFlag(MachineInstr::FmNoInfs))
    OS << "ninf ";
  if (getFlag(MachineInstr::FmNsz))
    OS << "nsz ";
  if (getFlag(MachineInstr::FmArcp))
    OS << "arcp ";
  if (getFlag(MachineInstr::FmContract))
    OS << "contract ";
  if (getFlag(MachineInstr::FmAfn))
    OS << "afn ";
  if (getFlag(MachineInstr::FmReassoc))
    OS << "reassoc ";
  if (getFlag(MachineInstr::NoUWrap))
    OS << "nuw ";
  if (getFlag(MachineInstr::NoSWrap))
    OS << "nsw ";
  if (getFlag(MachineInstr::IsExact))
    OS << "exact ";
  if (getFlag(MachineInstr::NoFPExcept))
    OS << "nofpexcept ";
  if (getFlag(MachineInstr::NoMerge))
    OS << "nomerge ";
  if (getFlag(MachineInstr::NonNeg))
    OS << "nneg ";
  if (getFlag(MachineInstr::Disjoint))
    OS << "disjoint ";
  if (getFlag(MachineInstr::SameSign))
    OS << "samesign ";

  // Print the opcode name.
  if (TII)
    OS << TII->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
    // Print asm string.
    OS << " ";
    const unsigned OpIdx = InlineAsm::MIOp_AsmString;
    LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
    unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
    getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true,
                            IsStandalone, ShouldPrintRegisterTies,
                            TiedOperandIdx, TRI);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
      OS << " [isconvergent]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
      OS << " [attdialect]";
    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
      OS << " [inteldialect]";

    // Remaining operands start after the descriptor operands; AsmDescOp
    // tracks where the next inline-asm operand descriptor lives.
    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";

    if (isDebugValueLike() && MO.isMetadata()) {
      // Pretty print DBG_VALUE* instructions.
      auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
      if (DIV && !DIV->getName().empty())
        OS << "!\"" << DIV->getName() << '\"';
      else {
        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
        unsigned TiedOperandIdx = getTiedOperandIdx(i);
        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
      }
    } else if (isDebugLabel() && MO.isMetadata()) {
      // Pretty print DBG_LABEL instructions.
      auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
      if (DIL && !DIL->getName().empty())
        OS << "\"" << DIL->getName() << '\"';
      else {
        LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
        unsigned TiedOperandIdx = getTiedOperandIdx(i);
        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
      }
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      const InlineAsm::Flag F(Flag);
      OS << ":[";
      OS << F.getKindName();

      unsigned RCID;
      if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
        if (TRI) {
          OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
        } else
          OS << ":RC" << RCID;
      }

      if (F.isMemKind()) {
        const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
        OS << ":" << InlineAsm::getMemConstraintName(MCID);
      }

      unsigned TiedTo;
      if (F.isUseOperandTiedToDef(TiedTo))
        OS << " tiedto:$" << TiedTo;

      if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
           F.isRegUseKind()) &&
          F.getRegMayBeFolded()) {
        OS << " foldable";
      }

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + F.getNumOperandRegisters();
    } else {
      LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
      unsigned TiedOperandIdx = getTiedOperandIdx(i);
      if (MO.isImm() && isOperandSubregIdx(i))
        MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
      else
        MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
                 ShouldPrintRegisterTies, TiedOperandIdx, TRI);
    }
  }

  // Print any optional symbols attached to this instruction as-if they were
  // operands.
  if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
    if (!FirstOp) {
      // NOTE(review): this assignment is redundant (FirstOp is already
      // false inside this branch) — presumably kept for symmetry with the
      // operand loop; confirm before cleaning up. Same pattern below.
      FirstOp = false;
      OS << ',';
    }
    OS << " pre-instr-symbol ";
    MachineOperand::printSymbol(OS, *PreInstrSymbol);
  }
  if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " post-instr-symbol ";
    MachineOperand::printSymbol(OS, *PostInstrSymbol);
  }
  if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " heap-alloc-marker ";
    HeapAllocMarker->printAsOperand(OS, MST);
  }
  if (MDNode *PCSections = getPCSections()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " pcsections ";
    PCSections->printAsOperand(OS, MST);
  }
  if (MDNode *MMRA = getMMRAMetadata()) {
    if (!FirstOp) {
      FirstOp = false;
      OS << ',';
    }
    OS << " mmra ";
    MMRA->printAsOperand(OS, MST);
  }
  if (uint32_t CFIType = getCFIType()) {
    if (!FirstOp)
      OS << ',';
    OS << " cfi-type " << CFIType;
  }

  if (DebugInstrNum) {
    if (!FirstOp)
      OS << ",";
    OS << " debug-instr-number " << DebugInstrNum;
  }

  // Print the debug location as an operand-like entry unless suppressed.
  if (!SkipDebugLoc) {
    if (const DebugLoc &DL = getDebugLoc()) {
      if (!FirstOp)
        OS << ',';
      OS << " debug-location ";
      DL->printAsOperand(OS, MST);
    }
  }

  // Print memory operands after " :: ". Without a MachineFunction we have
  // no LLVMContext, so a temporary one is created just for printing.
  if (!memoperands_empty()) {
    SmallVector<StringRef, 0> SSNs;
    const LLVMContext *Context = nullptr;
    std::unique_ptr<LLVMContext> CtxPtr;
    const MachineFrameInfo *MFI = nullptr;
    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
      MFI = &MF->getFrameInfo();
      Context = &MF->getFunction().getContext();
    } else {
      CtxPtr = std::make_unique<LLVMContext>();
      Context = CtxPtr.get();
    }

    OS << " :: ";
    bool NeedComma = false;
    for (const MachineMemOperand *Op : memoperands()) {
      if (NeedComma)
        OS << ", ";
      Op->print(OS, MST, SSNs, *Context, MFI, TII);
      NeedComma = true;
    }
  }

  if (SkipDebugLoc)
    return;

  bool HaveSemi = false;

  // Print debug location information.
  if (const DebugLoc &DL = getDebugLoc()) {
    if (!HaveSemi) {
      OS << ';';
      HaveSemi = true;
    }
    OS << ' ';
    DL.print(OS);
  }

  // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
  if ((isNonListDebugValue() && getNumOperands() >= 4) ||
      (isDebugValueList() && getNumOperands() >= 2) ||
      (isDebugRef() && getNumOperands() >= 3)) {
    if (getDebugVariableOp().isMetadata()) {
      if (!HaveSemi) {
        OS << ";";
        HaveSemi = true;
      }
      auto *DV = getDebugVariable();
      OS << " line no:" << DV->getLine();
      if (isIndirectDebugValue())
        OS << " indirect";
    }
  }
  // TODO: DBG_LABEL

  // Optionally append the instruction's address for debugging dumps.
  if (PrintMIAddrs)
    OS << " ; " << this;

  if (AddNewLine)
    OS << '\n';
}
2109 
// Mark all uses of IncomingReg in this instruction as killed. Returns true
// if a kill flag exists (or was added); when AddIfNotFound is set and no
// use operand matches, an implicit killed use is appended. Overlapping
// sub-register kill flags are cleaned up so only the requested register
// carries the kill.
bool MachineInstr::addRegisterKilled(Register IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = IncomingReg.isPhysical();
  // Alias handling only applies to physical registers with at least one
  // alias.
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register kill operands that become redundant.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;

    // DEBUG_VALUE nodes do not contribute to code generation and should
    // always be ignored. Failure to do so may result in trying to modify
    // KILL flags on DEBUG_VALUE nodes.
    if (MO.isDebug())
      continue;

    Register Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  // Iterate back-to-front so removal does not shift pending indices.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    // Implicit operands can be removed outright, except inside inline asm
    // where operands are described by flag words; there just clear the flag.
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      removeOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}
2175 
2176 void MachineInstr::clearRegisterKills(Register Reg,
2177                                       const TargetRegisterInfo *RegInfo) {
2178   if (!Reg.isPhysical())
2179     RegInfo = nullptr;
2180   for (MachineOperand &MO : operands()) {
2181     if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2182       continue;
2183     Register OpReg = MO.getReg();
2184     if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2185       MO.setIsKill(false);
2186   }
2187 }
2188 
// Mark all defs of Reg in this instruction as dead. Returns true if a dead
// flag exists (or was added); when AddIfNotFound is set and no def operand
// matches, an implicit dead def is appended. Redundant sub-register dead
// flags are cleaned up, mirroring addRegisterKilled above.
bool MachineInstr::addRegisterDead(Register Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = Reg.isPhysical();
  // Alias handling only applies to physical registers with at least one
  // alias.
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  // Indices of sub-register dead operands that become redundant.
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  // Iterate back-to-front so removal does not shift pending indices.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    // Implicit operands can be removed outright, except inside inline asm
    // where operands are described by flag words; there just clear the flag.
    if (getOperand(OpIdx).isImplicit() &&
        (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
      removeOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}
2240 
2241 void MachineInstr::clearRegisterDeads(Register Reg) {
2242   for (MachineOperand &MO : all_defs())
2243     if (MO.getReg() == Reg)
2244       MO.setIsDead(false);
2245 }
2246 
2247 void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2248   for (MachineOperand &MO : all_defs())
2249     if (MO.getReg() == Reg && MO.getSubReg() != 0)
2250       MO.setIsUndef(IsUndef);
2251 }
2252 
2253 void MachineInstr::addRegisterDefined(Register Reg,
2254                                       const TargetRegisterInfo *RegInfo) {
2255   if (Reg.isPhysical()) {
2256     MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2257     if (MO)
2258       return;
2259   } else {
2260     for (const MachineOperand &MO : all_defs()) {
2261       if (MO.getReg() == Reg && MO.getSubReg() == 0)
2262         return;
2263     }
2264   }
2265   addOperand(MachineOperand::CreateReg(Reg,
2266                                        true  /*IsDef*/,
2267                                        true  /*IsImp*/));
2268 }
2269 
2270 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2271                                          const TargetRegisterInfo &TRI) {
2272   bool HasRegMask = false;
2273   for (MachineOperand &MO : operands()) {
2274     if (MO.isRegMask()) {
2275       HasRegMask = true;
2276       continue;
2277     }
2278     if (!MO.isReg() || !MO.isDef()) continue;
2279     Register Reg = MO.getReg();
2280     if (!Reg.isPhysical())
2281       continue;
2282     // If there are no uses, including partial uses, the def is dead.
2283     if (llvm::none_of(UsedRegs,
2284                       [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2285       MO.setIsDead();
2286   }
2287 
2288   // This is a call with a register mask operand.
2289   // Mask clobbers are always dead, so add defs for the non-dead defines.
2290   if (HasRegMask)
2291     for (const Register &UsedReg : UsedRegs)
2292       addRegisterDefined(UsedReg, &TRI);
2293 }
2294 
2295 unsigned
2296 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
2297   // Build up a buffer of hash code components.
2298   SmallVector<size_t, 16> HashComponents;
2299   HashComponents.reserve(MI->getNumOperands() + 1);
2300   HashComponents.push_back(MI->getOpcode());
2301   for (const MachineOperand &MO : MI->operands()) {
2302     if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2303       continue;  // Skip virtual register defs.
2304 
2305     HashComponents.push_back(hash_value(MO));
2306   }
2307   return hash_combine_range(HashComponents);
2308 }
2309 
2310 const MDNode *MachineInstr::getLocCookieMD() const {
2311   // Find the source location cookie.
2312   const MDNode *LocMD = nullptr;
2313   for (unsigned i = getNumOperands(); i != 0; --i) {
2314     if (getOperand(i-1).isMetadata() &&
2315         (LocMD = getOperand(i-1).getMetadata()) &&
2316         LocMD->getNumOperands() != 0) {
2317       if (mdconst::hasa<ConstantInt>(LocMD->getOperand(0)))
2318         return LocMD;
2319     }
2320   }
2321 
2322   return nullptr;
2323 }
2324 
2325 void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2326   assert(isInlineAsm());
2327   const MDNode *LocMD = getLocCookieMD();
2328   uint64_t LocCookie =
2329       LocMD
2330           ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2331           : 0;
2332   LLVMContext &Ctx = getMF()->getFunction().getContext();
2333   Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2334 }
2335 
2336 void MachineInstr::emitGenericError(const Twine &Msg) const {
2337   const Function &Fn = getMF()->getFunction();
2338   Fn.getContext().diagnose(
2339       DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2340 }
2341 
2342 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2343                                   const MCInstrDesc &MCID, bool IsIndirect,
2344                                   Register Reg, const MDNode *Variable,
2345                                   const MDNode *Expr) {
2346   assert(isa<DILocalVariable>(Variable) && "not a variable");
2347   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2348   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2349          "Expected inlined-at fields to agree");
2350   auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2351   if (IsIndirect)
2352     MIB.addImm(0U);
2353   else
2354     MIB.addReg(0U);
2355   return MIB.addMetadata(Variable).addMetadata(Expr);
2356 }
2357 
2358 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2359                                   const MCInstrDesc &MCID, bool IsIndirect,
2360                                   ArrayRef<MachineOperand> DebugOps,
2361                                   const MDNode *Variable, const MDNode *Expr) {
2362   assert(isa<DILocalVariable>(Variable) && "not a variable");
2363   assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2364   assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2365          "Expected inlined-at fields to agree");
2366   if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2367     assert(DebugOps.size() == 1 &&
2368            "DBG_VALUE must contain exactly one debug operand");
2369     MachineOperand DebugOp = DebugOps[0];
2370     if (DebugOp.isReg())
2371       return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2372                      Expr);
2373 
2374     auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2375     if (IsIndirect)
2376       MIB.addImm(0U);
2377     else
2378       MIB.addReg(0U);
2379     return MIB.addMetadata(Variable).addMetadata(Expr);
2380   }
2381 
2382   auto MIB = BuildMI(MF, DL, MCID);
2383   MIB.addMetadata(Variable).addMetadata(Expr);
2384   for (const MachineOperand &DebugOp : DebugOps)
2385     if (DebugOp.isReg())
2386       MIB.addReg(DebugOp.getReg());
2387     else
2388       MIB.add(DebugOp);
2389   return MIB;
2390 }
2391 
2392 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2393                                   MachineBasicBlock::iterator I,
2394                                   const DebugLoc &DL, const MCInstrDesc &MCID,
2395                                   bool IsIndirect, Register Reg,
2396                                   const MDNode *Variable, const MDNode *Expr) {
2397   MachineFunction &MF = *BB.getParent();
2398   MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2399   BB.insert(I, MI);
2400   return MachineInstrBuilder(MF, MI);
2401 }
2402 
2403 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2404                                   MachineBasicBlock::iterator I,
2405                                   const DebugLoc &DL, const MCInstrDesc &MCID,
2406                                   bool IsIndirect,
2407                                   ArrayRef<MachineOperand> DebugOps,
2408                                   const MDNode *Variable, const MDNode *Expr) {
2409   MachineFunction &MF = *BB.getParent();
2410   MachineInstr *MI =
2411       BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2412   BB.insert(I, MI);
2413   return MachineInstrBuilder(MF, *MI);
2414 }
2415 
2416 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2417 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2418 static const DIExpression *computeExprForSpill(
2419     const MachineInstr &MI,
2420     const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2421   assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2422          "Expected inlined-at fields to agree");
2423 
2424   const DIExpression *Expr = MI.getDebugExpression();
2425   if (MI.isIndirectDebugValue()) {
2426     assert(MI.getDebugOffset().getImm() == 0 &&
2427            "DBG_VALUE with nonzero offset");
2428     Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2429   } else if (MI.isDebugValueList()) {
2430     // We will replace the spilled register with a frame index, so
2431     // immediately deref all references to the spilled register.
2432     std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2433     for (const MachineOperand *Op : SpilledOperands) {
2434       unsigned OpIdx = MI.getDebugOperandIndex(Op);
2435       Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2436     }
2437   }
2438   return Expr;
2439 }
2440 static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2441                                                Register SpillReg) {
2442   assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2443   SmallVector<const MachineOperand *> SpillOperands(
2444       llvm::make_pointer_range(MI.getDebugOperandsForReg(SpillReg)));
2445   return computeExprForSpill(MI, SpillOperands);
2446 }
2447 
2448 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2449                                           MachineBasicBlock::iterator I,
2450                                           const MachineInstr &Orig,
2451                                           int FrameIndex, Register SpillReg) {
2452   assert(!Orig.isDebugRef() &&
2453          "DBG_INSTR_REF should not reference a virtual register.");
2454   const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2455   MachineInstrBuilder NewMI =
2456       BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2457   // Non-Variadic Operands: Location, Offset, Variable, Expression
2458   // Variadic Operands:     Variable, Expression, Locations...
2459   if (Orig.isNonListDebugValue())
2460     NewMI.addFrameIndex(FrameIndex).addImm(0U);
2461   NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2462   if (Orig.isDebugValueList()) {
2463     for (const MachineOperand &Op : Orig.debug_operands())
2464       if (Op.isReg() && Op.getReg() == SpillReg)
2465         NewMI.addFrameIndex(FrameIndex);
2466       else
2467         NewMI.add(MachineOperand(Op));
2468   }
2469   return NewMI;
2470 }
2471 MachineInstr *llvm::buildDbgValueForSpill(
2472     MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2473     const MachineInstr &Orig, int FrameIndex,
2474     const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2475   const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2476   MachineInstrBuilder NewMI =
2477       BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2478   // Non-Variadic Operands: Location, Offset, Variable, Expression
2479   // Variadic Operands:     Variable, Expression, Locations...
2480   if (Orig.isNonListDebugValue())
2481     NewMI.addFrameIndex(FrameIndex).addImm(0U);
2482   NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2483   if (Orig.isDebugValueList()) {
2484     for (const MachineOperand &Op : Orig.debug_operands())
2485       if (is_contained(SpilledOperands, &Op))
2486         NewMI.addFrameIndex(FrameIndex);
2487       else
2488         NewMI.add(MachineOperand(Op));
2489   }
2490   return NewMI;
2491 }
2492 
2493 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2494                                   Register Reg) {
2495   const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2496   if (Orig.isNonListDebugValue())
2497     Orig.getDebugOffset().ChangeToImmediate(0U);
2498   for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2499     Op.ChangeToFrameIndex(FrameIndex);
2500   Orig.getDebugExpressionOp().setMetadata(Expr);
2501 }
2502 
2503 void MachineInstr::collectDebugValues(
2504                                 SmallVectorImpl<MachineInstr *> &DbgValues) {
2505   MachineInstr &MI = *this;
2506   if (!MI.getOperand(0).isReg())
2507     return;
2508 
2509   MachineBasicBlock::iterator DI = MI; ++DI;
2510   for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2511        DI != DE; ++DI) {
2512     if (!DI->isDebugValue())
2513       return;
2514     if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2515       DbgValues.push_back(&*DI);
2516   }
2517 }
2518 
2519 void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2520   // Collect matching debug values.
2521   SmallVector<MachineInstr *, 2> DbgValues;
2522 
2523   if (!getOperand(0).isReg())
2524     return;
2525 
2526   Register DefReg = getOperand(0).getReg();
2527   auto *MRI = getRegInfo();
2528   for (auto &MO : MRI->use_operands(DefReg)) {
2529     auto *DI = MO.getParent();
2530     if (!DI->isDebugValue())
2531       continue;
2532     if (DI->hasDebugOperandForReg(DefReg)) {
2533       DbgValues.push_back(DI);
2534     }
2535   }
2536 
2537   // Propagate Reg to debug value instructions.
2538   for (auto *DBI : DbgValues)
2539     for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2540       Op.setReg(Reg);
2541 }
2542 
2543 using MMOList = SmallVector<const MachineMemOperand *, 2>;
2544 
2545 static LocationSize getSpillSlotSize(const MMOList &Accesses,
2546                                      const MachineFrameInfo &MFI) {
2547   std::optional<TypeSize> Size;
2548   for (const auto *A : Accesses) {
2549     if (MFI.isSpillSlotObjectIndex(
2550             cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2551                 ->getFrameIndex())) {
2552       LocationSize S = A->getSize();
2553       if (!S.hasValue())
2554         return LocationSize::beforeOrAfterPointer();
2555       if (!Size)
2556         Size = S.getValue();
2557       else
2558         Size = *Size + S.getValue();
2559     }
2560   }
2561   if (!Size)
2562     return LocationSize::precise(0);
2563   return LocationSize::precise(*Size);
2564 }
2565 
2566 std::optional<LocationSize>
2567 MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2568   int FI;
2569   if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2570     const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2571     if (MFI.isSpillSlotObjectIndex(FI))
2572       return (*memoperands_begin())->getSize();
2573   }
2574   return std::nullopt;
2575 }
2576 
2577 std::optional<LocationSize>
2578 MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2579   MMOList Accesses;
2580   if (TII->hasStoreToStackSlot(*this, Accesses))
2581     return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2582   return std::nullopt;
2583 }
2584 
2585 std::optional<LocationSize>
2586 MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2587   int FI;
2588   if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2589     const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2590     if (MFI.isSpillSlotObjectIndex(FI))
2591       return (*memoperands_begin())->getSize();
2592   }
2593   return std::nullopt;
2594 }
2595 
2596 std::optional<LocationSize>
2597 MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2598   MMOList Accesses;
2599   if (TII->hasLoadFromStackSlot(*this, Accesses))
2600     return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2601   return std::nullopt;
2602 }
2603 
2604 unsigned MachineInstr::getDebugInstrNum() {
2605   if (DebugInstrNum == 0)
2606     DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2607   return DebugInstrNum;
2608 }
2609 
2610 unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2611   if (DebugInstrNum == 0)
2612     DebugInstrNum = MF.getNewDebugInstrNum();
2613   return DebugInstrNum;
2614 }
2615 
2616 std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2617   return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2618                     getRegInfo()->getType(getOperand(1).getReg()));
2619 }
2620 
2621 std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2622   return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2623                     getRegInfo()->getType(getOperand(1).getReg()),
2624                     getRegInfo()->getType(getOperand(2).getReg()));
2625 }
2626 
2627 std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2628   return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2629                     getRegInfo()->getType(getOperand(1).getReg()),
2630                     getRegInfo()->getType(getOperand(2).getReg()),
2631                     getRegInfo()->getType(getOperand(3).getReg()));
2632 }
2633 
2634 std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2635   return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2636                     getRegInfo()->getType(getOperand(1).getReg()),
2637                     getRegInfo()->getType(getOperand(2).getReg()),
2638                     getRegInfo()->getType(getOperand(3).getReg()),
2639                     getRegInfo()->getType(getOperand(4).getReg()));
2640 }
2641 
2642 std::tuple<Register, LLT, Register, LLT>
2643 MachineInstr::getFirst2RegLLTs() const {
2644   Register Reg0 = getOperand(0).getReg();
2645   Register Reg1 = getOperand(1).getReg();
2646   return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2647                     getRegInfo()->getType(Reg1));
2648 }
2649 
2650 std::tuple<Register, LLT, Register, LLT, Register, LLT>
2651 MachineInstr::getFirst3RegLLTs() const {
2652   Register Reg0 = getOperand(0).getReg();
2653   Register Reg1 = getOperand(1).getReg();
2654   Register Reg2 = getOperand(2).getReg();
2655   return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2656                     getRegInfo()->getType(Reg1), Reg2,
2657                     getRegInfo()->getType(Reg2));
2658 }
2659 
2660 std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2661 MachineInstr::getFirst4RegLLTs() const {
2662   Register Reg0 = getOperand(0).getReg();
2663   Register Reg1 = getOperand(1).getReg();
2664   Register Reg2 = getOperand(2).getReg();
2665   Register Reg3 = getOperand(3).getReg();
2666   return std::tuple(
2667       Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2668       Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2669 }
2670 
2671 std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2672            LLT>
2673 MachineInstr::getFirst5RegLLTs() const {
2674   Register Reg0 = getOperand(0).getReg();
2675   Register Reg1 = getOperand(1).getReg();
2676   Register Reg2 = getOperand(2).getReg();
2677   Register Reg3 = getOperand(3).getReg();
2678   Register Reg4 = getOperand(4).getReg();
2679   return std::tuple(
2680       Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2681       Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2682       Reg4, getRegInfo()->getType(Reg4));
2683 }
2684 
/// Insert the operands in Ops immediately before the operand InsertBefore
/// points at, shifting the existing trailing operands (and any tied-operand
/// links among them) past the newly inserted ones.
void MachineInstr::insert(mop_iterator InsertBefore,
                          ArrayRef<MachineOperand> Ops) {
  assert(InsertBefore != nullptr && "invalid iterator");
  assert(InsertBefore->getParent() == this &&
         "iterator points to operand of other inst");
  if (Ops.empty())
    return;

  // Do one pass to untie operands.
  // Record each tied pair by index so it can be re-established after the
  // operand list is rebuilt; indices shift by Ops.size() below.
  SmallDenseMap<unsigned, unsigned> TiedOpIndices;
  for (const MachineOperand &MO : operands()) {
    if (MO.isReg() && MO.isTied()) {
      unsigned OpNo = getOperandNo(&MO);
      unsigned TiedTo = findTiedOperandIdx(OpNo);
      TiedOpIndices[OpNo] = TiedTo;
      untieRegOperand(OpNo);
    }
  }

  unsigned OpIdx = getOperandNo(InsertBefore);
  unsigned NumOperands = getNumOperands();
  unsigned OpsToMove = NumOperands - OpIdx;

  // Temporarily pop every operand at or after the insertion point; they are
  // re-added after Ops so the net effect is an in-place insertion.
  SmallVector<MachineOperand> MovingOps;
  MovingOps.reserve(OpsToMove);

  // Removing at OpIdx repeatedly shifts the remaining trailing operands down,
  // so the same index is removed OpsToMove times.
  for (unsigned I = 0; I < OpsToMove; ++I) {
    MovingOps.emplace_back(getOperand(OpIdx));
    removeOperand(OpIdx);
  }
  for (const MachineOperand &MO : Ops)
    addOperand(MO);
  for (const MachineOperand &OpMoved : MovingOps)
    addOperand(OpMoved);

  // Re-tie operands.
  // Any recorded index at or after the insertion point moved up by
  // Ops.size(); adjust before re-tying.
  for (auto [Tie1, Tie2] : TiedOpIndices) {
    if (Tie1 >= OpIdx)
      Tie1 += Ops.size();
    if (Tie2 >= OpIdx)
      Tie2 += Ops.size();
    tieOperands(Tie1, Tie2);
  }
}
2729 
2730 bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2731   assert(OpId && "expected non-zero operand id");
2732   assert(isInlineAsm() && "should only be used on inline asm");
2733 
2734   if (!getOperand(OpId).isReg())
2735     return false;
2736 
2737   const MachineOperand &MD = getOperand(OpId - 1);
2738   if (!MD.isImm())
2739     return false;
2740 
2741   InlineAsm::Flag F(MD.getImm());
2742   if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2743     return F.getRegMayBeFolded();
2744   return false;
2745 }
2746