xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/MachineVerifier.cpp (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

  struct MachineVerifier {
    MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

    unsigned verify(const MachineFunction &MF);

    Pass *const PASS;
    const char *Banner;
    const MachineFunction *MF = nullptr;
    const TargetMachine *TM = nullptr;
    const TargetInstrInfo *TII = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    const MachineRegisterInfo *MRI = nullptr;
    const RegisterBankInfo *RBI = nullptr;

    unsigned foundErrors = 0;

    // Avoid querying the MachineFunctionProperties for each operand.
    bool isFunctionRegBankSelected = false;
    bool isFunctionSelected = false;
    bool isFunctionTracksDebugUserValues = false;

    using RegVector = SmallVector<Register, 16>;
    using RegMaskVector = SmallVector<const uint32_t *, 4>;
    using RegSet = DenseSet<Register>;
    using RegMap = DenseMap<Register, const MachineInstr *>;
    using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

    const MachineInstr *FirstNonPHI = nullptr;
    const MachineInstr *FirstTerminator = nullptr;
    BlockSet FunctionBlocks;

    BitVector regsReserved;
    RegSet regsLive;
    RegVector regsDefined, regsDead, regsKilled;
    RegMaskVector regMasks;

    SlotIndex lastIndex;

    // Add Reg and any sub-registers to RV
    void addRegWithSubRegs(RegVector &RV, Register Reg) {
      RV.push_back(Reg);
      if (Reg.isPhysical())
        append_range(RV, TRI->subregs(Reg.asMCReg()));
    }

    struct BBInfo {
      // Is this MBB reachable from the MF entry point?
      bool reachable = false;

      // Vregs that must be live in because they are used without being
      // defined. Map value is the user. vregsLiveIn doesn't include regs
      // that are only used by PHI nodes.
      RegMap vregsLiveIn;

      // Regs killed in MBB. They may be defined again, and will then be in both
      // regsKilled and regsLiveOut.
      RegSet regsKilled;

      // Regs defined in MBB and live out. Note that vregs passing through may
      // be live out without being mentioned here.
      RegSet regsLiveOut;

      // Vregs that pass through MBB untouched. This set is disjoint from
      // regsKilled and regsLiveOut.
      RegSet vregsPassed;

      // Vregs that must pass through MBB because they are needed by a successor
      // block. This set is disjoint from regsLiveOut.
      RegSet vregsRequired;

      // Set versions of block's predecessor and successor lists.
      BlockSet Preds, Succs;

      BBInfo() = default;

      // Add register to vregsRequired if it belongs there. Return true if
      // anything changed.
      bool addRequired(Register Reg) {
        if (!Reg.isVirtual())
          return false;
        if (regsLiveOut.count(Reg))
          return false;
        return vregsRequired.insert(Reg).second;
      }

      // Same for a full set.
      bool addRequired(const RegSet &RS) {
        bool Changed = false;
        for (Register Reg : RS)
          Changed |= addRequired(Reg);
        return Changed;
      }

      // Same for a full map.
      bool addRequired(const RegMap &RM) {
        bool Changed = false;
        for (const auto &I : RM)
          Changed |= addRequired(I.first);
        return Changed;
      }

      // Live-out registers are either in regsLiveOut or vregsPassed.
      bool isLiveOut(Register Reg) const {
        return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
      }
    };

    // Extra register info per MBB.
    DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

    bool isReserved(Register Reg) {
      return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
    }

    bool isAllocatable(Register Reg) const {
      return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
             !regsReserved.test(Reg.id());
    }

    // Analysis information if available
    LiveVariables *LiveVars = nullptr;
    LiveIntervals *LiveInts = nullptr;
    LiveStacks *LiveStks = nullptr;
    SlotIndexes *Indexes = nullptr;

    void visitMachineFunctionBefore();
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);

    /// Verify that all of \p MI's virtual register operands are scalars.
    /// \returns True if all virtual register operands are scalar. False
    /// otherwise.
    bool verifyAllRegOpsScalar(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);
    void visitMachineInstrBefore(const MachineInstr *MI);
    void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
    void visitMachineBundleAfter(const MachineInstr *MI);
    void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
    void visitMachineFunctionAfter();

    void report(const char *msg, const MachineFunction *MF);
    void report(const char *msg, const MachineBasicBlock *MBB);
    void report(const char *msg, const MachineInstr *MI);
    void report(const char *msg, const MachineOperand *MO, unsigned MONum,
                LLT MOVRegType = LLT{});
    void report(const Twine &Msg, const MachineInstr *MI);

    void report_context(const LiveInterval &LI) const;
    void report_context(const LiveRange &LR, Register VRegUnit,
                        LaneBitmask LaneMask) const;
    void report_context(const LiveRange::Segment &S) const;
    void report_context(const VNInfo &VNI) const;
    void report_context(SlotIndex Pos) const;
    void report_context(MCPhysReg PhysReg) const;
    void report_context_liverange(const LiveRange &LR) const;
    void report_context_lanemask(LaneBitmask LaneMask) const;
    void report_context_vreg(Register VReg) const;
    void report_context_vreg_regunit(Register VRegOrUnit) const;

    void verifyInlineAsm(const MachineInstr *MI);

    void checkLiveness(const MachineOperand *MO, unsigned MONum);
    void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                            SlotIndex UseIdx, const LiveRange &LR,
                            Register VRegOrUnit,
                            LaneBitmask LaneMask = LaneBitmask::getNone());
    void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                            SlotIndex DefIdx, const LiveRange &LR,
                            Register VRegOrUnit, bool SubRangeCheck = false,
                            LaneBitmask LaneMask = LaneBitmask::getNone());

    void markReachable(const MachineBasicBlock *MBB);
    void calcRegsPassed();
    void checkPHIOps(const MachineBasicBlock &MBB);

    void calcRegsRequired();
    void verifyLiveVariables();
    void verifyLiveIntervals();
    void verifyLiveInterval(const LiveInterval&);
    void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                              LaneBitmask);
    void verifyLiveRangeSegment(const LiveRange &,
                                const LiveRange::const_iterator I, Register,
                                LaneBitmask);
    void verifyLiveRange(const LiveRange &, Register,
                         LaneBitmask LaneMask = LaneBitmask::getNone());

    void verifyStackFrame();

    void verifySlotIndexes() const;
    void verifyProperties(const MachineFunction &MF);
  };

  struct MachineVerifierPass : public MachineFunctionPass {
    static char ID; // Pass ID, replacement for typeid

    const std::string Banner;

    MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
        initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
      }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addUsedIfAvailable<LiveStacks>();
      AU.addUsedIfAvailable<LiveVariables>();
      AU.addUsedIfAvailable<SlotIndexes>();
      AU.addUsedIfAvailable<LiveIntervals>();
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Skip functions that have known verification problems.
      // FIXME: Remove this mechanism when all problematic passes have been
      // fixed.
      if (MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::FailsVerification))
        return false;

      unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
      if (FoundErrors)
        report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
      return false;
    }
  };

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
      << "- function:    " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
        << ';' <<  Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ":   ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at:          " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval:    " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment:     " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo:       " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange:   " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit:     " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask:    " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      errs() << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check that the successor and predecessor lists look OK; assume they do
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
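/// For example (an illustration of the checks below, not from the original
/// source): <2 x s32> and <2 x s64> match, since both are vectors with two
/// elements, while s32 vs <2 x s32> or <2 x s32> vs <4 x s32> do not.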
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or of its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
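    // Illustrative MIR (not from the original source):
    //   %x:_(s32) = G_ASSERT_ZEXT %y:_(s32), 8
    // records that %y is already zero-extended from 8 bits (sign-extended
    // for G_ASSERT_SEXT), so the immediate must be in (0, source width).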
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
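    // Illustrative MIR (not from the original source):
    //   %x:_(<2 x s32>) = G_BITCAST %y:_(s64)
    // reinterprets the same 64 bits, so the size is preserved and the type
    // must actually change.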
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
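    // Illustrative MIR (not from the original source):
    //   %p:_(p0) = G_PTR_ADD %base:_(p0), %off:_(s64)
    // A pointer base plus an integer offset.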
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
          SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
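    // e.g. %v:_(<2 x s64>) = G_BUILD_VECTOR %a:_(s64), %b:_(s64)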
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
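    // e.g. %v:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %a:_(s32), %b:_(s32)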
1383     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1384     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1385     if (!DstTy.isVector() || SrcEltTy.isVector())
1386       report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1387              MI);
1388     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1389       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1390         report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1391                MI);
1392     if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1393       report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1394              "dest elt type",
1395              MI);
1396     break;
1397   }
1398   case TargetOpcode::G_CONCAT_VECTORS: {
1399     // Source types should be vectors, and total size should match the dest
1400     // vector size.
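    // E.g. a well-formed concatenation (illustrative; names invented):
    //   %v:_(<4 x s32>) = G_CONCAT_VECTORS %a:_(<2 x s32>), %b:_(<2 x s32>)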
1401     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1402     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1403     if (!DstTy.isVector() || !SrcTy.isVector())
1404       report("G_CONCAT_VECTORS requires vector source and destination operands",
1405              MI);
1406 
1407     if (MI->getNumOperands() < 3)
1408       report("G_CONCAT_VECTORS requires at least 2 source operands", MI);
1409 
1410     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1411       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1412         report("G_CONCAT_VECTORS source operand types are not homogeneous", MI);
1413     if (DstTy.getNumElements() !=
1414         SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1415       report("G_CONCAT_VECTORS num dest and source elements should match", MI);
1416     break;
1417   }
1418   case TargetOpcode::G_ICMP:
1419   case TargetOpcode::G_FCMP: {
1420     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1421     LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1422 
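    // E.g. a lane-preserving vector compare (illustrative; names invented):
    //   %c:_(<4 x s1>) = G_ICMP intpred(eq), %a:_(<4 x s32>), %b:_(<4 x s32>)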
1423     if ((DstTy.isVector() != SrcTy.isVector()) ||
1424         (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1425       report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1426 
1427     break;
1428   }
1429   case TargetOpcode::G_EXTRACT: {
1430     const MachineOperand &SrcOp = MI->getOperand(1);
1431     if (!SrcOp.isReg()) {
1432       report("extract source must be a register", MI);
1433       break;
1434     }
1435 
1436     const MachineOperand &OffsetOp = MI->getOperand(2);
1437     if (!OffsetOp.isImm()) {
1438       report("extract offset must be a constant", MI);
1439       break;
1440     }
1441 
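    // E.g. (illustrative; names invented):
    //   %d:_(s16) = G_EXTRACT %s:_(s32), 16
    // reads bits [16, 32), so DstSize + Offset must not exceed SrcSize.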
1442     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1443     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1444     if (SrcSize == DstSize)
1445       report("extract source must be larger than result", MI);
1446 
1447     if (DstSize + OffsetOp.getImm() > SrcSize)
1448       report("extract reads past end of register", MI);
1449     break;
1450   }
1451   case TargetOpcode::G_INSERT: {
1452     const MachineOperand &SrcOp = MI->getOperand(2);
1453     if (!SrcOp.isReg()) {
1454       report("insert source must be a register", MI);
1455       break;
1456     }
1457 
1458     const MachineOperand &OffsetOp = MI->getOperand(3);
1459     if (!OffsetOp.isImm()) {
1460       report("insert offset must be a constant", MI);
1461       break;
1462     }
1463 
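    // E.g. (illustrative; names invented):
    //   %d:_(s32) = G_INSERT %base:_(s32), %val:_(s16), 16
    // writes bits [16, 32), so SrcSize + Offset must not exceed DstSize.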
1464     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1465     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1466 
1467     if (DstSize <= SrcSize)
1468       report("inserted size must be smaller than total register", MI);
1469 
1470     if (SrcSize + OffsetOp.getImm() > DstSize)
1471       report("insert writes past end of register", MI);
1472 
1473     break;
1474   }
1475   case TargetOpcode::G_JUMP_TABLE: {
1476     if (!MI->getOperand(1).isJTI())
1477       report("G_JUMP_TABLE source operand must be a jump table index", MI);
1478     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1479     if (!DstTy.isPointer())
1480       report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1481     break;
1482   }
1483   case TargetOpcode::G_BRJT: {
1484     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1485       report("G_BRJT src operand 0 must be a pointer type", MI);
1486 
1487     if (!MI->getOperand(1).isJTI())
1488       report("G_BRJT src operand 1 must be a jump table index", MI);
1489 
1490     const auto &IdxOp = MI->getOperand(2);
1491     if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1492       report("G_BRJT src operand 2 must be a scalar reg type", MI);
1493     break;
1494   }
1495   case TargetOpcode::G_INTRINSIC:
1496   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1497     // TODO: Should verify number of def and use operands, but the current
1498     // interface requires passing in IR types for mangling.
1499     const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1500     if (!IntrIDOp.isIntrinsicID()) {
1501       report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1502       break;
1503     }
1504 
1505     bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1506     unsigned IntrID = IntrIDOp.getIntrinsicID();
1507     if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1508       AttributeList Attrs = Intrinsic::getAttributes(
1509           MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
1510       bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
1511       if (NoSideEffects && DeclHasSideEffects) {
1512         report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1513         break;
1514       }
1515       if (!NoSideEffects && !DeclHasSideEffects) {
1516         report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1517         break;
1518       }
1519     }
1520 
1521     break;
1522   }
1523   case TargetOpcode::G_SEXT_INREG: {
1524     if (!MI->getOperand(2).isImm()) {
1525       report("G_SEXT_INREG expects an immediate operand #2", MI);
1526       break;
1527     }
1528 
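    // E.g. (illustrative; names invented):
    //   %d:_(s32) = G_SEXT_INREG %x:_(s32), 8
    // sign-extends the low 8 bits, so for an s32 source the size immediate
    // must lie in [1, 31].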
1529     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1530     int64_t Imm = MI->getOperand(2).getImm();
1531     if (Imm <= 0)
1532       report("G_SEXT_INREG size must be >= 1", MI);
1533     if (Imm >= SrcTy.getScalarSizeInBits())
1534       report("G_SEXT_INREG size must be less than source bit width", MI);
1535     break;
1536   }
1537   case TargetOpcode::G_SHUFFLE_VECTOR: {
1538     const MachineOperand &MaskOp = MI->getOperand(3);
1539     if (!MaskOp.isShuffleMask()) {
1540       report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1541       break;
1542     }
1543 
1544     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1545     LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1546     LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1547 
1548     if (Src0Ty != Src1Ty)
1549       report("Source operands must be the same type", MI);
1550 
1551     if (Src0Ty.getScalarType() != DstTy.getScalarType())
1552       report("G_SHUFFLE_VECTOR cannot change element type", MI);
1553 
1554     // Don't check that all operands are vectors because scalars are used in
1555     // place of 1-element vectors.
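    // E.g. with two <2 x s32> sources the mask indices must lie in [0, 3] and
    // a negative index denotes an undef lane (illustrative; names invented):
    //   %r:_(<4 x s32>) = G_SHUFFLE_VECTOR %a(<2 x s32>), %b(<2 x s32>),
    //                     shufflemask(0, 2, -1, 3)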
1556     int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1557     int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1558 
1559     ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1560 
1561     if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1562       report("Wrong result type for shufflemask", MI);
1563 
1564     for (int Idx : MaskIdxes) {
1565       if (Idx < 0)
1566         continue;
1567 
1568       if (Idx >= 2 * SrcNumElts)
1569         report("Out of bounds shuffle index", MI);
1570     }
1571 
1572     break;
1573   }
1574   case TargetOpcode::G_DYN_STACKALLOC: {
1575     const MachineOperand &DstOp = MI->getOperand(0);
1576     const MachineOperand &AllocOp = MI->getOperand(1);
1577     const MachineOperand &AlignOp = MI->getOperand(2);
1578 
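    // E.g. (illustrative; names invented, exact operand printing may differ):
    //   %ptr:_(p0) = G_DYN_STACKALLOC %size:_(s64), 16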
1579     if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1580       report("dst operand 0 must be a pointer type", MI);
1581       break;
1582     }
1583 
1584     if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1585       report("src operand 1 must be a scalar reg type", MI);
1586       break;
1587     }
1588 
1589     if (!AlignOp.isImm()) {
1590       report("src operand 2 must be an immediate type", MI);
1591       break;
1592     }
1593     break;
1594   }
1595   case TargetOpcode::G_MEMCPY_INLINE:
1596   case TargetOpcode::G_MEMCPY:
1597   case TargetOpcode::G_MEMMOVE: {
1598     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1599     if (MMOs.size() != 2) {
1600       report("memcpy/memmove must have 2 memory operands", MI);
1601       break;
1602     }
1603 
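    // The two memory operands are expected in (store to destination, load
    // from source) order; the check below enforces exactly that.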
1604     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1605         (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1606       report("wrong memory operand types", MI);
1607       break;
1608     }
1609 
1610     if (MMOs[0]->getSize() != MMOs[1]->getSize())
1611       report("inconsistent memory operand sizes", MI);
1612 
1613     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1614     LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1615 
1616     if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1617       report("memory instruction operand must be a pointer", MI);
1618       break;
1619     }
1620 
1621     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1622       report("inconsistent store address space", MI);
1623     if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1624       report("inconsistent load address space", MI);
1625 
1626     if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1627       if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1628         report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1629 
1630     break;
1631   }
1632   case TargetOpcode::G_BZERO:
1633   case TargetOpcode::G_MEMSET: {
1634     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1635     std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1636     if (MMOs.size() != 1) {
1637       report(Twine(Name, " must have 1 memory operand"), MI);
1638       break;
1639     }
1640 
1641     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1642       report(Twine(Name, " memory operand must be a store"), MI);
1643       break;
1644     }
1645 
1646     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1647     if (!DstPtrTy.isPointer()) {
1648       report(Twine(Name, " operand must be a pointer"), MI);
1649       break;
1650     }
1651 
1652     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1653       report("inconsistent " + Twine(Name, " address space"), MI);
1654 
1655     if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1656         (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1657       report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1658 
1659     break;
1660   }
1661   case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1662   case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
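    // E.g. a well-formed sequential reduction (illustrative; names invented):
    //   %r:_(s32) = G_VECREDUCE_SEQ_FADD %acc:_(s32), %v:_(<4 x s32>)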
1663     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1664     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1665     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1666     if (!DstTy.isScalar())
1667       report("Vector reduction requires a scalar destination type", MI);
1668     if (!Src1Ty.isScalar())
1669       report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1670     if (!Src2Ty.isVector())
1671       report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1672     break;
1673   }
1674   case TargetOpcode::G_VECREDUCE_FADD:
1675   case TargetOpcode::G_VECREDUCE_FMUL:
1676   case TargetOpcode::G_VECREDUCE_FMAX:
1677   case TargetOpcode::G_VECREDUCE_FMIN:
1678   case TargetOpcode::G_VECREDUCE_ADD:
1679   case TargetOpcode::G_VECREDUCE_MUL:
1680   case TargetOpcode::G_VECREDUCE_AND:
1681   case TargetOpcode::G_VECREDUCE_OR:
1682   case TargetOpcode::G_VECREDUCE_XOR:
1683   case TargetOpcode::G_VECREDUCE_SMAX:
1684   case TargetOpcode::G_VECREDUCE_SMIN:
1685   case TargetOpcode::G_VECREDUCE_UMAX:
1686   case TargetOpcode::G_VECREDUCE_UMIN: {
1687     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1688     if (!DstTy.isScalar())
1689       report("Vector reduction requires a scalar destination type", MI);
1690     break;
1691   }
1692 
1693   case TargetOpcode::G_SBFX:
1694   case TargetOpcode::G_UBFX: {
1695     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1696     if (DstTy.isVector()) {
1697       report("Bitfield extraction is not supported on vectors", MI);
1698       break;
1699     }
1700     break;
1701   }
1702   case TargetOpcode::G_SHL:
1703   case TargetOpcode::G_LSHR:
1704   case TargetOpcode::G_ASHR:
1705   case TargetOpcode::G_ROTR:
1706   case TargetOpcode::G_ROTL: {
1707     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1708     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1709     if (Src1Ty.isVector() != Src2Ty.isVector()) {
1710       report("Shifts and rotates require operands to be either all scalars or "
1711              "all vectors",
1712              MI);
1713       break;
1714     }
1715     break;
1716   }
1717   case TargetOpcode::G_LLROUND:
1718   case TargetOpcode::G_LROUND: {
1719     verifyAllRegOpsScalar(*MI, *MRI);
1720     break;
1721   }
1722   case TargetOpcode::G_IS_FPCLASS: {
1723     LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1724     LLT DestEltTy = DestTy.getScalarType();
1725     if (!DestEltTy.isScalar()) {
1726       report("Destination must be a scalar or vector of scalars", MI);
1727       break;
1728     }
1729     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1730     LLT SrcEltTy = SrcTy.getScalarType();
1731     if (!SrcEltTy.isScalar()) {
1732       report("Source must be a scalar or vector of scalars", MI);
1733       break;
1734     }
1735     if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1736       break;
1737     const MachineOperand &TestMO = MI->getOperand(2);
1738     if (!TestMO.isImm()) {
1739       report("floating-point class set (operand 2) must be an immediate", MI);
1740       break;
1741     }
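    // The immediate is a mask of FPClassTest bits; e.g. (illustrative)
    // fcNan | fcInf would test for NaNs and infinities. Any bits outside
    // fcAllFlags are rejected below.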
1742     int64_t Test = TestMO.getImm();
1743     if (Test < 0 || Test > fcAllFlags) {
1744       report("Incorrect floating-point class set (operand 2)", MI);
1745       break;
1746     }
1747     break;
1748   }
1749   case TargetOpcode::G_ASSERT_ALIGN: {
1750     if (MI->getOperand(2).getImm() < 1)
1751       report("alignment immediate must be >= 1", MI);
1752     break;
1753   }
1754   case TargetOpcode::G_CONSTANT_POOL: {
1755     if (!MI->getOperand(1).isCPI())
1756       report("Src operand 1 must be a constant pool index", MI);
1757     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1758       report("Dst operand 0 must be a pointer", MI);
1759     break;
1760   }
1761   default:
1762     break;
1763   }
1764 }
1765 
1766 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1767   const MCInstrDesc &MCID = MI->getDesc();
1768   if (MI->getNumOperands() < MCID.getNumOperands()) {
1769     report("Too few operands", MI);
1770     errs() << MCID.getNumOperands() << " operands expected, but "
1771            << MI->getNumOperands() << " given.\n";
1772   }
1773 
1774   if (MI->isPHI()) {
1775     if (MF->getProperties().hasProperty(
1776             MachineFunctionProperties::Property::NoPHIs))
1777       report("Found PHI instruction with NoPHIs property set", MI);
1778 
1779     if (FirstNonPHI)
1780       report("Found PHI instruction after non-PHI", MI);
1781   } else if (FirstNonPHI == nullptr)
1782     FirstNonPHI = MI;
1783 
1784   // Check inline asm operands, including their tied-operand constraints.
1785   if (MI->isInlineAsm())
1786     verifyInlineAsm(MI);
1787 
1788   // Check that unspillable terminators define a reg and have at most one use.
1789   if (TII->isUnspillableTerminator(MI)) {
1790     if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1791       report("Unspillable Terminator does not define a reg", MI);
1792     Register Def = MI->getOperand(0).getReg();
1793     if (Def.isVirtual() &&
1794         !MF->getProperties().hasProperty(
1795             MachineFunctionProperties::Property::NoPHIs) &&
1796         std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1797       report("Unspillable Terminator expected to have at most one use!", MI);
1798   }
1799 
1800   // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1801   // DBG_VALUEs: these are convenient to use in tests, but should never get
1802   // generated.
1803   if (MI->isDebugValue() && MI->getNumOperands() == 4)
1804     if (!MI->getDebugLoc())
1805       report("Missing DebugLoc for debug instruction", MI);
1806 
1807   // Meta instructions should never be the subject of debug value tracking,
1808   // as they don't create a value in the output program at all.
1809   if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1810     report("Metadata instruction should not have a value tracking number", MI);
1811 
1812   // Check the MachineMemOperands for basic consistency.
1813   for (MachineMemOperand *Op : MI->memoperands()) {
1814     if (Op->isLoad() && !MI->mayLoad())
1815       report("Missing mayLoad flag", MI);
1816     if (Op->isStore() && !MI->mayStore())
1817       report("Missing mayStore flag", MI);
1818   }
1819 
1820   // Debug values must not have a slot index.
1821   // Other instructions must have one, unless they are inside a bundle.
1822   if (LiveInts) {
1823     bool mapped = !LiveInts->isNotInMIMap(*MI);
1824     if (MI->isDebugOrPseudoInstr()) {
1825       if (mapped)
1826         report("Debug instruction has a slot index", MI);
1827     } else if (MI->isInsideBundle()) {
1828       if (mapped)
1829         report("Instruction inside bundle has a slot index", MI);
1830     } else {
1831       if (!mapped)
1832         report("Missing slot index", MI);
1833     }
1834   }
1835 
1836   unsigned Opc = MCID.getOpcode();
1837   if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1838     verifyPreISelGenericInstruction(MI);
1839     return;
1840   }
1841 
1842   StringRef ErrorInfo;
1843   if (!TII->verifyInstruction(*MI, ErrorInfo))
1844     report(ErrorInfo.data(), MI);
1845 
1846   // Verify properties of various specific instruction types
1847   switch (MI->getOpcode()) {
1848   case TargetOpcode::COPY: {
1849     const MachineOperand &DstOp = MI->getOperand(0);
1850     const MachineOperand &SrcOp = MI->getOperand(1);
1851     const Register SrcReg = SrcOp.getReg();
1852     const Register DstReg = DstOp.getReg();
1853 
1854     LLT DstTy = MRI->getType(DstReg);
1855     LLT SrcTy = MRI->getType(SrcReg);
1856     if (SrcTy.isValid() && DstTy.isValid()) {
1857       // If both types are valid, check that the types are the same.
1858       if (SrcTy != DstTy) {
1859         report("Copy Instruction is illegal with mismatching types", MI);
1860         errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1861       }
1862 
1863       break;
1864     }
1865 
1866     if (!SrcTy.isValid() && !DstTy.isValid())
1867       break;
1868 
1869     // If we have only one valid type, this is likely a copy between a virtual
1870     // and physical register.
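    // E.g. (illustrative): $eax = COPY %0(s32), where only the virtual source
    // carries an LLT.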
1871     unsigned SrcSize = 0;
1872     unsigned DstSize = 0;
1873     if (SrcReg.isPhysical() && DstTy.isValid()) {
1874       const TargetRegisterClass *SrcRC =
1875           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1876       if (SrcRC)
1877         SrcSize = TRI->getRegSizeInBits(*SrcRC);
1878     }
1879 
1880     if (SrcSize == 0)
1881       SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1882 
1883     if (DstReg.isPhysical() && SrcTy.isValid()) {
1884       const TargetRegisterClass *DstRC =
1885           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1886       if (DstRC)
1887         DstSize = TRI->getRegSizeInBits(*DstRC);
1888     }
1889 
1890     if (DstSize == 0)
1891       DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1892 
1893     if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1894       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1895         report("Copy Instruction is illegal with mismatching sizes", MI);
1896         errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1897                << "\n";
1898       }
1899     }
1900     break;
1901   }
1902   case TargetOpcode::STATEPOINT: {
1903     StatepointOpers SO(MI);
1904     if (!MI->getOperand(SO.getIDPos()).isImm() ||
1905         !MI->getOperand(SO.getNBytesPos()).isImm() ||
1906         !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1907       report("meta operands to STATEPOINT not constant!", MI);
1908       break;
1909     }
1910 
1911     auto VerifyStackMapConstant = [&](unsigned Offset) {
1912       if (Offset >= MI->getNumOperands()) {
1913         report("stack map constant to STATEPOINT is out of range!", MI);
1914         return;
1915       }
1916       if (!MI->getOperand(Offset - 1).isImm() ||
1917           MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1918           !MI->getOperand(Offset).isImm())
1919         report("stack map constant to STATEPOINT not well formed!", MI);
1920     };
1921     VerifyStackMapConstant(SO.getCCIdx());
1922     VerifyStackMapConstant(SO.getFlagsIdx());
1923     VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1924     VerifyStackMapConstant(SO.getNumGCPtrIdx());
1925     VerifyStackMapConstant(SO.getNumAllocaIdx());
1926     VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1927 
1928     // Verify that all explicit statepoint defs are tied to gc operands as
1929     // they are expected to be a relocation of gc operands.
1930     unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1931     unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1932     for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1933       unsigned UseOpIdx;
1934       if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1935         report("STATEPOINT defs expected to be tied", MI);
1936         break;
1937       }
1938       if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1939         report("STATEPOINT def tied to non-gc operand", MI);
1940         break;
1941       }
1942     }
1943 
1944     // TODO: verify we have properly encoded deopt arguments
1945   } break;
1946   case TargetOpcode::INSERT_SUBREG: {
1947     unsigned InsertedSize;
1948     if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1949       InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1950     else
1951       InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1952     unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1953     if (SubRegSize < InsertedSize) {
1954       report("INSERT_SUBREG expected inserted value to have equal or lesser "
1955              "size than the subreg it was inserted into", MI);
1956       break;
1957     }
1958   } break;
1959   case TargetOpcode::REG_SEQUENCE: {
1960     unsigned NumOps = MI->getNumOperands();
1961     if (!(NumOps & 1)) {
1962       report("Invalid number of operands for REG_SEQUENCE", MI);
1963       break;
1964     }
1965 
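    // After the def, operands come in (register, subreg index) pairs, e.g.
    // (illustrative; names invented):
    //   %dst = REG_SEQUENCE %a, %subreg.sub0, %b, %subreg.sub1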
1966     for (unsigned I = 1; I != NumOps; I += 2) {
1967       const MachineOperand &RegOp = MI->getOperand(I);
1968       const MachineOperand &SubRegOp = MI->getOperand(I + 1);
1969 
1970       if (!RegOp.isReg())
1971         report("Invalid register operand for REG_SEQUENCE", &RegOp, I);
1972 
1973       if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
1974           SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
1975         report("Invalid subregister index operand for REG_SEQUENCE",
1976                &SubRegOp, I + 1);
1977       }
1978     }
1979 
1980     Register DstReg = MI->getOperand(0).getReg();
1981     if (DstReg.isPhysical())
1982       report("REG_SEQUENCE does not support physical register results", MI);
1983 
1984     if (MI->getOperand(0).getSubReg())
1985       report("Invalid subreg result for REG_SEQUENCE", MI);
1986 
1987     break;
1988   }
1989   }
1990 }
1991 
1992 void
1993 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1994   const MachineInstr *MI = MO->getParent();
1995   const MCInstrDesc &MCID = MI->getDesc();
1996   unsigned NumDefs = MCID.getNumDefs();
1997   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1998     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1999 
2000   // The first MCID.NumDefs operands must be explicit register defines
2001   if (MONum < NumDefs) {
2002     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2003     if (!MO->isReg())
2004       report("Explicit definition must be a register", MO, MONum);
2005     else if (!MO->isDef() && !MCOI.isOptionalDef())
2006       report("Explicit definition marked as use", MO, MONum);
2007     else if (MO->isImplicit())
2008       report("Explicit definition marked as implicit", MO, MONum);
2009   } else if (MONum < MCID.getNumOperands()) {
2010     const MCOperandInfo &MCOI = MCID.operands()[MONum];
2011     // Don't check if it's the last operand in a variadic instruction. See,
2012     // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
2013     bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2014     if (!IsOptional) {
2015       if (MO->isReg()) {
2016         if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2017           report("Explicit operand marked as def", MO, MONum);
2018         if (MO->isImplicit())
2019           report("Explicit operand marked as implicit", MO, MONum);
2020       }
2021 
2022       // Check that an instruction has register operands only as expected.
2023       if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2024           !MO->isReg() && !MO->isFI())
2025         report("Expected a register operand.", MO, MONum);
2026       if (MO->isReg()) {
2027         if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2028             (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2029              !TII->isPCRelRegisterOperandLegal(*MO)))
2030           report("Expected a non-register operand.", MO, MONum);
2031       }
2032     }
2033 
2034     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
2035     if (TiedTo != -1) {
2036       if (!MO->isReg())
2037         report("Tied use must be a register", MO, MONum);
2038       else if (!MO->isTied())
2039         report("Operand should be tied", MO, MONum);
2040       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
2041         report("Tied def doesn't match MCInstrDesc", MO, MONum);
2042       else if (MO->getReg().isPhysical()) {
2043         const MachineOperand &MOTied = MI->getOperand(TiedTo);
2044         if (!MOTied.isReg())
2045           report("Tied counterpart must be a register", &MOTied, TiedTo);
2046         else if (MOTied.getReg().isPhysical() &&
2047                  MO->getReg() != MOTied.getReg())
2048           report("Tied physical registers must match.", &MOTied, TiedTo);
2049       }
2050     } else if (MO->isReg() && MO->isTied())
2051       report("Explicit operand should not be tied", MO, MONum);
2052   } else {
2053     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2054     if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
2055       report("Extra explicit operand on non-variadic instruction", MO, MONum);
2056   }
2057 
2058   switch (MO->getType()) {
2059   case MachineOperand::MO_Register: {
2060     // Verify debug flag on debug instructions. Check this first because reg0
2061     // indicates an undefined debug value.
2062     if (MI->isDebugInstr() && MO->isUse()) {
2063       if (!MO->isDebug())
2064         report("Register operand must be marked debug", MO, MONum);
2065     } else if (MO->isDebug()) {
2066       report("Register operand must not be marked debug", MO, MONum);
2067     }
2068 
2069     const Register Reg = MO->getReg();
2070     if (!Reg)
2071       return;
2072     if (MRI->tracksLiveness() && !MI->isDebugInstr())
2073       checkLiveness(MO, MONum);
2074 
2075     if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2076         MO->getReg().isVirtual()) // TODO: Apply to physregs too
2077       report("Undef virtual register def operands require a subregister", MO, MONum);
2078 
2079     // Verify the consistency of tied operands.
2080     if (MO->isTied()) {
2081       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
2082       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
2083       if (!OtherMO.isReg())
2084         report("Must be tied to a register", MO, MONum);
2085       if (!OtherMO.isTied())
2086         report("Missing tie flags on tied operand", MO, MONum);
2087       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
2088         report("Inconsistent tie links", MO, MONum);
2089       if (MONum < MCID.getNumDefs()) {
2090         if (OtherIdx < MCID.getNumOperands()) {
2091           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
2092             report("Explicit def tied to explicit use without tie constraint",
2093                    MO, MONum);
2094         } else {
2095           if (!OtherMO.isImplicit())
2096             report("Explicit def should be tied to implicit use", MO, MONum);
2097         }
2098       }
2099     }
2100 
2101     // Verify two-address constraints after the twoaddressinstruction pass.
2102     // Both the twoaddressinstruction pass and the phi-node-elimination pass
2103     // call MRI->leaveSSA() to mark MF as NoSSA, but this verification should
2104     // run after twoaddressinstruction, not after phi-node-elimination. So we
2105     // shouldn't use NoSSA as the condition; instead we base the two-address
2106     // verification on the TiedOpsRewritten property, which is set by the
2107     // twoaddressinstruction pass.
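    // E.g. after rewriting, a two-address use reads the same register as its
    // tied def (illustrative X86 MIR; operand details may differ):
    //   %0:gr32 = ADD32rr %0(tied-def 0), %1, implicit-def $eflags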
2108     unsigned DefIdx;
2109     if (MF->getProperties().hasProperty(
2110             MachineFunctionProperties::Property::TiedOpsRewritten) &&
2111         MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2112         Reg != MI->getOperand(DefIdx).getReg())
2113       report("Two-address instruction operands must be identical", MO, MONum);
2114 
2115     // Check register classes.
2116     unsigned SubIdx = MO->getSubReg();
2117 
2118     if (Reg.isPhysical()) {
2119       if (SubIdx) {
2120         report("Illegal subregister index for physical register", MO, MONum);
2121         return;
2122       }
2123       if (MONum < MCID.getNumOperands()) {
2124         if (const TargetRegisterClass *DRC =
2125               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2126           if (!DRC->contains(Reg)) {
2127             report("Illegal physical register for instruction", MO, MONum);
2128             errs() << printReg(Reg, TRI) << " is not a "
2129                    << TRI->getRegClassName(DRC) << " register.\n";
2130           }
2131         }
2132       }
2133       if (MO->isRenamable()) {
2134         if (MRI->isReserved(Reg)) {
2135           report("isRenamable set on reserved register", MO, MONum);
2136           return;
2137         }
2138       }
2139     } else {
2140       // Virtual register.
2141       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2142       if (!RC) {
2143         // This is a generic virtual register.
2144 
2145         // Do not allow undef uses for generic virtual registers. This ensures
2146         // getVRegDef can never fail and return null on a generic register.
2147         //
2148         // FIXME: This restriction should probably be broadened to all SSA
2149         // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2150         // run on the SSA function just before phi elimination.
2151         if (MO->isUndef())
2152           report("Generic virtual register use cannot be undef", MO, MONum);
2153 
2154         // A debug value instruction is permitted to use undefined vregs.
2155         // This is a performance measure to skip the overhead of immediately
2156         // pruning unused debug operands. The final undef substitution occurs
2157         // when debug values are allocated in LDVImpl::handleDebugValue, so
2158         // these verifications always apply after this pass.
2159         if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2160             !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2161           // If we're post-Select, we can't have gvregs anymore.
2162           if (isFunctionSelected) {
2163             report("Generic virtual register invalid in a Selected function",
2164                    MO, MONum);
2165             return;
2166           }
2167 
2168           // The gvreg must have a type and it must not have a SubIdx.
2169           LLT Ty = MRI->getType(Reg);
2170           if (!Ty.isValid()) {
2171             report("Generic virtual register must have a valid type", MO,
2172                    MONum);
2173             return;
2174           }
2175 
2176           const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2177           const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2178 
2179           // If we're post-RegBankSelect, the gvreg must have a bank.
2180           if (!RegBank && isFunctionRegBankSelected) {
2181             report("Generic virtual register must have a bank in a "
2182                    "RegBankSelected function",
2183                    MO, MONum);
2184             return;
2185           }
2186 
2187           // Make sure the register fits into its register bank if any.
2188           if (RegBank && Ty.isValid() &&
2189               RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
2190             report("Register bank is too small for virtual register", MO,
2191                    MONum);
2192             errs() << "Register bank " << RegBank->getName() << " too small ("
2193                    << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
2194                    << Ty.getSizeInBits() << "-bits\n";
2195             return;
2196           }
2197         }
2198 
2199         if (SubIdx)  {
2200           report("Generic virtual register does not allow subregister index", MO,
2201                  MONum);
2202           return;
2203         }
2204 
2205         // If this is a target-specific instruction and this operand
2206         // has a register class constraint, the virtual register must
2207         // comply with it.
2208         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2209             MONum < MCID.getNumOperands() &&
2210             TII->getRegClass(MCID, MONum, TRI, *MF)) {
2211           report("Virtual register does not match instruction constraint", MO,
2212                  MONum);
2213           errs() << "Expected register class "
2214                  << TRI->getRegClassName(
2215                         TII->getRegClass(MCID, MONum, TRI, *MF))
2216                  << " but got nothing\n";
2217           return;
2218         }
2219 
2220         break;
2221       }
2222       if (SubIdx) {
2223         const TargetRegisterClass *SRC =
2224           TRI->getSubClassWithSubReg(RC, SubIdx);
2225         if (!SRC) {
2226           report("Invalid subregister index for virtual register", MO, MONum);
2227           errs() << "Register class " << TRI->getRegClassName(RC)
2228               << " does not support subreg index " << SubIdx << "\n";
2229           return;
2230         }
2231         if (RC != SRC) {
2232           report("Invalid register class for subregister index", MO, MONum);
2233           errs() << "Register class " << TRI->getRegClassName(RC)
2234               << " does not fully support subreg index " << SubIdx << "\n";
2235           return;
2236         }
2237       }
2238       if (MONum < MCID.getNumOperands()) {
2239         if (const TargetRegisterClass *DRC =
2240               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2241           if (SubIdx) {
2242             const TargetRegisterClass *SuperRC =
2243                 TRI->getLargestLegalSuperClass(RC, *MF);
2244             if (!SuperRC) {
2245               report("No largest legal super class exists.", MO, MONum);
2246               return;
2247             }
2248             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2249             if (!DRC) {
2250               report("No matching super-reg register class.", MO, MONum);
2251               return;
2252             }
2253           }
2254           if (!RC->hasSuperClassEq(DRC)) {
2255             report("Illegal virtual register for instruction", MO, MONum);
2256             errs() << "Expected a " << TRI->getRegClassName(DRC)
2257                 << " register, but got a " << TRI->getRegClassName(RC)
2258                 << " register\n";
2259           }
2260         }
2261       }
2262     }
2263     break;
2264   }
2265 
2266   case MachineOperand::MO_RegisterMask:
2267     regMasks.push_back(MO->getRegMask());
2268     break;
2269 
2270   case MachineOperand::MO_MachineBasicBlock:
2271     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2272       report("PHI operand is not in the CFG", MO, MONum);
2273     break;
2274 
2275   case MachineOperand::MO_FrameIndex:
2276     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2277         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2278       int FI = MO->getIndex();
2279       LiveInterval &LI = LiveStks->getInterval(FI);
2280       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2281 
2282       bool stores = MI->mayStore();
2283       bool loads = MI->mayLoad();
2284       // For a memory-to-memory move, we need to check if the frame
2285       // index is used for storing or loading, by inspecting the
2286       // memory operands.
2287       if (stores && loads) {
2288         for (auto *MMO : MI->memoperands()) {
2289           const PseudoSourceValue *PSV = MMO->getPseudoValue();
2290           if (PSV == nullptr) continue;
2291           const FixedStackPseudoSourceValue *Value =
2292             dyn_cast<FixedStackPseudoSourceValue>(PSV);
2293           if (Value == nullptr) continue;
2294           if (Value->getFrameIndex() != FI) continue;
2295 
2296           if (MMO->isStore())
2297             loads = false;
2298           else
2299             stores = false;
2300           break;
2301         }
2302         if (loads == stores)
2303           report("Missing fixed stack memoperand.", MI);
2304       }
2305       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2306         report("Instruction loads from dead spill slot", MO, MONum);
2307         errs() << "Live stack: " << LI << '\n';
2308       }
2309       if (stores && !LI.liveAt(Idx.getRegSlot())) {
2310         report("Instruction stores to dead spill slot", MO, MONum);
2311         errs() << "Live stack: " << LI << '\n';
2312       }
2313     }
2314     break;
2315 
2316   case MachineOperand::MO_CFIIndex:
2317     if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2318       report("CFI instruction has invalid index", MO, MONum);
2319     break;
2320 
2321   default:
2322     break;
2323   }
2324 }
2325 
2326 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2327                                          unsigned MONum, SlotIndex UseIdx,
2328                                          const LiveRange &LR,
2329                                          Register VRegOrUnit,
2330                                          LaneBitmask LaneMask) {
2331   LiveQueryResult LRQ = LR.Query(UseIdx);
2332   // Check if we have a segment at the use. Note, however, that we only need
2333   // one live subregister range; the others may be dead.
2334   if (!LRQ.valueIn() && LaneMask.none()) {
2335     report("No live segment at use", MO, MONum);
2336     report_context_liverange(LR);
2337     report_context_vreg_regunit(VRegOrUnit);
2338     report_context(UseIdx);
2339   }
2340   if (MO->isKill() && !LRQ.isKill()) {
2341     report("Live range continues after kill flag", MO, MONum);
2342     report_context_liverange(LR);
2343     report_context_vreg_regunit(VRegOrUnit);
2344     if (LaneMask.any())
2345       report_context_lanemask(LaneMask);
2346     report_context(UseIdx);
2347   }
2348 }
2349 
2350 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2351                                          unsigned MONum, SlotIndex DefIdx,
2352                                          const LiveRange &LR,
2353                                          Register VRegOrUnit,
2354                                          bool SubRangeCheck,
2355                                          LaneBitmask LaneMask) {
2356   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2357     // The LR can correspond to the whole reg, in which case its def slot is
2358     // not obliged to be the same as the MO's def slot. E.g. we may check a
2359     // "normal" subreg MO here while another early-clobber subreg MO in the
2360     // same instruction gives the whole reg an early-clobber def slot that
2361     // differs from the currently checked MO's def slot. For example:
2362     // %0 [16e,32r:0) 0@16e  L..3 [16e,32r:0) 0@16e  L..C [16r,32r:0) 0@16r
2363     // The check that there is an early-clobber def of the same superregister
2364     // somewhere is performed in visitMachineFunctionAfter().
2365     if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2366         !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
2367         (VNI->def != DefIdx &&
2368          (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2369       report("Inconsistent valno->def", MO, MONum);
2370       report_context_liverange(LR);
2371       report_context_vreg_regunit(VRegOrUnit);
2372       if (LaneMask.any())
2373         report_context_lanemask(LaneMask);
2374       report_context(*VNI);
2375       report_context(DefIdx);
2376     }
2377   } else {
2378     report("No live segment at def", MO, MONum);
2379     report_context_liverange(LR);
2380     report_context_vreg_regunit(VRegOrUnit);
2381     if (LaneMask.any())
2382       report_context_lanemask(LaneMask);
2383     report_context(DefIdx);
2384   }
2385   // Check that, if the dead def flag is present, LiveInts agree.
2386   if (MO->isDead()) {
2387     LiveQueryResult LRQ = LR.Query(DefIdx);
2388     if (!LRQ.isDeadDef()) {
2389       assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2390       // A dead subreg def only tells us that the specific subreg is dead. There
2391       // could be other non-dead defs of other subregs, or we could have other
2392       // parts of the register being live through the instruction. So unless we
2393       // are checking liveness for a subrange it is ok for the live range to
2394       // continue, given that we have a dead def of a subregister.
2395       if (SubRangeCheck || MO->getSubReg() == 0) {
2396         report("Live range continues after dead def flag", MO, MONum);
2397         report_context_liverange(LR);
2398         report_context_vreg_regunit(VRegOrUnit);
2399         if (LaneMask.any())
2400           report_context_lanemask(LaneMask);
2401       }
2402     }
2403   }
2404 }
2405 
2406 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2407   const MachineInstr *MI = MO->getParent();
2408   const Register Reg = MO->getReg();
2409   const unsigned SubRegIdx = MO->getSubReg();
2410 
2411   const LiveInterval *LI = nullptr;
2412   if (LiveInts && Reg.isVirtual()) {
2413     if (LiveInts->hasInterval(Reg)) {
2414       LI = &LiveInts->getInterval(Reg);
2415       if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2416           !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2417         report("Live interval for subreg operand has no subranges", MO, MONum);
2418     } else {
2419       report("Virtual register has no live interval", MO, MONum);
2420     }
2421   }
2422 
2423   // Both use and def operands can read a register.
2424   if (MO->readsReg()) {
2425     if (MO->isKill())
2426       addRegWithSubRegs(regsKilled, Reg);
2427 
2428     // Check that LiveVars knows this kill (unless we are inside a bundle, in
2429     // which case we have already checked that LiveVars knows any kills on the
2430     // bundle header instead).
2431     if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2432         !MI->isBundledWithPred()) {
2433       LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2434       if (!is_contained(VI.Kills, MI))
2435         report("Kill missing from LiveVariables", MO, MONum);
2436     }
2437 
2438     // Check LiveInts liveness and kill.
2439     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2440       SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2441       // Check the cached regunit intervals.
2442       if (Reg.isPhysical() && !isReserved(Reg)) {
2443         for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
2444           if (MRI->isReservedRegUnit(Unit))
2445             continue;
2446           if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2447             checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
2448         }
2449       }
2450 
2451       if (Reg.isVirtual()) {
2452         // This is a virtual register interval.
2453         checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2454 
2455         if (LI->hasSubRanges() && !MO->isDef()) {
2456           LaneBitmask MOMask = SubRegIdx != 0
2457                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2458                                    : MRI->getMaxLaneMaskForVReg(Reg);
2459           LaneBitmask LiveInMask;
2460           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2461             if ((MOMask & SR.LaneMask).none())
2462               continue;
2463             checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2464             LiveQueryResult LRQ = SR.Query(UseIdx);
2465             if (LRQ.valueIn())
2466               LiveInMask |= SR.LaneMask;
2467           }
2468           // At least part of the register has to be live at the use.
2469           if ((LiveInMask & MOMask).none()) {
2470             report("No live subrange at use", MO, MONum);
2471             report_context(*LI);
2472             report_context(UseIdx);
2473           }
2474         }
2475       }
2476     }
2477 
2478     // Use of a dead register.
2479     if (!regsLive.count(Reg)) {
2480       if (Reg.isPhysical()) {
2481         // Reserved registers may be used even when 'dead'.
2482         bool Bad = !isReserved(Reg);
2483         // We are fine if any subregister has a defined value.
2484         if (Bad) {
2486           for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2487             if (regsLive.count(SubReg)) {
2488               Bad = false;
2489               break;
2490             }
2491           }
2492         }
2493         // If there is an additional implicit use of a super register, we
2494         // stop here. By definition we are fine if the super register is not
2495         // (completely) dead; if the complete super register is dead we will
2496         // get a report for its operand.
2497         if (Bad) {
2498           for (const MachineOperand &MOP : MI->uses()) {
2499             if (!MOP.isReg() || !MOP.isImplicit())
2500               continue;
2501 
2502             if (!MOP.getReg().isPhysical())
2503               continue;
2504 
2505             if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2506               Bad = false;
2507           }
2508         }
2509         if (Bad)
2510           report("Using an undefined physical register", MO, MONum);
2511       } else if (MRI->def_empty(Reg)) {
2512         report("Reading virtual register without a def", MO, MONum);
2513       } else {
2514         BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2515         // We don't know which virtual registers are live in, so only complain
2516         // if vreg was killed in this MBB. Otherwise keep track of vregs that
2517         // must be live in. PHI instructions are handled separately.
2518         if (MInfo.regsKilled.count(Reg))
2519           report("Using a killed virtual register", MO, MONum);
2520         else if (!MI->isPHI())
2521           MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2522       }
2523     }
2524   }
2525 
2526   if (MO->isDef()) {
2527     // Register defined.
2528     // TODO: verify that earlyclobber ops are not used.
2529     if (MO->isDead())
2530       addRegWithSubRegs(regsDead, Reg);
2531     else
2532       addRegWithSubRegs(regsDefined, Reg);
2533 
2534     // Verify SSA form.
2535     if (MRI->isSSA() && Reg.isVirtual() &&
2536         std::next(MRI->def_begin(Reg)) != MRI->def_end())
2537       report("Multiple virtual register defs in SSA form", MO, MONum);
2538 
2539     // Check LiveInts for a live segment, but only for virtual registers.
2540     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2541       SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2542       DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2543 
2544       if (Reg.isVirtual()) {
2545         checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2546 
2547         if (LI->hasSubRanges()) {
2548           LaneBitmask MOMask = SubRegIdx != 0
2549                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2550                                    : MRI->getMaxLaneMaskForVReg(Reg);
2551           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2552             if ((SR.LaneMask & MOMask).none())
2553               continue;
2554             checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2555           }
2556         }
2557       }
2558     }
2559   }
2560 }
2561 
2562 // This function gets called after visiting all instructions in a bundle. The
2563 // argument points to the bundle header.
2564 // Normal stand-alone instructions are also considered 'bundles', and this
2565 // function is called for all of them.
2566 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2567   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2568   set_union(MInfo.regsKilled, regsKilled);
2569   set_subtract(regsLive, regsKilled); regsKilled.clear();
2570   // Kill any masked registers.
2571   while (!regMasks.empty()) {
2572     const uint32_t *Mask = regMasks.pop_back_val();
2573     for (Register Reg : regsLive)
2574       if (Reg.isPhysical() &&
2575           MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2576         regsDead.push_back(Reg);
2577   }
2578   set_subtract(regsLive, regsDead);   regsDead.clear();
2579   set_union(regsLive, regsDefined);   regsDefined.clear();
2580 }
2581 
2582 void
2583 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2584   MBBInfoMap[MBB].regsLiveOut = regsLive;
2585   regsLive.clear();
2586 
2587   if (Indexes) {
2588     SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2589     if (!(stop > lastIndex)) {
2590       report("Block ends before last instruction index", MBB);
2591       errs() << "Block ends at " << stop
2592           << ", last instruction was at " << lastIndex << '\n';
2593     }
2594     lastIndex = stop;
2595   }
2596 }
2597 
2598 namespace {
2599 // This implements a set of registers that serves as a filter: can filter other
2600 // sets by passing through elements not in the filter and blocking those that
2601 // are. Any filter implicitly includes the full set of physical registers upon
2602 // creation, thus filtering them all out. The filter itself as a set only grows,
2603 // and needs to be as efficient as possible.
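// Usage sketch (illustrative):
//   VRegFilter Filter;
//   Filter.add(Seen);                          // grow the filter itself
//   SmallVector<Register, 0> NewVRegs;
//   Filter.filterAndAdd(Incoming, NewVRegs);   // pass through only new vregs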
2604 struct VRegFilter {
2605   // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2606   // no duplicates. Both virtual and physical registers are fine.
2607   template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2608     SmallVector<Register, 0> VRegsBuffer;
2609     filterAndAdd(FromRegSet, VRegsBuffer);
2610   }
2611   // Filter \p FromRegSet through the filter and append passed elements into \p
2612   // ToVRegs. All elements appended are then added to the filter itself.
2613   // \returns true if anything changed.
2614   template <typename RegSetT>
2615   bool filterAndAdd(const RegSetT &FromRegSet,
2616                     SmallVectorImpl<Register> &ToVRegs) {
2617     unsigned SparseUniverse = Sparse.size();
2618     unsigned NewSparseUniverse = SparseUniverse;
2619     unsigned NewDenseSize = Dense.size();
2620     size_t Begin = ToVRegs.size();
2621     for (Register Reg : FromRegSet) {
2622       if (!Reg.isVirtual())
2623         continue;
2624       unsigned Index = Register::virtReg2Index(Reg);
2625       if (Index < SparseUniverseMax) {
2626         if (Index < SparseUniverse && Sparse.test(Index))
2627           continue;
2628         NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2629       } else {
2630         if (Dense.count(Reg))
2631           continue;
2632         ++NewDenseSize;
2633       }
2634       ToVRegs.push_back(Reg);
2635     }
2636     size_t End = ToVRegs.size();
2637     if (Begin == End)
2638       return false;
2639     // Reserving space in sets once performs better than doing so continuously
2640     // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2641     // tuned all the way down) and double iteration (the second one is over a
2642     // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2643     Sparse.resize(NewSparseUniverse);
2644     Dense.reserve(NewDenseSize);
2645     for (unsigned I = Begin; I < End; ++I) {
2646       Register Reg = ToVRegs[I];
2647       unsigned Index = Register::virtReg2Index(Reg);
2648       if (Index < SparseUniverseMax)
2649         Sparse.set(Index);
2650       else
2651         Dense.insert(Reg);
2652     }
2653     return true;
2654   }
2655 
2656 private:
2657   static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2658   // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2659   // are tracked by Dense. The only purpose of the threshold and the Dense set
2660   // is to have a reasonably growing memory usage in pathological cases (large
2661   // number of very sparse VRegFilter instances live at the same time). In
2662   // practice even in the worst-by-execution-time cases having all elements
2663   // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2664   // space efficient than if tracked by Dense. The threshold is set to keep the
2665   // worst-case memory usage within 2x of figures determined empirically for
2666   // the "all Dense" scenario in such worst-by-execution-time cases.
2667   BitVector Sparse;
2668   DenseSet<unsigned> Dense;
2669 };
2670 
2671 // Implements both a transfer function and a (binary, in-place) join operator
2672 // for a dataflow over register sets with set union join and filtering transfer
2673 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2674 // Maintains out_b as its state, allowing for O(n) iteration over it at any
2675 // time, where n is the size of the set (as opposed to O(U) where U is the
2676 // universe). filter_b implicitly contains all physical registers at all times.
2677 class FilteringVRegSet {
2678   VRegFilter Filter;
2679   SmallVector<Register, 0> VRegs;
2680 
2681 public:
2682   // Set up the filter_b. \pre Input register set \p RS must have no duplicates.
2683   // Both virtual and physical registers are fine.
2684   template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2685     Filter.add(RS);
2686   }
2687   // Passes \p RS through the filter_b (transfer function) and adds what's left
2688   // to itself (out_b).
2689   template <typename RegSetT> bool add(const RegSetT &RS) {
2690     // Double-duty the Filter: to keep VRegs a set (and the join operation
2691     // a set union), just add everything being added here to the Filter as well.
2692     return Filter.filterAndAdd(RS, VRegs);
2693   }
2694   using const_iterator = decltype(VRegs)::const_iterator;
2695   const_iterator begin() const { return VRegs.begin(); }
2696   const_iterator end() const { return VRegs.end(); }
2697   size_t size() const { return VRegs.size(); }
2698 };
2699 } // namespace
2700 
2701 // Calculate the largest possible vregsPassed sets. These are the registers that
2702 // can pass through an MBB live, but may not be live every time. It is assumed
2703 // that all vregsPassed sets are empty before the call.
2704 void MachineVerifier::calcRegsPassed() {
2705   if (MF->empty())
2706     // ReversePostOrderTraversal doesn't handle empty functions.
2707     return;
2708 
2709   for (const MachineBasicBlock *MB :
2710        ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2711     FilteringVRegSet VRegs;
2712     BBInfo &Info = MBBInfoMap[MB];
2713     assert(Info.reachable);
2714 
2715     VRegs.addToFilter(Info.regsKilled);
2716     VRegs.addToFilter(Info.regsLiveOut);
2717     for (const MachineBasicBlock *Pred : MB->predecessors()) {
2718       const BBInfo &PredInfo = MBBInfoMap[Pred];
2719       if (!PredInfo.reachable)
2720         continue;
2721 
2722       VRegs.add(PredInfo.regsLiveOut);
2723       VRegs.add(PredInfo.vregsPassed);
2724     }
2725     Info.vregsPassed.reserve(VRegs.size());
2726     Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2727   }
2728 }
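
// Worked example on a hypothetical straight-line CFG A -> B -> C: if %0 is
// defined in A and killed in C, it ends up in vregsPassed of B (it can pass
// through B live even though B never mentions it), but not in vregsPassed of
// C, because C's own regsKilled is part of C's filter.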
2729 
2730 // Calculate the set of virtual registers that must be passed through each basic
2731 // block in order to satisfy the requirements of successor blocks. This is very
2732 // similar to calcRegsPassed, only backwards.
2733 void MachineVerifier::calcRegsRequired() {
2734   // First push live-in regs to predecessors' vregsRequired.
2735   SmallPtrSet<const MachineBasicBlock*, 8> todo;
2736   for (const auto &MBB : *MF) {
2737     BBInfo &MInfo = MBBInfoMap[&MBB];
2738     for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2739       BBInfo &PInfo = MBBInfoMap[Pred];
2740       if (PInfo.addRequired(MInfo.vregsLiveIn))
2741         todo.insert(Pred);
2742     }
2743 
2744     // Handle PHI nodes.
2745     for (const MachineInstr &MI : MBB.phis()) {
2746       for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2747         // Skip operands that are not registers or do not read the register.
2748         if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2749           continue;
2750 
2751         // Get register and predecessor for one PHI edge.
2752         Register Reg = MI.getOperand(i).getReg();
2753         const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2754 
2755         BBInfo &PInfo = MBBInfoMap[Pred];
2756         if (PInfo.addRequired(Reg))
2757           todo.insert(Pred);
2758       }
2759     }
2760   }
2761 
2762   // Iteratively push vregsRequired to predecessors. This will converge to the
2763   // same final state regardless of DenseSet iteration order.
2764   while (!todo.empty()) {
2765     const MachineBasicBlock *MBB = *todo.begin();
2766     todo.erase(MBB);
2767     BBInfo &MInfo = MBBInfoMap[MBB];
2768     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2769       if (Pred == MBB)
2770         continue;
2771       BBInfo &SInfo = MBBInfoMap[Pred];
2772       if (SInfo.addRequired(MInfo.vregsRequired))
2773         todo.insert(Pred);
2774     }
2775   }
2776 }
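
// Sketch of the backward propagation above on a hypothetical loop A -> B -> A
// where %0 is defined before the loop and used in A: %0 is live-in to A, so
// the first phase pushes it to A's predecessors (the preheader and B); the
// worklist then pushes it from B back to A, and iteration stops once
// addRequired reports no further changes.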
2777 
2778 // Check PHI instructions at the beginning of MBB. It is assumed that
2779 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2780 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2781   BBInfo &MInfo = MBBInfoMap[&MBB];
2782 
2783   SmallPtrSet<const MachineBasicBlock*, 8> seen;
2784   for (const MachineInstr &Phi : MBB) {
2785     if (!Phi.isPHI())
2786       break;
2787     seen.clear();
2788 
2789     const MachineOperand &MODef = Phi.getOperand(0);
2790     if (!MODef.isReg() || !MODef.isDef()) {
2791       report("Expected first PHI operand to be a register def", &MODef, 0);
2792       continue;
2793     }
2794     if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2795         MODef.isEarlyClobber() || MODef.isDebug())
2796       report("Unexpected flag on PHI operand", &MODef, 0);
2797     Register DefReg = MODef.getReg();
2798     if (!DefReg.isVirtual())
2799       report("Expected first PHI operand to be a virtual register", &MODef, 0);
2800 
2801     for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2802       const MachineOperand &MO0 = Phi.getOperand(I);
2803       if (!MO0.isReg()) {
2804         report("Expected PHI operand to be a register", &MO0, I);
2805         continue;
2806       }
2807       if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2808           MO0.isDebug() || MO0.isTied())
2809         report("Unexpected flag on PHI operand", &MO0, I);
2810 
2811       const MachineOperand &MO1 = Phi.getOperand(I + 1);
2812       if (!MO1.isMBB()) {
2813         report("Expected PHI operand to be a basic block", &MO1, I + 1);
2814         continue;
2815       }
2816 
2817       const MachineBasicBlock &Pre = *MO1.getMBB();
2818       if (!Pre.isSuccessor(&MBB)) {
2819         report("PHI input is not a predecessor block", &MO1, I + 1);
2820         continue;
2821       }
2822 
2823       if (MInfo.reachable) {
2824         seen.insert(&Pre);
2825         BBInfo &PrInfo = MBBInfoMap[&Pre];
2826         if (!MO0.isUndef() && PrInfo.reachable &&
2827             !PrInfo.isLiveOut(MO0.getReg()))
2828           report("PHI operand is not live-out from predecessor", &MO0, I);
2829       }
2830     }
2831 
2832     // Did we see all predecessors?
2833     if (MInfo.reachable) {
2834       for (MachineBasicBlock *Pred : MBB.predecessors()) {
2835         if (!seen.count(Pred)) {
2836           report("Missing PHI operand", &Phi);
2837           errs() << printMBBReference(*Pred)
2838                  << " is a predecessor according to the CFG.\n";
2839         }
2840       }
2841     }
2842   }
2843 }
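
// For reference, a well-formed PHI in MIR notation looks like
//
//   %2:gpr32 = PHI %0, %bb.1, %1, %bb.2
//
// (hypothetical registers and blocks): a single virtual register def followed
// by (value, predecessor) pairs. The checks above fire when a pair names a
// block that is not a CFG predecessor, when an operand carries an unexpected
// flag such as implicit or early-clobber, when an incoming value is not
// live-out of its predecessor, or when a CFG predecessor has no pair at all.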
2844 
2845 void MachineVerifier::visitMachineFunctionAfter() {
2846   calcRegsPassed();
2847 
2848   for (const MachineBasicBlock &MBB : *MF)
2849     checkPHIOps(MBB);
2850 
2851   // Now check liveness info if available
2852   calcRegsRequired();
2853 
2854   // Check for killed virtual registers that should be live out.
2855   for (const auto &MBB : *MF) {
2856     BBInfo &MInfo = MBBInfoMap[&MBB];
2857     for (Register VReg : MInfo.vregsRequired)
2858       if (MInfo.regsKilled.count(VReg)) {
2859         report("Virtual register killed in block, but needed live out.", &MBB);
2860         errs() << "Virtual register " << printReg(VReg)
2861                << " is used after the block.\n";
2862       }
2863   }
2864 
2865   if (!MF->empty()) {
2866     BBInfo &MInfo = MBBInfoMap[&MF->front()];
2867     for (Register VReg : MInfo.vregsRequired) {
2868       report("Virtual register defs don't dominate all uses.", MF);
2869       report_context_vreg(VReg);
2870     }
2871   }
2872 
2873   if (LiveVars)
2874     verifyLiveVariables();
2875   if (LiveInts)
2876     verifyLiveIntervals();
2877 
2878   // Check the live-in list of each MBB. If a register is live into an MBB,
2879   // check that the register is in regsLiveOut of each predecessor block. Since
2880   // this must come from a definition in the predecessor or its live-in
2881   // list, this will catch a live-through case where the predecessor does not
2882   // have the register in its live-in list. This currently only checks
2883   // registers that have no aliases, are not allocatable, and are not
2884   // reserved, such as a condition code register.
2885   if (MRI->tracksLiveness())
2886     for (const auto &MBB : *MF)
2887       for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2888         MCPhysReg LiveInReg = P.PhysReg;
2889         bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2890         if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2891           continue;
2892         for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2893           BBInfo &PInfo = MBBInfoMap[Pred];
2894           if (!PInfo.regsLiveOut.count(LiveInReg)) {
2895             report("Live in register not found to be live out from predecessor.",
2896                    &MBB);
2897             errs() << TRI->getName(LiveInReg)
2898                    << " not found to be live out from "
2899                    << printMBBReference(*Pred) << "\n";
2900           }
2901         }
2902       }
2903 
2904   for (const auto &CSInfo : MF->getCallSitesInfo())
2905     if (!CSInfo.first->isCall())
2906       report("Call site info referencing instruction that is not call", MF);
2907 
2908   // If there's debug-info, check that we don't have any duplicate value
2909   // tracking numbers.
2910   if (MF->getFunction().getSubprogram()) {
2911     DenseSet<unsigned> SeenNumbers;
2912     for (const auto &MBB : *MF) {
2913       for (const auto &MI : MBB) {
2914         if (auto Num = MI.peekDebugInstrNum()) {
2915           auto Result = SeenNumbers.insert((unsigned)Num);
2916           if (!Result.second)
2917             report("Instruction has a duplicated value tracking number", &MI);
2918         }
2919       }
2920     }
2921   }
2922 }
2923 
2924 void MachineVerifier::verifyLiveVariables() {
2925   assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2926   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2927     Register Reg = Register::index2VirtReg(I);
2928     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2929     for (const auto &MBB : *MF) {
2930       BBInfo &MInfo = MBBInfoMap[&MBB];
2931 
2932       // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2933       if (MInfo.vregsRequired.count(Reg)) {
2934         if (!VI.AliveBlocks.test(MBB.getNumber())) {
2935           report("LiveVariables: Block missing from AliveBlocks", &MBB);
2936           errs() << "Virtual register " << printReg(Reg)
2937                  << " must be live through the block.\n";
2938         }
2939       } else {
2940         if (VI.AliveBlocks.test(MBB.getNumber())) {
2941           report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2942           errs() << "Virtual register " << printReg(Reg)
2943                  << " is not needed live through the block.\n";
2944         }
2945       }
2946     }
2947   }
2948 }
2949 
2950 void MachineVerifier::verifyLiveIntervals() {
2951   assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2952   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2953     Register Reg = Register::index2VirtReg(I);
2954 
2955     // Spilling and splitting may leave unused registers around. Skip them.
2956     if (MRI->reg_nodbg_empty(Reg))
2957       continue;
2958 
2959     if (!LiveInts->hasInterval(Reg)) {
2960       report("Missing live interval for virtual register", MF);
2961       errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2962       continue;
2963     }
2964 
2965     const LiveInterval &LI = LiveInts->getInterval(Reg);
2966     assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2967     verifyLiveInterval(LI);
2968   }
2969 
2970   // Verify all the cached regunit intervals.
2971   for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2972     if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2973       verifyLiveRange(*LR, i);
2974 }
2975 
2976 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2977                                            const VNInfo *VNI, Register Reg,
2978                                            LaneBitmask LaneMask) {
2979   if (VNI->isUnused())
2980     return;
2981 
2982   const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2983 
2984   if (!DefVNI) {
2985     report("Value not live at VNInfo def and not marked unused", MF);
2986     report_context(LR, Reg, LaneMask);
2987     report_context(*VNI);
2988     return;
2989   }
2990 
2991   if (DefVNI != VNI) {
2992     report("Live segment at def has different VNInfo", MF);
2993     report_context(LR, Reg, LaneMask);
2994     report_context(*VNI);
2995     return;
2996   }
2997 
2998   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2999   if (!MBB) {
3000     report("Invalid VNInfo definition index", MF);
3001     report_context(LR, Reg, LaneMask);
3002     report_context(*VNI);
3003     return;
3004   }
3005 
3006   if (VNI->isPHIDef()) {
3007     if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
3008       report("PHIDef VNInfo is not defined at MBB start", MBB);
3009       report_context(LR, Reg, LaneMask);
3010       report_context(*VNI);
3011     }
3012     return;
3013   }
3014 
3015   // Non-PHI def.
3016   const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
3017   if (!MI) {
3018     report("No instruction at VNInfo def index", MBB);
3019     report_context(LR, Reg, LaneMask);
3020     report_context(*VNI);
3021     return;
3022   }
3023 
3024   if (Reg != 0) {
3025     bool hasDef = false;
3026     bool isEarlyClobber = false;
3027     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3028       if (!MOI->isReg() || !MOI->isDef())
3029         continue;
3030       if (Reg.isVirtual()) {
3031         if (MOI->getReg() != Reg)
3032           continue;
3033       } else {
3034         if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
3035           continue;
3036       }
3037       if (LaneMask.any() &&
3038           (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
3039         continue;
3040       hasDef = true;
3041       if (MOI->isEarlyClobber())
3042         isEarlyClobber = true;
3043     }
3044 
3045     if (!hasDef) {
3046       report("Defining instruction does not modify register", MI);
3047       report_context(LR, Reg, LaneMask);
3048       report_context(*VNI);
3049     }
3050 
3051     // Early clobber defs begin at USE slots, but other defs must begin at
3052     // DEF slots.
3053     if (isEarlyClobber) {
3054       if (!VNI->def.isEarlyClobber()) {
3055         report("Early clobber def must be at an early-clobber slot", MBB);
3056         report_context(LR, Reg, LaneMask);
3057         report_context(*VNI);
3058       }
3059     } else if (!VNI->def.isRegister()) {
3060       report("Non-PHI, non-early clobber def must be at a register slot", MBB);
3061       report_context(LR, Reg, LaneMask);
3062       report_context(*VNI);
3063     }
3064   }
3065 }
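
// A note on the SlotIndex slot kinds the def checks above rely on: each
// instruction is assigned four slots, printed as B (block boundary), e (early
// clobber), r (register), and d (dead). A normal def therefore starts its
// value at an 'r' slot, an early-clobber def at an 'e' slot, and a PHI-def at
// the block start index.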
3066 
3067 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
3068                                              const LiveRange::const_iterator I,
3069                                              Register Reg,
3070                                              LaneBitmask LaneMask) {
3071   const LiveRange::Segment &S = *I;
3072   const VNInfo *VNI = S.valno;
3073   assert(VNI && "Live segment has no valno");
3074 
3075   if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
3076     report("Foreign valno in live segment", MF);
3077     report_context(LR, Reg, LaneMask);
3078     report_context(S);
3079     report_context(*VNI);
3080   }
3081 
3082   if (VNI->isUnused()) {
3083     report("Live segment valno is marked unused", MF);
3084     report_context(LR, Reg, LaneMask);
3085     report_context(S);
3086   }
3087 
3088   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
3089   if (!MBB) {
3090     report("Bad start of live segment, no basic block", MF);
3091     report_context(LR, Reg, LaneMask);
3092     report_context(S);
3093     return;
3094   }
3095   SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
3096   if (S.start != MBBStartIdx && S.start != VNI->def) {
3097     report("Live segment must begin at MBB entry or valno def", MBB);
3098     report_context(LR, Reg, LaneMask);
3099     report_context(S);
3100   }
3101 
3102   const MachineBasicBlock *EndMBB =
3103     LiveInts->getMBBFromIndex(S.end.getPrevSlot());
3104   if (!EndMBB) {
3105     report("Bad end of live segment, no basic block", MF);
3106     report_context(LR, Reg, LaneMask);
3107     report_context(S);
3108     return;
3109   }
3110 
3111   // Checks for non-live-out segments.
3112   if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
3113     // RegUnit intervals are allowed to have dead PHIs.
3114     if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
3115         S.end == VNI->def.getDeadSlot())
3116       return;
3117 
3118     // The live segment ends inside EndMBB.
3119     const MachineInstr *MI =
3120         LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3121     if (!MI) {
3122       report("Live segment doesn't end at a valid instruction", EndMBB);
3123       report_context(LR, Reg, LaneMask);
3124       report_context(S);
3125       return;
3126     }
3127 
3128     // The block slot must refer to a basic block boundary.
3129     // A block (B) slot may only appear at a basic block boundary.
3130       report("Live segment ends at B slot of an instruction", EndMBB);
3131       report_context(LR, Reg, LaneMask);
3132       report_context(S);
3133     }
3134 
3135     if (S.end.isDead()) {
3136       // Segment ends on the dead slot.
3137       // That means there must be a dead def.
3138       if (!SlotIndex::isSameInstr(S.start, S.end)) {
3139         report("Live segment ending at dead slot spans instructions", EndMBB);
3140         report_context(LR, Reg, LaneMask);
3141         report_context(S);
3142       }
3143     }
3144 
3145     // After tied operands are rewritten, a live segment can only end at an
3146     // early-clobber slot if it is being redefined by an early-clobber def.
3147     // TODO: Before tied operands are rewritten, a live segment can only end at
3148     // an early-clobber slot if the last use is tied to an early-clobber def.
3149     if (MF->getProperties().hasProperty(
3150             MachineFunctionProperties::Property::TiedOpsRewritten) &&
3151         S.end.isEarlyClobber()) {
3152       if (I + 1 == LR.end() || (I + 1)->start != S.end) {
3153         report("Live segment ending at early clobber slot must be "
3154                "redefined by an EC def in the same instruction",
3155                EndMBB);
3156         report_context(LR, Reg, LaneMask);
3157         report_context(S);
3158       }
3159     }
3160 
3161     // The following checks only apply to virtual registers. Physreg liveness
3162     // is too weird to check.
3163     if (Reg.isVirtual()) {
3164       // A live segment can end with either a redefinition, a kill flag on a
3165       // use, or a dead flag on a def.
3166       bool hasRead = false;
3167       bool hasSubRegDef = false;
3168       bool hasDeadDef = false;
3169       for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3170         if (!MOI->isReg() || MOI->getReg() != Reg)
3171           continue;
3172         unsigned Sub = MOI->getSubReg();
3173         LaneBitmask SLM =
3174             Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
3175         if (MOI->isDef()) {
3176           if (Sub != 0) {
3177             hasSubRegDef = true;
3178             // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3179             // mask for subregister defs. Read-undef defs will be handled by
3180             // readsReg below.
3181             SLM = ~SLM;
3182           }
3183           if (MOI->isDead())
3184             hasDeadDef = true;
3185         }
3186         if (LaneMask.any() && (LaneMask & SLM).none())
3187           continue;
3188         if (MOI->readsReg())
3189           hasRead = true;
3190       }
3191       if (S.end.isDead()) {
3192         // Make sure that the corresponding machine operand for a "dead" live
3193         // range has the dead flag. We cannot perform this check for subregister
3194         // liveranges as partially dead values are allowed.
3195         if (LaneMask.none() && !hasDeadDef) {
3196           report(
3197               "Instruction ending live segment on dead slot has no dead flag",
3198               MI);
3199           report_context(LR, Reg, LaneMask);
3200           report_context(S);
3201         }
3202       } else {
3203         if (!hasRead) {
3204           // When tracking subregister liveness, the main range must start new
3205           // values on partial register writes, even if there is no read.
3206           if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3207               !hasSubRegDef) {
3208             report("Instruction ending live segment doesn't read the register",
3209                    MI);
3210             report_context(LR, Reg, LaneMask);
3211             report_context(S);
3212           }
3213         }
3214       }
3215     }
3216   }
3217 
3218   // Now check all the basic blocks in this live segment.
3219   MachineFunction::const_iterator MFI = MBB->getIterator();
3220   // Is this live segment the beginning of a non-PHIDef VN?
3221   if (S.start == VNI->def && !VNI->isPHIDef()) {
3222     // Not live-in to any blocks.
3223     if (MBB == EndMBB)
3224       return;
3225     // Skip this block.
3226     ++MFI;
3227   }
3228 
3229   SmallVector<SlotIndex, 4> Undefs;
3230   if (LaneMask.any()) {
3231     LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3232     OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3233   }
3234 
3235   while (true) {
3236     assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3237     // We don't know how to track physregs into a landing pad.
3238     if (!Reg.isVirtual() && MFI->isEHPad()) {
3239       if (&*MFI == EndMBB)
3240         break;
3241       ++MFI;
3242       continue;
3243     }
3244 
3245     // Is VNI a PHI-def in the current block?
3246     bool IsPHI = VNI->isPHIDef() &&
3247       VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3248 
3249     // Check that VNI is live-out of all predecessors.
3250     for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3251       SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3252       // A landing pad's predecessor is considered live-out at its last call.
3253       if (MFI->isEHPad()) {
3254         for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3255           if (MI.isCall()) {
3256             PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3257             break;
3258           }
3259         }
3260       }
3261       const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3262       // All predecessors must have a live-out value. However, for a phi
3263       // instruction with subregister intervals, only one of the
3264       // subregisters (not necessarily the current one) needs to be
3265       // defined.
3266       // be defined.
3267       if (!PVNI && (LaneMask.none() || !IsPHI)) {
3268         if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3269           continue;
3270         report("Register not marked live out of predecessor", Pred);
3271         report_context(LR, Reg, LaneMask);
3272         report_context(*VNI);
3273         errs() << " live into " << printMBBReference(*MFI) << '@'
3274                << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3275                << PEnd << '\n';
3276         continue;
3277       }
3278 
3279       // Only PHI-defs can take different predecessor values.
3280       if (!IsPHI && PVNI != VNI) {
3281         report("Different value live out of predecessor", Pred);
3282         report_context(LR, Reg, LaneMask);
3283         errs() << "Valno #" << PVNI->id << " live out of "
3284                << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3285                << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3286                << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3287       }
3288     }
3289     if (&*MFI == EndMBB)
3290       break;
3291     ++MFI;
3292   }
3293 }
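
// For orientation, a live range printed in debug output looks roughly like
//
//   %1 [32r,144r:0)  0@32r
//
// i.e. half-open [start,end) segments tagged with a value number, followed by
// each value's def slot. The segment checks above enforce that the start is
// the value's def or a block entry, and that the end is a block end or a slot
// attached to a real instruction.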
3294 
3295 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3296                                       LaneBitmask LaneMask) {
3297   for (const VNInfo *VNI : LR.valnos)
3298     verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3299 
3300   for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3301     verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3302 }
3303 
3304 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3305   Register Reg = LI.reg();
3306   assert(Reg.isVirtual());
3307   verifyLiveRange(LI, Reg);
3308 
3309   LaneBitmask Mask;
3310   LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3311   for (const LiveInterval::SubRange &SR : LI.subranges()) {
3312     if ((Mask & SR.LaneMask).any()) {
3313       report("Lane masks of sub ranges overlap in live interval", MF);
3314       report_context(LI);
3315     }
3316     if ((SR.LaneMask & ~MaxMask).any()) {
3317       report("Subrange lanemask is invalid", MF);
3318       report_context(LI);
3319     }
3320     if (SR.empty()) {
3321       report("Subrange must not be empty", MF);
3322       report_context(SR, LI.reg(), SR.LaneMask);
3323     }
3324     Mask |= SR.LaneMask;
3325     verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3326     if (!LI.covers(SR)) {
3327       report("A Subrange is not covered by the main range", MF);
3328       report_context(LI);
3329     }
3330   }
3331 
3332   // Check the LI only has one connected component.
3333   ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3334   unsigned NumComp = ConEQ.Classify(LI);
3335   if (NumComp > 1) {
3336     report("Multiple connected components in live interval", MF);
3337     report_context(LI);
3338     for (unsigned comp = 0; comp != NumComp; ++comp) {
3339       errs() << comp << ": valnos";
3340       for (const VNInfo *I : LI.valnos)
3341         if (comp == ConEQ.getEqClass(I))
3342           errs() << ' ' << I->id;
3343       errs() << '\n';
3344     }
3345   }
3346 }
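
// Subranges extend the same verification to individual lanes. For a register
// with two subregisters, the interval might print as (hypothetical masks)
//
//   %1 [32r,144r:0)  0@32r  L0000000000000003 [32r,96r:0)  0@32r
//
// and the loop above rejects subrange lane masks that overlap each other,
// contain lanes outside getMaxLaneMaskForVReg, or describe an empty range or
// one not covered by the main range.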
3347 
3348 namespace {
3349 
3350   // FrameSetup and FrameDestroy can have zero adjustment, so a single
3351   // integer cannot tell whether a zero value came from a FrameSetup or a
3352   // FrameDestroy.
3353   // We therefore use a bool plus an integer to capture the stack state.
3354   struct StackStateOfBB {
3355     StackStateOfBB() = default;
3356     StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3357       EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3358       ExitIsSetup(ExitSetup) {}
3359 
3360     // Can be negative, which means we are setting up a frame.
3361     int EntryValue = 0;
3362     int ExitValue = 0;
3363     bool EntryIsSetup = false;
3364     bool ExitIsSetup = false;
3365   };
3366 
3367 } // end anonymous namespace
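
// As a sketch of the bookkeeping below, on a target whose call-frame pseudos
// are named ADJCALLSTACKDOWN/ADJCALLSTACKUP (the names vary per target), a
// block containing
//
//   ADJCALLSTACKDOWN 16, ...   // ExitValue -= 16, ExitIsSetup = true
//   CALL ...
//   ADJCALLSTACKUP 16, ...     // ExitValue += 16, ExitIsSetup = false
//
// enters and leaves with state (0, false), which is exactly what the
// consistency checks at merge points and return blocks expect.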
3368 
3369 /// Make sure that on every path through the CFG, a FrameSetup <n> is always
3370 /// followed by a FrameDestroy <n>, stack adjustments are identical on all CFG
3371 /// edges to a merge point, and the frame is destroyed at the end of a return block.
3372 void MachineVerifier::verifyStackFrame() {
3373   unsigned FrameSetupOpcode   = TII->getCallFrameSetupOpcode();
3374   unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3375   if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3376     return;
3377 
3378   SmallVector<StackStateOfBB, 8> SPState;
3379   SPState.resize(MF->getNumBlockIDs());
3380   df_iterator_default_set<const MachineBasicBlock*> Reachable;
3381 
3382   // Visit the MBBs in DFS order.
3383   for (df_ext_iterator<const MachineFunction *,
3384                        df_iterator_default_set<const MachineBasicBlock *>>
3385        DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3386        DFI != DFE; ++DFI) {
3387     const MachineBasicBlock *MBB = *DFI;
3388 
3389     StackStateOfBB BBState;
3390     // Check the exit state of the DFS stack predecessor.
3391     if (DFI.getPathLength() >= 2) {
3392       const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3393       assert(Reachable.count(StackPred) &&
3394              "DFS stack predecessor is already visited.\n");
3395       BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3396       BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3397       BBState.ExitValue = BBState.EntryValue;
3398       BBState.ExitIsSetup = BBState.EntryIsSetup;
3399     }
3400 
3401     // Update stack state by checking contents of MBB.
3402     for (const auto &I : *MBB) {
3403       if (I.getOpcode() == FrameSetupOpcode) {
3404         if (BBState.ExitIsSetup)
3405           report("FrameSetup is after another FrameSetup", &I);
3406         BBState.ExitValue -= TII->getFrameTotalSize(I);
3407         BBState.ExitIsSetup = true;
3408       }
3409 
3410       if (I.getOpcode() == FrameDestroyOpcode) {
3411         int Size = TII->getFrameTotalSize(I);
3412         if (!BBState.ExitIsSetup)
3413           report("FrameDestroy is not after a FrameSetup", &I);
3414         int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3415                                                BBState.ExitValue;
3416         if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3417           report("FrameDestroy <n> is after FrameSetup <m>", &I);
3418           errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3419                  << AbsSPAdj << ">.\n";
3420         }
3421         BBState.ExitValue += Size;
3422         BBState.ExitIsSetup = false;
3423       }
3424     }
3425     SPState[MBB->getNumber()] = BBState;
3426 
3427     // Make sure the exit state of any predecessor is consistent with the entry
3428     // state.
3429     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3430       if (Reachable.count(Pred) &&
3431           (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3432            SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3433         report("The exit stack state of a predecessor is inconsistent.", MBB);
3434         errs() << "Predecessor " << printMBBReference(*Pred)
3435                << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3436                << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3437                << printMBBReference(*MBB) << " has entry state ("
3438                << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3439       }
3440     }
3441 
3442     // Make sure the entry state of any successor is consistent with the exit
3443     // state.
3444     for (const MachineBasicBlock *Succ : MBB->successors()) {
3445       if (Reachable.count(Succ) &&
3446           (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3447            SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3448         report("The entry stack state of a successor is inconsistent.", MBB);
3449         errs() << "Successor " << printMBBReference(*Succ)
3450                << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3451                << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3452                << printMBBReference(*MBB) << " has exit state ("
3453                << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3454       }
3455     }
3456 
3457     // Make sure a basic block with return ends with zero stack adjustment.
3458     if (!MBB->empty() && MBB->back().isReturn()) {
3459       if (BBState.ExitIsSetup)
3460         report("A return block ends with a FrameSetup.", MBB);
3461       if (BBState.ExitValue)
3462         report("A return block ends with a nonzero stack adjustment.", MBB);
3463     }
3464   }
3465 }
3466