//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

  struct MachineVerifier {
    MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

    unsigned verify(const MachineFunction &MF);

    Pass *const PASS;
    const char *Banner;
    const MachineFunction *MF;
    const TargetMachine *TM;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    const MachineRegisterInfo *MRI;
    const RegisterBankInfo *RBI;

    unsigned foundErrors;

    // Avoid querying the MachineFunctionProperties for each operand.
    bool isFunctionRegBankSelected;
    bool isFunctionSelected;
    bool isFunctionTracksDebugUserValues;

    using RegVector = SmallVector<Register, 16>;
    using RegMaskVector = SmallVector<const uint32_t *, 4>;
    using RegSet = DenseSet<Register>;
    using RegMap = DenseMap<Register, const MachineInstr *>;
    using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

    const MachineInstr *FirstNonPHI;
    const MachineInstr *FirstTerminator;
    BlockSet FunctionBlocks;

    BitVector regsReserved;
    RegSet regsLive;
    RegVector regsDefined, regsDead, regsKilled;
    RegMaskVector regMasks;

    SlotIndex lastIndex;

    // Add Reg and any sub-registers to RV
    void addRegWithSubRegs(RegVector &RV, Register Reg) {
      RV.push_back(Reg);
      if (Reg.isPhysical())
        append_range(RV, TRI->subregs(Reg.asMCReg()));
    }

    struct BBInfo {
      // Is this MBB reachable from the MF entry point?
      bool reachable = false;

      // Vregs that must be live in because they are used without being
      // defined. Map value is the user. vregsLiveIn doesn't include regs
      // that are only used by PHI nodes.
      RegMap vregsLiveIn;

      // Regs killed in MBB. They may be defined again, and will then be in both
      // regsKilled and regsLiveOut.
      RegSet regsKilled;

      // Regs defined in MBB and live out. Note that vregs passing through may
      // be live out without being mentioned here.
      RegSet regsLiveOut;

      // Vregs that pass through MBB untouched. This set is disjoint from
      // regsKilled and regsLiveOut.
      RegSet vregsPassed;

      // Vregs that must pass through MBB because they are needed by a successor
      // block. This set is disjoint from regsLiveOut.
      RegSet vregsRequired;

      // Set versions of block's predecessor and successor lists.
      BlockSet Preds, Succs;

      BBInfo() = default;

      // Add register to vregsRequired if it belongs there. Return true if
      // anything changed.
      bool addRequired(Register Reg) {
        if (!Reg.isVirtual())
          return false;
        if (regsLiveOut.count(Reg))
          return false;
        return vregsRequired.insert(Reg).second;
      }

      // Same for a full set.
      bool addRequired(const RegSet &RS) {
        bool Changed = false;
        for (Register Reg : RS)
          Changed |= addRequired(Reg);
        return Changed;
      }

      // Same for a full map.
      bool addRequired(const RegMap &RM) {
        bool Changed = false;
        for (const auto &I : RM)
          Changed |= addRequired(I.first);
        return Changed;
      }

      // Live-out registers are either in regsLiveOut or vregsPassed.
      bool isLiveOut(Register Reg) const {
        return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
      }
    };

    // Extra register info per MBB.
    DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

    bool isReserved(Register Reg) {
      return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
    }

    bool isAllocatable(Register Reg) const {
      return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
             !regsReserved.test(Reg.id());
    }

    // Analysis information if available
    LiveVariables *LiveVars;
    LiveIntervals *LiveInts;
    LiveStacks *LiveStks;
    SlotIndexes *Indexes;

    void visitMachineFunctionBefore();
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);

    /// Verify that all of \p MI's virtual register operands are scalars.
    /// \returns True if all virtual register operands are scalar. False
    /// otherwise.
    bool verifyAllRegOpsScalar(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);
    void visitMachineInstrBefore(const MachineInstr *MI);
    void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
    void visitMachineBundleAfter(const MachineInstr *MI);
    void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
    void visitMachineFunctionAfter();

    void report(const char *msg, const MachineFunction *MF);
    void report(const char *msg, const MachineBasicBlock *MBB);
    void report(const char *msg, const MachineInstr *MI);
    void report(const char *msg, const MachineOperand *MO, unsigned MONum,
                LLT MOVRegType = LLT{});
    void report(const Twine &Msg, const MachineInstr *MI);

    void report_context(const LiveInterval &LI) const;
    void report_context(const LiveRange &LR, Register VRegUnit,
                        LaneBitmask LaneMask) const;
    void report_context(const LiveRange::Segment &S) const;
    void report_context(const VNInfo &VNI) const;
    void report_context(SlotIndex Pos) const;
    void report_context(MCPhysReg PhysReg) const;
    void report_context_liverange(const LiveRange &LR) const;
    void report_context_lanemask(LaneBitmask LaneMask) const;
    void report_context_vreg(Register VReg) const;
    void report_context_vreg_regunit(Register VRegOrUnit) const;

    void verifyInlineAsm(const MachineInstr *MI);

    void checkLiveness(const MachineOperand *MO, unsigned MONum);
    void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                            SlotIndex UseIdx, const LiveRange &LR,
                            Register VRegOrUnit,
                            LaneBitmask LaneMask = LaneBitmask::getNone());
    void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                            SlotIndex DefIdx, const LiveRange &LR,
                            Register VRegOrUnit, bool SubRangeCheck = false,
                            LaneBitmask LaneMask = LaneBitmask::getNone());

    void markReachable(const MachineBasicBlock *MBB);
    void calcRegsPassed();
    void checkPHIOps(const MachineBasicBlock &MBB);

    void calcRegsRequired();
    void verifyLiveVariables();
    void verifyLiveIntervals();
    void verifyLiveInterval(const LiveInterval&);
    void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                              LaneBitmask);
    void verifyLiveRangeSegment(const LiveRange &,
                                const LiveRange::const_iterator I, Register,
                                LaneBitmask);
    void verifyLiveRange(const LiveRange &, Register,
                         LaneBitmask LaneMask = LaneBitmask::getNone());

    void verifyStackFrame();

    void verifySlotIndexes() const;
    void verifyProperties(const MachineFunction &MF);
  };

  struct MachineVerifierPass : public MachineFunctionPass {
    static char ID; // Pass ID, replacement for typeid

    const std::string Banner;

    MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
        initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
      }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Skip functions that have known verification problems.
      // FIXME: Remove this mechanism when all problematic passes have been
      // fixed.
      if (MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::FailsVerification))
        return false;

      unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
      if (FoundErrors)
        report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
      return false;
    }
  };

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
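  // Print the banner and the full function (or its live intervals) only
  // once, when the first error is reported.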
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
      << "- function:    " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
        << ';' <<  Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ":   ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at:          " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval:    " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment:     " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo:       " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange:   " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (Register::isVirtualRegister(VRegOrUnit)) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit:     " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask:    " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
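  // Use the frozen reserved register set from MRI when it is available;
  // otherwise recompute it from the target register info.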
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin()) {
        report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
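  // A block normally has at most one landing-pad successor. More are
  // tolerated only for the SjLj dispatch block (a switch terminator) and for
  // scoped (funclet-based) EH personalities.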
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

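  // Pristine callee-saved registers are treated as live in every block, in
  // addition to the registers on the explicit live-in list.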
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    report("Non-terminator instruction after the first terminator", MI);
    errs() << "First terminator was:\t" << *FirstTerminator;
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
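  // Walk the operand groups: each group starts with a flag immediate that
  // encodes how many register operands follow it.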
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
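  // The predicate matches virtual register operands whose type is not a
  // scalar, so none_of() holds exactly when every virtual register operand
  // has a scalar type.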
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the whole vector or just its lane. Instead of making an
    // arbitrary choice and emitting a not-so-helpful message, avoid the extra
    // noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.OpInfo[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger
    // scalar, e.g. s2N = MERGE sN, sN. Merging multiple scalars into a vector
    // is not allowed; use G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
    // For now G_UNMERGE can split vectors.
    for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
        report("G_UNMERGE_VALUES destination types do not match", MI);
    }
    if (SrcTy.getSizeInBits() !=
        (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
      report("G_UNMERGE_VALUES source operand does not cover dest operands",
             MI);
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);
1306     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1307       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1308         report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1309 
1310     break;
1311   }
1312   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1313     // Source types must be scalars, dest type a vector. Scalar types must be
1314     // larger than the dest vector elt type, as this is a truncating operation.
1315     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1316     LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1317     if (!DstTy.isVector() || SrcEltTy.isVector())
1318       report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1319              MI);
1320     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1321       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1322         report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1323                MI);
1324     if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1325       report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1326              "dest elt type",
1327              MI);
1328     break;
1329   }
1330   case TargetOpcode::G_CONCAT_VECTORS: {
1331     // Source types should be vectors, and total size should match the dest
1332     // vector size.
1333     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1334     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1335     if (!DstTy.isVector() || !SrcTy.isVector())
1336       report("G_CONCAT_VECTOR requires vector source and destination operands",
1337              MI);
1338 
1339     if (MI->getNumOperands() < 3)
1340       report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1341 
1342     for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1343       if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1344         report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1345     if (DstTy.getNumElements() !=
1346         SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1347       report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1348     break;
1349   }
1350   case TargetOpcode::G_ICMP:
1351   case TargetOpcode::G_FCMP: {
1352     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1353     LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1354 
1355     if ((DstTy.isVector() != SrcTy.isVector()) ||
1356         (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1357       report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1358 
1359     break;
1360   }
1361   case TargetOpcode::G_EXTRACT: {
1362     const MachineOperand &SrcOp = MI->getOperand(1);
1363     if (!SrcOp.isReg()) {
1364       report("extract source must be a register", MI);
1365       break;
1366     }
1367 
1368     const MachineOperand &OffsetOp = MI->getOperand(2);
1369     if (!OffsetOp.isImm()) {
1370       report("extract offset must be a constant", MI);
1371       break;
1372     }
1373 
1374     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1375     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1376     if (SrcSize == DstSize)
1377       report("extract source must be larger than result", MI);
1378 
1379     if (DstSize + OffsetOp.getImm() > SrcSize)
1380       report("extract reads past end of register", MI);
1381     break;
1382   }
1383   case TargetOpcode::G_INSERT: {
1384     const MachineOperand &SrcOp = MI->getOperand(2);
1385     if (!SrcOp.isReg()) {
1386       report("insert source must be a register", MI);
1387       break;
1388     }
1389 
1390     const MachineOperand &OffsetOp = MI->getOperand(3);
1391     if (!OffsetOp.isImm()) {
1392       report("insert offset must be a constant", MI);
1393       break;
1394     }
1395 
1396     unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1397     unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1398 
1399     if (DstSize <= SrcSize)
1400       report("inserted size must be smaller than total register", MI);
1401 
1402     if (SrcSize + OffsetOp.getImm() > DstSize)
1403       report("insert writes past end of register", MI);
1404 
1405     break;
1406   }
1407   case TargetOpcode::G_JUMP_TABLE: {
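    // The destination is a pointer and the sole source is a jump table index,
    // e.g. (illustrative)
    //   %0:_(p0) = G_JUMP_TABLE %jump-table.0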
1408     if (!MI->getOperand(1).isJTI())
1409       report("G_JUMP_TABLE source operand must be a jump table index", MI);
1410     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1411     if (!DstTy.isPointer())
1412       report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1413     break;
1414   }
1415   case TargetOpcode::G_BRJT: {
1416     if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1417       report("G_BRJT src operand 0 must be a pointer type", MI);
1418 
1419     if (!MI->getOperand(1).isJTI())
1420       report("G_BRJT src operand 1 must be a jump table index", MI);
1421 
1422     const auto &IdxOp = MI->getOperand(2);
1423     if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1424       report("G_BRJT src operand 2 must be a scalar reg type", MI);
1425     break;
1426   }
1427   case TargetOpcode::G_INTRINSIC:
1428   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1429     // TODO: Should verify number of def and use operands, but the current
1430     // interface requires passing in IR types for mangling.
1431     const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1432     if (!IntrIDOp.isIntrinsicID()) {
1433       report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1434       break;
1435     }
1436 
1437     bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1438     unsigned IntrID = IntrIDOp.getIntrinsicID();
1439     if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1440       AttributeList Attrs
1441         = Intrinsic::getAttributes(MF->getFunction().getContext(),
1442                                    static_cast<Intrinsic::ID>(IntrID));
1443       bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
1444       if (NoSideEffects && DeclHasSideEffects) {
1445         report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1446         break;
1447       }
1448       if (!NoSideEffects && !DeclHasSideEffects) {
1449         report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1450         break;
1451       }
1452     }
1453 
1454     break;
1455   }
1456   case TargetOpcode::G_SEXT_INREG: {
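    // The immediate is the bit width being sign-extended from, e.g.
    // (sizes are illustrative)
    //   %1:_(s32) = G_SEXT_INREG %0:_(s32), 8
    // sign-extends the low 8 bits. It must be >= 1 and strictly smaller than
    // the scalar source width.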
1457     if (!MI->getOperand(2).isImm()) {
1458       report("G_SEXT_INREG expects an immediate operand #2", MI);
1459       break;
1460     }
1461 
1462     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1463     int64_t Imm = MI->getOperand(2).getImm();
1464     if (Imm <= 0)
1465       report("G_SEXT_INREG size must be >= 1", MI);
1466     if (Imm >= SrcTy.getScalarSizeInBits())
1467       report("G_SEXT_INREG size must be less than source bit width", MI);
1468     break;
1469   }
1470   case TargetOpcode::G_SHUFFLE_VECTOR: {
1471     const MachineOperand &MaskOp = MI->getOperand(3);
1472     if (!MaskOp.isShuffleMask()) {
1473       report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1474       break;
1475     }
1476 
1477     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1478     LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1479     LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1480 
1481     if (Src0Ty != Src1Ty)
1482       report("Source operands must be the same type", MI);
1483 
1484     if (Src0Ty.getScalarType() != DstTy.getScalarType())
1485       report("G_SHUFFLE_VECTOR cannot change element type", MI);
1486 
1487     // Don't check that all operands are vectors because scalars are used in
1488     // place of 1-element vectors.
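    // For example (types are illustrative), with two <2 x s32> sources the mask
    // may use indices 0-3, or -1 for an undef lane:
    //   %3:_(<4 x s32>) = G_SHUFFLE_VECTOR %0:_(<2 x s32>), %1:_(<2 x s32>),
    //                         shufflemask(0, 2, 1, 3)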
1489     int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1490     int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1491 
1492     ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1493 
1494     if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1495       report("Wrong result type for shufflemask", MI);
1496 
1497     for (int Idx : MaskIdxes) {
1498       if (Idx < 0)
1499         continue;
1500 
1501       if (Idx >= 2 * SrcNumElts)
1502         report("Out of bounds shuffle index", MI);
1503     }
1504 
1505     break;
1506   }
1507   case TargetOpcode::G_DYN_STACKALLOC: {
1508     const MachineOperand &DstOp = MI->getOperand(0);
1509     const MachineOperand &AllocOp = MI->getOperand(1);
1510     const MachineOperand &AlignOp = MI->getOperand(2);
1511 
1512     if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1513       report("dst operand 0 must be a pointer type", MI);
1514       break;
1515     }
1516 
1517     if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1518       report("src operand 1 must be a scalar reg type", MI);
1519       break;
1520     }
1521 
1522     if (!AlignOp.isImm()) {
1523       report("src operand 2 must be an immediate type", MI);
1524       break;
1525     }
1526     break;
1527   }
1528   case TargetOpcode::G_MEMCPY_INLINE:
1529   case TargetOpcode::G_MEMCPY:
1530   case TargetOpcode::G_MEMMOVE: {
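    // G_MEMCPY and G_MEMMOVE take (dst pointer, src pointer, size, tail flag)
    // and carry two memory operands: the store on the destination followed by
    // the load from the source. Illustrative sketch (memory operands omitted):
    //   G_MEMCPY %0:_(p0), %1:_(p0), %2:_(s64), 0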
1531     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1532     if (MMOs.size() != 2) {
1533       report("memcpy/memmove must have 2 memory operands", MI);
1534       break;
1535     }
1536 
1537     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1538         (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1539       report("wrong memory operand types", MI);
1540       break;
1541     }
1542 
1543     if (MMOs[0]->getSize() != MMOs[1]->getSize())
1544       report("inconsistent memory operand sizes", MI);
1545 
1546     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1547     LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1548 
1549     if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1550       report("memory instruction operand must be a pointer", MI);
1551       break;
1552     }
1553 
1554     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1555       report("inconsistent store address space", MI);
1556     if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1557       report("inconsistent load address space", MI);
1558 
1559     if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1560       if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1561         report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1562 
1563     break;
1564   }
1565   case TargetOpcode::G_BZERO:
1566   case TargetOpcode::G_MEMSET: {
1567     ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1568     std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1569     if (MMOs.size() != 1) {
1570       report(Twine(Name, " must have 1 memory operand"), MI);
1571       break;
1572     }
1573 
1574     if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1575       report(Twine(Name, " memory operand must be a store"), MI);
1576       break;
1577     }
1578 
1579     LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1580     if (!DstPtrTy.isPointer()) {
1581       report(Twine(Name, " operand must be a pointer"), MI);
1582       break;
1583     }
1584 
1585     if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1586       report("inconsistent " + Twine(Name, " address space"), MI);
1587 
1588     if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1589         (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1590       report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1591 
1592     break;
1593   }
1594   case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1595   case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
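    // Sequential reductions take a scalar start value followed by the vector to
    // reduce, e.g. (types are illustrative)
    //   %2:_(s32) = G_VECREDUCE_SEQ_FADD %0:_(s32), %1:_(<4 x s32>)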
1596     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1597     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1598     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1599     if (!DstTy.isScalar())
1600       report("Vector reduction requires a scalar destination type", MI);
1601     if (!Src1Ty.isScalar())
1602       report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1603     if (!Src2Ty.isVector())
1604       report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1605     break;
1606   }
1607   case TargetOpcode::G_VECREDUCE_FADD:
1608   case TargetOpcode::G_VECREDUCE_FMUL:
1609   case TargetOpcode::G_VECREDUCE_FMAX:
1610   case TargetOpcode::G_VECREDUCE_FMIN:
1611   case TargetOpcode::G_VECREDUCE_ADD:
1612   case TargetOpcode::G_VECREDUCE_MUL:
1613   case TargetOpcode::G_VECREDUCE_AND:
1614   case TargetOpcode::G_VECREDUCE_OR:
1615   case TargetOpcode::G_VECREDUCE_XOR:
1616   case TargetOpcode::G_VECREDUCE_SMAX:
1617   case TargetOpcode::G_VECREDUCE_SMIN:
1618   case TargetOpcode::G_VECREDUCE_UMAX:
1619   case TargetOpcode::G_VECREDUCE_UMIN: {
1620     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1621     if (!DstTy.isScalar())
1622       report("Vector reduction requires a scalar destination type", MI);
1623     break;
1624   }
1625 
1626   case TargetOpcode::G_SBFX:
1627   case TargetOpcode::G_UBFX: {
1628     LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1629     if (DstTy.isVector()) {
1630       report("Bitfield extraction is not supported on vectors", MI);
1631       break;
1632     }
1633     break;
1634   }
1635   case TargetOpcode::G_SHL:
1636   case TargetOpcode::G_LSHR:
1637   case TargetOpcode::G_ASHR:
1638   case TargetOpcode::G_ROTR:
1639   case TargetOpcode::G_ROTL: {
1640     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1641     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1642     if (Src1Ty.isVector() != Src2Ty.isVector()) {
1643       report("Shifts and rotates require operands to be either all scalars or "
1644              "all vectors",
1645              MI);
1646       break;
1647     }
1648     break;
1649   }
1650   case TargetOpcode::G_LLROUND:
1651   case TargetOpcode::G_LROUND: {
1652     verifyAllRegOpsScalar(*MI, *MRI);
1653     break;
1654   }
1655   case TargetOpcode::G_IS_FPCLASS: {
1656     LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
1657     LLT DestEltTy = DestTy.getScalarType();
1658     if (!DestEltTy.isScalar()) {
1659       report("Destination must be a scalar or vector of scalars", MI);
1660       break;
1661     }
1662     LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1663     LLT SrcEltTy = SrcTy.getScalarType();
1664     if (!SrcEltTy.isScalar()) {
1665       report("Source must be a scalar or vector of scalars", MI);
1666       break;
1667     }
1668     if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
1669       break;
1670     const MachineOperand &TestMO = MI->getOperand(2);
1671     if (!TestMO.isImm()) {
1672       report("floating-point class set (operand 2) must be an immediate", MI);
1673       break;
1674     }
1675     int64_t Test = TestMO.getImm();
1676     if (Test < 0 || Test > fcAllFlags) {
1677       report("Incorrect floating-point class set (operand 2)", MI);
1678       break;
1679     }
1680     const MachineOperand &SemanticsMO = MI->getOperand(3);
1681     if (!SemanticsMO.isImm()) {
1682       report("floating-point semantics (operand 3) must be an immediate", MI);
1683       break;
1684     }
1685     int64_t Semantics = SemanticsMO.getImm();
1686     if (Semantics < 0 || Semantics > APFloat::S_MaxSemantics) {
1687       report("Incorrect floating-point semantics (operand 3)", MI);
1688       break;
1689     }
1690     break;
1691   }
1692   default:
1693     break;
1694   }
1695 }
1696 
1697 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1698   const MCInstrDesc &MCID = MI->getDesc();
1699   if (MI->getNumOperands() < MCID.getNumOperands()) {
1700     report("Too few operands", MI);
1701     errs() << MCID.getNumOperands() << " operands expected, but "
1702            << MI->getNumOperands() << " given.\n";
1703   }
1704 
1705   if (MI->isPHI()) {
1706     if (MF->getProperties().hasProperty(
1707             MachineFunctionProperties::Property::NoPHIs))
1708       report("Found PHI instruction with NoPHIs property set", MI);
1709 
1710     if (FirstNonPHI)
1711       report("Found PHI instruction after non-PHI", MI);
1712   } else if (FirstNonPHI == nullptr)
1713     FirstNonPHI = MI;
1714 
1715   // Check the tied operands.
1716   if (MI->isInlineAsm())
1717     verifyInlineAsm(MI);
1718 
1719   // Check that unspillable terminators define a reg and have at most one use.
1720   if (TII->isUnspillableTerminator(MI)) {
1721     if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1722       report("Unspillable Terminator does not define a reg", MI);
1723     Register Def = MI->getOperand(0).getReg();
1724     if (Def.isVirtual() &&
1725         !MF->getProperties().hasProperty(
1726             MachineFunctionProperties::Property::NoPHIs) &&
1727         std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1728       report("Unspillable Terminator expected to have at most one use!", MI);
1729   }
1730 
1731   // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1732   // DBG_VALUEs: these are convenient to use in tests, but should never get
1733   // generated.
1734   if (MI->isDebugValue() && MI->getNumOperands() == 4)
1735     if (!MI->getDebugLoc())
1736       report("Missing DebugLoc for debug instruction", MI);
1737 
1738   // Meta instructions should never be the subject of debug value tracking;
1739   // they don't create a value in the output program at all.
1740   if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1741     report("Metadata instruction should not have a value tracking number", MI);
1742 
1743   // Check the MachineMemOperands for basic consistency.
1744   for (MachineMemOperand *Op : MI->memoperands()) {
1745     if (Op->isLoad() && !MI->mayLoad())
1746       report("Missing mayLoad flag", MI);
1747     if (Op->isStore() && !MI->mayStore())
1748       report("Missing mayStore flag", MI);
1749   }
1750 
1751   // Debug values must not have a slot index.
1752   // Other instructions must have one, unless they are inside a bundle.
1753   if (LiveInts) {
1754     bool mapped = !LiveInts->isNotInMIMap(*MI);
1755     if (MI->isDebugOrPseudoInstr()) {
1756       if (mapped)
1757         report("Debug instruction has a slot index", MI);
1758     } else if (MI->isInsideBundle()) {
1759       if (mapped)
1760         report("Instruction inside bundle has a slot index", MI);
1761     } else {
1762       if (!mapped)
1763         report("Missing slot index", MI);
1764     }
1765   }
1766 
1767   unsigned Opc = MCID.getOpcode();
1768   if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1769     verifyPreISelGenericInstruction(MI);
1770     return;
1771   }
1772 
1773   StringRef ErrorInfo;
1774   if (!TII->verifyInstruction(*MI, ErrorInfo))
1775     report(ErrorInfo.data(), MI);
1776 
1777   // Verify properties of various specific instruction types
1778   switch (MI->getOpcode()) {
1779   case TargetOpcode::COPY: {
1780     const MachineOperand &DstOp = MI->getOperand(0);
1781     const MachineOperand &SrcOp = MI->getOperand(1);
1782     const Register SrcReg = SrcOp.getReg();
1783     const Register DstReg = DstOp.getReg();
1784 
1785     LLT DstTy = MRI->getType(DstReg);
1786     LLT SrcTy = MRI->getType(SrcReg);
1787     if (SrcTy.isValid() && DstTy.isValid()) {
1788       // If both types are valid, check that the types are the same.
1789       if (SrcTy != DstTy) {
1790         report("Copy Instruction is illegal with mismatching types", MI);
1791         errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1792       }
1793 
1794       break;
1795     }
1796 
1797     if (!SrcTy.isValid() && !DstTy.isValid())
1798       break;
1799 
1800     // If we have only one valid type, this is likely a copy between a virtual
1801     // and physical register.
1802     unsigned SrcSize = 0;
1803     unsigned DstSize = 0;
1804     if (SrcReg.isPhysical() && DstTy.isValid()) {
1805       const TargetRegisterClass *SrcRC =
1806           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1807       if (SrcRC)
1808         SrcSize = TRI->getRegSizeInBits(*SrcRC);
1809     }
1810 
1811     if (SrcSize == 0)
1812       SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1813 
1814     if (DstReg.isPhysical() && SrcTy.isValid()) {
1815       const TargetRegisterClass *DstRC =
1816           TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1817       if (DstRC)
1818         DstSize = TRI->getRegSizeInBits(*DstRC);
1819     }
1820 
1821     if (DstSize == 0)
1822       DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1823 
1824     if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1825       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1826         report("Copy Instruction is illegal with mismatching sizes", MI);
1827         errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1828                << "\n";
1829       }
1830     }
1831     break;
1832   }
1833   case TargetOpcode::STATEPOINT: {
1834     StatepointOpers SO(MI);
1835     if (!MI->getOperand(SO.getIDPos()).isImm() ||
1836         !MI->getOperand(SO.getNBytesPos()).isImm() ||
1837         !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1838       report("meta operands to STATEPOINT not constant!", MI);
1839       break;
1840     }
1841 
1842     auto VerifyStackMapConstant = [&](unsigned Offset) {
1843       if (Offset >= MI->getNumOperands()) {
1844         report("stack map constant to STATEPOINT is out of range!", MI);
1845         return;
1846       }
1847       if (!MI->getOperand(Offset - 1).isImm() ||
1848           MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1849           !MI->getOperand(Offset).isImm())
1850         report("stack map constant to STATEPOINT not well formed!", MI);
1851     };
1852     VerifyStackMapConstant(SO.getCCIdx());
1853     VerifyStackMapConstant(SO.getFlagsIdx());
1854     VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1855     VerifyStackMapConstant(SO.getNumGCPtrIdx());
1856     VerifyStackMapConstant(SO.getNumAllocaIdx());
1857     VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1858 
1859     // Verify that all explicit statepoint defs are tied to gc operands, as
1860     // they are expected to be relocations of gc operands.
1861     unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1862     unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1863     for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1864       unsigned UseOpIdx;
1865       if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1866         report("STATEPOINT defs expected to be tied", MI);
1867         break;
1868       }
1869       if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1870         report("STATEPOINT def tied to non-gc operand", MI);
1871         break;
1872       }
1873     }
1874 
1875     // TODO: verify we have properly encoded deopt arguments
1876   } break;
1877   case TargetOpcode::INSERT_SUBREG: {
1878     unsigned InsertedSize;
1879     if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1880       InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1881     else
1882       InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1883     unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1884     if (SubRegSize < InsertedSize) {
1885       report("INSERT_SUBREG expected inserted value to have equal or lesser "
1886              "size than the subreg it was inserted into", MI);
1887       break;
1888     }
1889   } break;
1890   }
1891 }
1892 
1893 void
1894 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1895   const MachineInstr *MI = MO->getParent();
1896   const MCInstrDesc &MCID = MI->getDesc();
1897   unsigned NumDefs = MCID.getNumDefs();
1898   if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1899     NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1900 
1901   // The first MCID.NumDefs operands must be explicit register defines
1902   if (MONum < NumDefs) {
1903     const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1904     if (!MO->isReg())
1905       report("Explicit definition must be a register", MO, MONum);
1906     else if (!MO->isDef() && !MCOI.isOptionalDef())
1907       report("Explicit definition marked as use", MO, MONum);
1908     else if (MO->isImplicit())
1909       report("Explicit definition marked as implicit", MO, MONum);
1910   } else if (MONum < MCID.getNumOperands()) {
1911     const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1912     // Don't check if it's the last operand in a variadic instruction. See,
1913     // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
1914     bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
1915     if (!IsOptional) {
1916       if (MO->isReg()) {
1917         if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
1918           report("Explicit operand marked as def", MO, MONum);
1919         if (MO->isImplicit())
1920           report("Explicit operand marked as implicit", MO, MONum);
1921       }
1922 
1923       // Check that an instruction has register operands only as expected.
1924       if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
1925           !MO->isReg() && !MO->isFI())
1926         report("Expected a register operand.", MO, MONum);
1927       if (MO->isReg()) {
1928         if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
1929             (MCOI.OperandType == MCOI::OPERAND_PCREL &&
1930              !TII->isPCRelRegisterOperandLegal(*MO)))
1931           report("Expected a non-register operand.", MO, MONum);
1932       }
1933     }
1934 
1935     int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
1936     if (TiedTo != -1) {
1937       if (!MO->isReg())
1938         report("Tied use must be a register", MO, MONum);
1939       else if (!MO->isTied())
1940         report("Operand should be tied", MO, MONum);
1941       else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
1942         report("Tied def doesn't match MCInstrDesc", MO, MONum);
1943       else if (Register::isPhysicalRegister(MO->getReg())) {
1944         const MachineOperand &MOTied = MI->getOperand(TiedTo);
1945         if (!MOTied.isReg())
1946           report("Tied counterpart must be a register", &MOTied, TiedTo);
1947         else if (Register::isPhysicalRegister(MOTied.getReg()) &&
1948                  MO->getReg() != MOTied.getReg())
1949           report("Tied physical registers must match.", &MOTied, TiedTo);
1950       }
1951     } else if (MO->isReg() && MO->isTied())
1952       report("Explicit operand should not be tied", MO, MONum);
1953   } else {
1954     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
1955     if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
1956       report("Extra explicit operand on non-variadic instruction", MO, MONum);
1957   }
1958 
1959   switch (MO->getType()) {
1960   case MachineOperand::MO_Register: {
1961     // Verify debug flag on debug instructions. Check this first because reg0
1962     // indicates an undefined debug value.
1963     if (MI->isDebugInstr() && MO->isUse()) {
1964       if (!MO->isDebug())
1965         report("Register operand must be marked debug", MO, MONum);
1966     } else if (MO->isDebug()) {
1967       report("Register operand must not be marked debug", MO, MONum);
1968     }
1969 
1970     const Register Reg = MO->getReg();
1971     if (!Reg)
1972       return;
1973     if (MRI->tracksLiveness() && !MI->isDebugInstr())
1974       checkLiveness(MO, MONum);
1975 
1976     if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
1977         MO->getReg().isVirtual()) // TODO: Apply to physregs too
1978       report("Undef virtual register def operands require a subregister", MO, MONum);
1979 
1980     // Verify the consistency of tied operands.
1981     if (MO->isTied()) {
1982       unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
1983       const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
1984       if (!OtherMO.isReg())
1985         report("Must be tied to a register", MO, MONum);
1986       if (!OtherMO.isTied())
1987         report("Missing tie flags on tied operand", MO, MONum);
1988       if (MI->findTiedOperandIdx(OtherIdx) != MONum)
1989         report("Inconsistent tie links", MO, MONum);
1990       if (MONum < MCID.getNumDefs()) {
1991         if (OtherIdx < MCID.getNumOperands()) {
1992           if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
1993             report("Explicit def tied to explicit use without tie constraint",
1994                    MO, MONum);
1995         } else {
1996           if (!OtherMO.isImplicit())
1997             report("Explicit def should be tied to implicit use", MO, MONum);
1998         }
1999       }
2000     }
2001 
2002     // Verify two-address constraints after the twoaddressinstruction pass.
2003     // Both the twoaddressinstruction pass and the phi-node-elimination pass
2004     // call MRI->leaveSSA() to mark MF as NoSSA, but this verification should
2005     // run after the twoaddressinstruction pass, not after phi-node-elimination.
2006     // Therefore we don't key this check on NoSSA; instead we rely on the
2007     // TiedOpsRewritten property, which is set by the twoaddressinstruction
2008     // pass.
2009     unsigned DefIdx;
2010     if (MF->getProperties().hasProperty(
2011             MachineFunctionProperties::Property::TiedOpsRewritten) &&
2012         MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
2013         Reg != MI->getOperand(DefIdx).getReg())
2014       report("Two-address instruction operands must be identical", MO, MONum);
2015 
2016     // Check register classes.
2017     unsigned SubIdx = MO->getSubReg();
2018 
2019     if (Register::isPhysicalRegister(Reg)) {
2020       if (SubIdx) {
2021         report("Illegal subregister index for physical register", MO, MONum);
2022         return;
2023       }
2024       if (MONum < MCID.getNumOperands()) {
2025         if (const TargetRegisterClass *DRC =
2026               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2027           if (!DRC->contains(Reg)) {
2028             report("Illegal physical register for instruction", MO, MONum);
2029             errs() << printReg(Reg, TRI) << " is not a "
2030                    << TRI->getRegClassName(DRC) << " register.\n";
2031           }
2032         }
2033       }
2034       if (MO->isRenamable()) {
2035         if (MRI->isReserved(Reg)) {
2036           report("isRenamable set on reserved register", MO, MONum);
2037           return;
2038         }
2039       }
2040     } else {
2041       // Virtual register.
2042       const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2043       if (!RC) {
2044         // This is a generic virtual register.
2045 
2046         // Do not allow undef uses for generic virtual registers. This ensures
2047         // getVRegDef can never fail and return null on a generic register.
2048         //
2049         // FIXME: This restriction should probably be broadened to all SSA
2050         // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2051         // run on the SSA function just before phi elimination.
2052         if (MO->isUndef())
2053           report("Generic virtual register use cannot be undef", MO, MONum);
2054 
2055         // Debug value instructions are permitted to use undefined vregs.
2056         // This is a performance measure to skip the overhead of immediately
2057         // pruning unused debug operands. The final undef substitution occurs
2058         // when debug values are allocated in LDVImpl::handleDebugValue, so
2059         // these verifications always apply after this pass.
2060         if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2061             !MI->isDebugValue() || !MRI->def_empty(Reg)) {
2062           // If we're post-Select, we can't have gvregs anymore.
2063           if (isFunctionSelected) {
2064             report("Generic virtual register invalid in a Selected function",
2065                    MO, MONum);
2066             return;
2067           }
2068 
2069           // The gvreg must have a type and it must not have a SubIdx.
2070           LLT Ty = MRI->getType(Reg);
2071           if (!Ty.isValid()) {
2072             report("Generic virtual register must have a valid type", MO,
2073                    MONum);
2074             return;
2075           }
2076 
2077           const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2078 
2079           // If we're post-RegBankSelect, the gvreg must have a bank.
2080           if (!RegBank && isFunctionRegBankSelected) {
2081             report("Generic virtual register must have a bank in a "
2082                    "RegBankSelected function",
2083                    MO, MONum);
2084             return;
2085           }
2086 
2087           // Make sure the register fits into its register bank if any.
2088           if (RegBank && Ty.isValid() &&
2089               RegBank->getSize() < Ty.getSizeInBits()) {
2090             report("Register bank is too small for virtual register", MO,
2091                    MONum);
2092             errs() << "Register bank " << RegBank->getName() << " too small("
2093                    << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2094                    << "-bits\n";
2095             return;
2096           }
2097         }
2098 
2099         if (SubIdx) {
2100           report("Generic virtual register does not allow subregister index", MO,
2101                  MONum);
2102           return;
2103         }
2104 
2105         // If this is a target-specific instruction and this operand
2106         // has a register class constraint, the virtual register must
2107         // comply with it.
2108         if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2109             MONum < MCID.getNumOperands() &&
2110             TII->getRegClass(MCID, MONum, TRI, *MF)) {
2111           report("Virtual register does not match instruction constraint", MO,
2112                  MONum);
2113           errs() << "Expect register class "
2114                  << TRI->getRegClassName(
2115                         TII->getRegClass(MCID, MONum, TRI, *MF))
2116                  << " but got nothing\n";
2117           return;
2118         }
2119 
2120         break;
2121       }
2122       if (SubIdx) {
2123         const TargetRegisterClass *SRC =
2124           TRI->getSubClassWithSubReg(RC, SubIdx);
2125         if (!SRC) {
2126           report("Invalid subregister index for virtual register", MO, MONum);
2127           errs() << "Register class " << TRI->getRegClassName(RC)
2128               << " does not support subreg index " << SubIdx << "\n";
2129           return;
2130         }
2131         if (RC != SRC) {
2132           report("Invalid register class for subregister index", MO, MONum);
2133           errs() << "Register class " << TRI->getRegClassName(RC)
2134               << " does not fully support subreg index " << SubIdx << "\n";
2135           return;
2136         }
2137       }
2138       if (MONum < MCID.getNumOperands()) {
2139         if (const TargetRegisterClass *DRC =
2140               TII->getRegClass(MCID, MONum, TRI, *MF)) {
2141           if (SubIdx) {
2142             const TargetRegisterClass *SuperRC =
2143                 TRI->getLargestLegalSuperClass(RC, *MF);
2144             if (!SuperRC) {
2145               report("No largest legal super class exists.", MO, MONum);
2146               return;
2147             }
2148             DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2149             if (!DRC) {
2150               report("No matching super-reg register class.", MO, MONum);
2151               return;
2152             }
2153           }
2154           if (!RC->hasSuperClassEq(DRC)) {
2155             report("Illegal virtual register for instruction", MO, MONum);
2156             errs() << "Expected a " << TRI->getRegClassName(DRC)
2157                 << " register, but got a " << TRI->getRegClassName(RC)
2158                 << " register\n";
2159           }
2160         }
2161       }
2162     }
2163     break;
2164   }
2165 
2166   case MachineOperand::MO_RegisterMask:
2167     regMasks.push_back(MO->getRegMask());
2168     break;
2169 
2170   case MachineOperand::MO_MachineBasicBlock:
2171     if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2172       report("PHI operand is not in the CFG", MO, MONum);
2173     break;
2174 
2175   case MachineOperand::MO_FrameIndex:
2176     if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2177         LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2178       int FI = MO->getIndex();
2179       LiveInterval &LI = LiveStks->getInterval(FI);
2180       SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2181 
2182       bool stores = MI->mayStore();
2183       bool loads = MI->mayLoad();
2184       // For a memory-to-memory move, we need to check if the frame
2185       // index is used for storing or loading, by inspecting the
2186       // memory operands.
2187       if (stores && loads) {
2188         for (auto *MMO : MI->memoperands()) {
2189           const PseudoSourceValue *PSV = MMO->getPseudoValue();
2190           if (PSV == nullptr) continue;
2191           const FixedStackPseudoSourceValue *Value =
2192             dyn_cast<FixedStackPseudoSourceValue>(PSV);
2193           if (Value == nullptr) continue;
2194           if (Value->getFrameIndex() != FI) continue;
2195 
2196           if (MMO->isStore())
2197             loads = false;
2198           else
2199             stores = false;
2200           break;
2201         }
2202         if (loads == stores)
2203           report("Missing fixed stack memoperand.", MI);
2204       }
2205       if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2206         report("Instruction loads from dead spill slot", MO, MONum);
2207         errs() << "Live stack: " << LI << '\n';
2208       }
2209       if (stores && !LI.liveAt(Idx.getRegSlot())) {
2210         report("Instruction stores to dead spill slot", MO, MONum);
2211         errs() << "Live stack: " << LI << '\n';
2212       }
2213     }
2214     break;
2215 
2216   case MachineOperand::MO_CFIIndex:
2217     if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2218       report("CFI instruction has invalid index", MO, MONum);
2219     break;
2220 
2221   default:
2222     break;
2223   }
2224 }
2225 
2226 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2227                                          unsigned MONum, SlotIndex UseIdx,
2228                                          const LiveRange &LR,
2229                                          Register VRegOrUnit,
2230                                          LaneBitmask LaneMask) {
2231   LiveQueryResult LRQ = LR.Query(UseIdx);
2232   // Check if we have a segment at the use. Note, however, that we only need
2233   // one live subregister range; the others may be dead.
2234   if (!LRQ.valueIn() && LaneMask.none()) {
2235     report("No live segment at use", MO, MONum);
2236     report_context_liverange(LR);
2237     report_context_vreg_regunit(VRegOrUnit);
2238     report_context(UseIdx);
2239   }
2240   if (MO->isKill() && !LRQ.isKill()) {
2241     report("Live range continues after kill flag", MO, MONum);
2242     report_context_liverange(LR);
2243     report_context_vreg_regunit(VRegOrUnit);
2244     if (LaneMask.any())
2245       report_context_lanemask(LaneMask);
2246     report_context(UseIdx);
2247   }
2248 }
2249 
2250 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2251                                          unsigned MONum, SlotIndex DefIdx,
2252                                          const LiveRange &LR,
2253                                          Register VRegOrUnit,
2254                                          bool SubRangeCheck,
2255                                          LaneBitmask LaneMask) {
2256   if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2257     assert(VNI && "NULL valno is not allowed");
2258     if (VNI->def != DefIdx) {
2259       report("Inconsistent valno->def", MO, MONum);
2260       report_context_liverange(LR);
2261       report_context_vreg_regunit(VRegOrUnit);
2262       if (LaneMask.any())
2263         report_context_lanemask(LaneMask);
2264       report_context(*VNI);
2265       report_context(DefIdx);
2266     }
2267   } else {
2268     report("No live segment at def", MO, MONum);
2269     report_context_liverange(LR);
2270     report_context_vreg_regunit(VRegOrUnit);
2271     if (LaneMask.any())
2272       report_context_lanemask(LaneMask);
2273     report_context(DefIdx);
2274   }
2275   // Check that, if the dead def flag is present, LiveInts agree.
2276   if (MO->isDead()) {
2277     LiveQueryResult LRQ = LR.Query(DefIdx);
2278     if (!LRQ.isDeadDef()) {
2279       assert(Register::isVirtualRegister(VRegOrUnit) &&
2280              "Expecting a virtual register.");
2281       // A dead subreg def only tells us that the specific subreg is dead. There
2282       // could be other non-dead defs of other subregs, or we could have other
2283       // parts of the register being live through the instruction. So unless we
2284       // are checking liveness for a subrange it is ok for the live range to
2285       // continue, given that we have a dead def of a subregister.
2286       if (SubRangeCheck || MO->getSubReg() == 0) {
2287         report("Live range continues after dead def flag", MO, MONum);
2288         report_context_liverange(LR);
2289         report_context_vreg_regunit(VRegOrUnit);
2290         if (LaneMask.any())
2291           report_context_lanemask(LaneMask);
2292       }
2293     }
2294   }
2295 }
2296 
2297 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2298   const MachineInstr *MI = MO->getParent();
2299   const Register Reg = MO->getReg();
2300   const unsigned SubRegIdx = MO->getSubReg();
2301 
2302   const LiveInterval *LI = nullptr;
2303   if (LiveInts && Reg.isVirtual()) {
2304     if (LiveInts->hasInterval(Reg)) {
2305       LI = &LiveInts->getInterval(Reg);
2306       if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2307           !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2308         report("Live interval for subreg operand has no subranges", MO, MONum);
2309     } else {
2310       report("Virtual register has no live interval", MO, MONum);
2311     }
2312   }
2313 
2314   // Both use and def operands can read a register.
2315   if (MO->readsReg()) {
2316     if (MO->isKill())
2317       addRegWithSubRegs(regsKilled, Reg);
2318 
2319     // Check that LiveVars knows this kill (unless we are inside a bundle, in
2320     // which case we have already checked that LiveVars knows any kills on the
2321     // bundle header instead).
2322     if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2323         !MI->isBundledWithPred()) {
2324       LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2325       if (!is_contained(VI.Kills, MI))
2326         report("Kill missing from LiveVariables", MO, MONum);
2327     }
2328 
2329     // Check LiveInts liveness and kill.
2330     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2331       SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2332       // Check the cached regunit intervals.
2333       if (Reg.isPhysical() && !isReserved(Reg)) {
2334         for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2335              ++Units) {
2336           if (MRI->isReservedRegUnit(*Units))
2337             continue;
2338           if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2339             checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2340         }
2341       }
2342 
2343       if (Reg.isVirtual()) {
2344         // This is a virtual register interval.
2345         checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2346 
2347         if (LI->hasSubRanges() && !MO->isDef()) {
2348           LaneBitmask MOMask = SubRegIdx != 0
2349                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2350                                    : MRI->getMaxLaneMaskForVReg(Reg);
2351           LaneBitmask LiveInMask;
2352           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2353             if ((MOMask & SR.LaneMask).none())
2354               continue;
2355             checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2356             LiveQueryResult LRQ = SR.Query(UseIdx);
2357             if (LRQ.valueIn())
2358               LiveInMask |= SR.LaneMask;
2359           }
2360           // At least part of the register has to be live at the use.
2361           if ((LiveInMask & MOMask).none()) {
2362             report("No live subrange at use", MO, MONum);
2363             report_context(*LI);
2364             report_context(UseIdx);
2365           }
2366         }
2367       }
2368     }
2369 
2370     // Use of a dead register.
2371     if (!regsLive.count(Reg)) {
2372       if (Reg.isPhysical()) {
2373         // Reserved registers may be used even when 'dead'.
2374         bool Bad = !isReserved(Reg);
2375         // We are fine if just any subregister has a defined value.
2376         if (Bad) {
2378           for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2379             if (regsLive.count(SubReg)) {
2380               Bad = false;
2381               break;
2382             }
2383           }
2384         }
2385         // If there is an additional implicit use of a super register we stop
2386         // here. By definition we are fine if the super register is not
2387         // (completely) dead; if the complete super register is dead we will
2388         // get a report for its operand.
2389         if (Bad) {
2390           for (const MachineOperand &MOP : MI->uses()) {
2391             if (!MOP.isReg() || !MOP.isImplicit())
2392               continue;
2393 
2394             if (!MOP.getReg().isPhysical())
2395               continue;
2396 
2397             if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2398               Bad = false;
2399           }
2400         }
2401         if (Bad)
2402           report("Using an undefined physical register", MO, MONum);
2403       } else if (MRI->def_empty(Reg)) {
2404         report("Reading virtual register without a def", MO, MONum);
2405       } else {
2406         BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2407         // We don't know which virtual registers are live in, so only complain
2408         // if vreg was killed in this MBB. Otherwise keep track of vregs that
2409         // must be live in. PHI instructions are handled separately.
2410         if (MInfo.regsKilled.count(Reg))
2411           report("Using a killed virtual register", MO, MONum);
2412         else if (!MI->isPHI())
2413           MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2414       }
2415     }
2416   }
2417 
2418   if (MO->isDef()) {
2419     // Register defined.
2420     // TODO: verify that earlyclobber ops are not used.
2421     if (MO->isDead())
2422       addRegWithSubRegs(regsDead, Reg);
2423     else
2424       addRegWithSubRegs(regsDefined, Reg);
2425 
2426     // Verify SSA form.
2427     if (MRI->isSSA() && Reg.isVirtual() &&
2428         std::next(MRI->def_begin(Reg)) != MRI->def_end())
2429       report("Multiple virtual register defs in SSA form", MO, MONum);
2430 
2431     // Check LiveInts for a live segment, but only for virtual registers.
2432     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2433       SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2434       DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2435 
2436       if (Reg.isVirtual()) {
2437         checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2438 
2439         if (LI->hasSubRanges()) {
2440           LaneBitmask MOMask = SubRegIdx != 0
2441                                    ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2442                                    : MRI->getMaxLaneMaskForVReg(Reg);
2443           for (const LiveInterval::SubRange &SR : LI->subranges()) {
2444             if ((SR.LaneMask & MOMask).none())
2445               continue;
2446             checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2447           }
2448         }
2449       }
2450     }
2451   }
2452 }
2453 
2454 // This function gets called after visiting all instructions in a bundle. The
2455 // argument points to the bundle header.
2456 // Normal stand-alone instructions are also considered 'bundles', and this
2457 // function is called for all of them.
2458 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2459   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2460   set_union(MInfo.regsKilled, regsKilled);
2461   set_subtract(regsLive, regsKilled); regsKilled.clear();
2462   // Kill any masked registers.
2463   while (!regMasks.empty()) {
2464     const uint32_t *Mask = regMasks.pop_back_val();
2465     for (Register Reg : regsLive)
2466       if (Reg.isPhysical() &&
2467           MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2468         regsDead.push_back(Reg);
2469   }
2470   set_subtract(regsLive, regsDead);   regsDead.clear();
2471   set_union(regsLive, regsDefined);   regsDefined.clear();
2472 }
2473 
2474 void
2475 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2476   MBBInfoMap[MBB].regsLiveOut = regsLive;
2477   regsLive.clear();
2478 
2479   if (Indexes) {
2480     SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2481     if (!(stop > lastIndex)) {
2482       report("Block ends before last instruction index", MBB);
2483       errs() << "Block ends at " << stop
2484           << " last instruction was at " << lastIndex << '\n';
2485     }
2486     lastIndex = stop;
2487   }
2488 }
2489 
2490 namespace {
2491 // This implements a set of registers that serves as a filter: it can filter
2492 // other sets by passing through elements not in the filter and blocking those
2493 // that are. Any filter implicitly includes the full set of physical registers
2494 // upon creation, thus filtering them all out. The filter itself as a set only
2495 // grows, and needs to be as efficient as possible.
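//
// A minimal usage sketch (the names below are illustrative only):
//   VRegFilter Filter;
//   Filter.add(AlreadySeen);                  // grow the filter itself
//   SmallVector<Register, 0> NewVRegs;
//   Filter.filterAndAdd(Incoming, NewVRegs);  // append only unseen vregs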
2496 struct VRegFilter {
2497   // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2498   // no duplicates. Both virtual and physical registers are fine.
2499   template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2500     SmallVector<Register, 0> VRegsBuffer;
2501     filterAndAdd(FromRegSet, VRegsBuffer);
2502   }
2503   // Filter \p FromRegSet through the filter and append passed elements into \p
2504   // ToVRegs. All elements appended are then added to the filter itself.
2505   // \returns true if anything changed.
2506   template <typename RegSetT>
2507   bool filterAndAdd(const RegSetT &FromRegSet,
2508                     SmallVectorImpl<Register> &ToVRegs) {
2509     unsigned SparseUniverse = Sparse.size();
2510     unsigned NewSparseUniverse = SparseUniverse;
2511     unsigned NewDenseSize = Dense.size();
2512     size_t Begin = ToVRegs.size();
2513     for (Register Reg : FromRegSet) {
2514       if (!Reg.isVirtual())
2515         continue;
2516       unsigned Index = Register::virtReg2Index(Reg);
2517       if (Index < SparseUniverseMax) {
2518         if (Index < SparseUniverse && Sparse.test(Index))
2519           continue;
2520         NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2521       } else {
2522         if (Dense.count(Reg))
2523           continue;
2524         ++NewDenseSize;
2525       }
2526       ToVRegs.push_back(Reg);
2527     }
2528     size_t End = ToVRegs.size();
2529     if (Begin == End)
2530       return false;
2531     // Reserving space in sets once performs better than doing so continuously
2532     // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2533     // tuned all the way down) and double iteration (the second one is over a
2534     // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2535     Sparse.resize(NewSparseUniverse);
2536     Dense.reserve(NewDenseSize);
2537     for (unsigned I = Begin; I < End; ++I) {
2538       Register Reg = ToVRegs[I];
2539       unsigned Index = Register::virtReg2Index(Reg);
2540       if (Index < SparseUniverseMax)
2541         Sparse.set(Index);
2542       else
2543         Dense.insert(Reg);
2544     }
2545     return true;
2546   }
2547 
2548 private:
2549   static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2550   // VRegs indexed within SparseUniverseMax are tracked by Sparse; those beyond
2551   // are tracked by Dense. The only purpose of the threshold and the Dense set
2552   // is to have a reasonably growing memory usage in pathological cases (large
2553   // number of very sparse VRegFilter instances live at the same time). In
2554   // practice even in the worst-by-execution time cases having all elements
2555   // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2556   // space efficient than if tracked by Dense. The threshold is set to keep the
2557   // worst-case memory usage within 2x of figures determined empirically for
2558   // "all Dense" scenario in such worst-by-execution-time cases.
2559   BitVector Sparse;
2560   DenseSet<unsigned> Dense;
2561 };
2562 
2563 // Implements both a transfer function and a (binary, in-place) join operator
2564 // for a dataflow over register sets with set union join and filtering transfer
2565 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2566 // Maintains out_b as its state, allowing for O(n) iteration over it at any
2567 // time, where n is the size of the set (as opposed to O(U) where U is the
2568 // universe). filter_b implicitly contains all physical registers at all times.
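//
// A minimal usage sketch (the names below are illustrative only):
//   FilteringVRegSet Out;
//   Out.addToFilter(Killed);      // set up filter_b
//   Out.add(PredLiveOut);         // out_b |= PredLiveOut \ filter_b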
2569 class FilteringVRegSet {
2570   VRegFilter Filter;
2571   SmallVector<Register, 0> VRegs;
2572 
2573 public:
2574   // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2575   // Both virtual and physical registers are fine.
2576   template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2577     Filter.add(RS);
2578   }
2579   // Passes \p RS through the filter_b (transfer function) and adds what's left
2580   // to itself (out_b).
2581   template <typename RegSetT> bool add(const RegSetT &RS) {
2582     // Double-duty the Filter: to maintain VRegs a set (and the join operation
2583     // a set union) just add everything being added here to the Filter as well.
2584     return Filter.filterAndAdd(RS, VRegs);
2585   }
2586   using const_iterator = decltype(VRegs)::const_iterator;
2587   const_iterator begin() const { return VRegs.begin(); }
2588   const_iterator end() const { return VRegs.end(); }
2589   size_t size() const { return VRegs.size(); }
2590 };
2591 } // namespace
2592 
2593 // Calculate the largest possible vregsPassed sets. These are the registers that
2594 // can pass through an MBB live, but may not be live every time. It is assumed
2595 // that all vregsPassed sets are empty before the call.
2596 void MachineVerifier::calcRegsPassed() {
2597   if (MF->empty())
2598     // ReversePostOrderTraversal doesn't handle empty functions.
2599     return;
2600 
2601   for (const MachineBasicBlock *MB :
2602        ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2603     FilteringVRegSet VRegs;
2604     BBInfo &Info = MBBInfoMap[MB];
2605     assert(Info.reachable);
2606 
2607     VRegs.addToFilter(Info.regsKilled);
2608     VRegs.addToFilter(Info.regsLiveOut);
2609     for (const MachineBasicBlock *Pred : MB->predecessors()) {
2610       const BBInfo &PredInfo = MBBInfoMap[Pred];
2611       if (!PredInfo.reachable)
2612         continue;
2613 
2614       VRegs.add(PredInfo.regsLiveOut);
2615       VRegs.add(PredInfo.vregsPassed);
2616     }
2617     Info.vregsPassed.reserve(VRegs.size());
2618     Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2619   }
2620 }
2621 
2622 // Calculate the set of virtual registers that must be passed through each basic
2623 // block in order to satisfy the requirements of successor blocks. This is very
2624 // similar to calcRegsPassed, only backwards.
2625 void MachineVerifier::calcRegsRequired() {
2626   // First push live-in regs to predecessors' vregsRequired.
2627   SmallPtrSet<const MachineBasicBlock*, 8> todo;
2628   for (const auto &MBB : *MF) {
2629     BBInfo &MInfo = MBBInfoMap[&MBB];
2630     for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2631       BBInfo &PInfo = MBBInfoMap[Pred];
2632       if (PInfo.addRequired(MInfo.vregsLiveIn))
2633         todo.insert(Pred);
2634     }
2635 
2636     // Handle the PHI node.
2637     for (const MachineInstr &MI : MBB.phis()) {
2638       for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2639         // Skip operands that are not registers or that do not read (e.g. undef).
2640         if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2641           continue;
2642 
2643         // Get register and predecessor for one PHI edge.
2644         Register Reg = MI.getOperand(i).getReg();
2645         const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2646 
2647         BBInfo &PInfo = MBBInfoMap[Pred];
2648         if (PInfo.addRequired(Reg))
2649           todo.insert(Pred);
2650       }
2651     }
2652   }
2653 
2654   // Iteratively push vregsRequired to predecessors. This will converge to the
2655   // same final state regardless of DenseSet iteration order.
2656   while (!todo.empty()) {
2657     const MachineBasicBlock *MBB = *todo.begin();
2658     todo.erase(MBB);
2659     BBInfo &MInfo = MBBInfoMap[MBB];
2660     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2661       if (Pred == MBB)
2662         continue;
2663       BBInfo &SInfo = MBBInfoMap[Pred];
2664       if (SInfo.addRequired(MInfo.vregsRequired))
2665         todo.insert(Pred);
2666     }
2667   }
2668 }
2669 
2670 // Check PHI instructions at the beginning of MBB. It is assumed that
2671 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2672 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2673   BBInfo &MInfo = MBBInfoMap[&MBB];
2674 
2675   SmallPtrSet<const MachineBasicBlock*, 8> seen;
2676   for (const MachineInstr &Phi : MBB) {
2677     if (!Phi.isPHI())
2678       break;
2679     seen.clear();
2680 
2681     const MachineOperand &MODef = Phi.getOperand(0);
2682     if (!MODef.isReg() || !MODef.isDef()) {
2683       report("Expected first PHI operand to be a register def", &MODef, 0);
2684       continue;
2685     }
2686     if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2687         MODef.isEarlyClobber() || MODef.isDebug())
2688       report("Unexpected flag on PHI operand", &MODef, 0);
2689     Register DefReg = MODef.getReg();
2690     if (!Register::isVirtualRegister(DefReg))
2691       report("Expected first PHI operand to be a virtual register", &MODef, 0);
2692 
2693     for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2694       const MachineOperand &MO0 = Phi.getOperand(I);
2695       if (!MO0.isReg()) {
2696         report("Expected PHI operand to be a register", &MO0, I);
2697         continue;
2698       }
2699       if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2700           MO0.isDebug() || MO0.isTied())
2701         report("Unexpected flag on PHI operand", &MO0, I);
2702 
2703       const MachineOperand &MO1 = Phi.getOperand(I + 1);
2704       if (!MO1.isMBB()) {
2705         report("Expected PHI operand to be a basic block", &MO1, I + 1);
2706         continue;
2707       }
2708 
2709       const MachineBasicBlock &Pre = *MO1.getMBB();
2710       if (!Pre.isSuccessor(&MBB)) {
2711         report("PHI input is not a predecessor block", &MO1, I + 1);
2712         continue;
2713       }
2714 
2715       if (MInfo.reachable) {
2716         seen.insert(&Pre);
2717         BBInfo &PrInfo = MBBInfoMap[&Pre];
2718         if (!MO0.isUndef() && PrInfo.reachable &&
2719             !PrInfo.isLiveOut(MO0.getReg()))
2720           report("PHI operand is not live-out from predecessor", &MO0, I);
2721       }
2722     }
2723 
2724     // Did we see all predecessors?
2725     if (MInfo.reachable) {
2726       for (MachineBasicBlock *Pred : MBB.predecessors()) {
2727         if (!seen.count(Pred)) {
2728           report("Missing PHI operand", &Phi);
2729           errs() << printMBBReference(*Pred)
2730                  << " is a predecessor according to the CFG.\n";
2731         }
2732       }
2733     }
2734   }
2735 }
2736 
2737 void MachineVerifier::visitMachineFunctionAfter() {
2738   calcRegsPassed();
2739 
2740   for (const MachineBasicBlock &MBB : *MF)
2741     checkPHIOps(MBB);
2742 
2743   // Now check liveness info if available
2744   calcRegsRequired();
2745 
2746   // Check for killed virtual registers that should be live out.
2747   for (const auto &MBB : *MF) {
2748     BBInfo &MInfo = MBBInfoMap[&MBB];
2749     for (Register VReg : MInfo.vregsRequired)
2750       if (MInfo.regsKilled.count(VReg)) {
2751         report("Virtual register killed in block, but needed live out.", &MBB);
2752         errs() << "Virtual register " << printReg(VReg)
2753                << " is used after the block.\n";
2754       }
2755   }
2756 
2757   if (!MF->empty()) {
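  // Any virtual register still required live-in to the entry block has a use
  // that no def dominates.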
2758     BBInfo &MInfo = MBBInfoMap[&MF->front()];
2759     for (Register VReg : MInfo.vregsRequired) {
2760       report("Virtual register defs don't dominate all uses.", MF);
2761       report_context_vreg(VReg);
2762     }
2763   }
2764 
2765   if (LiveVars)
2766     verifyLiveVariables();
2767   if (LiveInts)
2768     verifyLiveIntervals();
2769 
2770   // Check live-in list of each MBB. If a register is live into MBB, check
2771   // that the register is in regsLiveOut of each predecessor block. Since
2772   // this must come from a definition in the predecessor or its live-in
2773   // list, this will catch a live-through case where the predecessor does not
2774   // have the register in its live-in list.  This currently only checks
2775   // registers that have no aliases, are not allocatable and are not
2776   // reserved, such as a condition code register.
2777   if (MRI->tracksLiveness())
2778     for (const auto &MBB : *MF)
2779       for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2780         MCPhysReg LiveInReg = P.PhysReg;
2781         bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2782         if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2783           continue;
2784         for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2785           BBInfo &PInfo = MBBInfoMap[Pred];
2786           if (!PInfo.regsLiveOut.count(LiveInReg)) {
2787             report("Live in register not found to be live out from predecessor.",
2788                    &MBB);
2789             errs() << TRI->getName(LiveInReg)
2790                    << " not found to be live out from "
2791                    << printMBBReference(*Pred) << "\n";
2792           }
2793         }
2794       }
2795 
2796   for (auto CSInfo : MF->getCallSitesInfo())
2797     if (!CSInfo.first->isCall())
2798       report("Call site info referencing instruction that is not call", MF);
2799 
2800   // If there's debug-info, check that we don't have any duplicate value
2801   // tracking numbers.
2802   if (MF->getFunction().getSubprogram()) {
2803     DenseSet<unsigned> SeenNumbers;
2804     for (auto &MBB : *MF) {
2805       for (auto &MI : MBB) {
2806         if (auto Num = MI.peekDebugInstrNum()) {
2807           auto Result = SeenNumbers.insert((unsigned)Num);
2808           if (!Result.second)
2809             report("Instruction has a duplicated value tracking number", &MI);
2810         }
2811       }
2812     }
2813   }
2814 }
2815 
2816 void MachineVerifier::verifyLiveVariables() {
2817   assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2818   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2819     Register Reg = Register::index2VirtReg(I);
2820     LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2821     for (const auto &MBB : *MF) {
2822       BBInfo &MInfo = MBBInfoMap[&MBB];
2823 
2824       // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2825       if (MInfo.vregsRequired.count(Reg)) {
2826         if (!VI.AliveBlocks.test(MBB.getNumber())) {
2827           report("LiveVariables: Block missing from AliveBlocks", &MBB);
2828           errs() << "Virtual register " << printReg(Reg)
2829                  << " must be live through the block.\n";
2830         }
2831       } else {
2832         if (VI.AliveBlocks.test(MBB.getNumber())) {
2833           report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2834           errs() << "Virtual register " << printReg(Reg)
2835                  << " is not needed live through the block.\n";
2836         }
2837       }
2838     }
2839   }
2840 }
2841 
2842 void MachineVerifier::verifyLiveIntervals() {
2843   assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2844   for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2845     Register Reg = Register::index2VirtReg(I);
2846 
2847     // Spilling and splitting may leave unused registers around. Skip them.
2848     if (MRI->reg_nodbg_empty(Reg))
2849       continue;
2850 
2851     if (!LiveInts->hasInterval(Reg)) {
2852       report("Missing live interval for virtual register", MF);
2853       errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2854       continue;
2855     }
2856 
2857     const LiveInterval &LI = LiveInts->getInterval(Reg);
2858     assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2859     verifyLiveInterval(LI);
2860   }
2861 
2862   // Verify all the cached regunit intervals.
2863   for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2864     if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2865       verifyLiveRange(*LR, i);
2866 }
2867 
2868 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2869                                            const VNInfo *VNI, Register Reg,
2870                                            LaneBitmask LaneMask) {
2871   if (VNI->isUnused())
2872     return;
2873 
2874   const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2875 
2876   if (!DefVNI) {
2877     report("Value not live at VNInfo def and not marked unused", MF);
2878     report_context(LR, Reg, LaneMask);
2879     report_context(*VNI);
2880     return;
2881   }
2882 
2883   if (DefVNI != VNI) {
2884     report("Live segment at def has different VNInfo", MF);
2885     report_context(LR, Reg, LaneMask);
2886     report_context(*VNI);
2887     return;
2888   }
2889 
2890   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2891   if (!MBB) {
2892     report("Invalid VNInfo definition index", MF);
2893     report_context(LR, Reg, LaneMask);
2894     report_context(*VNI);
2895     return;
2896   }
2897 
2898   if (VNI->isPHIDef()) {
2899     if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
2900       report("PHIDef VNInfo is not defined at MBB start", MBB);
2901       report_context(LR, Reg, LaneMask);
2902       report_context(*VNI);
2903     }
2904     return;
2905   }
2906 
2907   // Non-PHI def.
2908   const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
2909   if (!MI) {
2910     report("No instruction at VNInfo def index", MBB);
2911     report_context(LR, Reg, LaneMask);
2912     report_context(*VNI);
2913     return;
2914   }
2915 
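  // For a concrete vreg or regunit (Reg != 0), the instruction at the def
  // index must actually write that register, or at least one of the
  // requested lanes when a lane mask is given.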
2916   if (Reg != 0) {
2917     bool hasDef = false;
2918     bool isEarlyClobber = false;
2919     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2920       if (!MOI->isReg() || !MOI->isDef())
2921         continue;
2922       if (Register::isVirtualRegister(Reg)) {
2923         if (MOI->getReg() != Reg)
2924           continue;
2925       } else {
2926         if (!Register::isPhysicalRegister(MOI->getReg()) ||
2927             !TRI->hasRegUnit(MOI->getReg(), Reg))
2928           continue;
2929       }
2930       if (LaneMask.any() &&
2931           (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
2932         continue;
2933       hasDef = true;
2934       if (MOI->isEarlyClobber())
2935         isEarlyClobber = true;
2936     }
2937 
2938     if (!hasDef) {
2939       report("Defining instruction does not modify register", MI);
2940       report_context(LR, Reg, LaneMask);
2941       report_context(*VNI);
2942     }
2943 
2944     // Early clobber defs begin at early-clobber slots, but other defs must
2945     // begin at register slots.
2946     if (isEarlyClobber) {
2947       if (!VNI->def.isEarlyClobber()) {
2948         report("Early clobber def must be at an early-clobber slot", MBB);
2949         report_context(LR, Reg, LaneMask);
2950         report_context(*VNI);
2951       }
2952     } else if (!VNI->def.isRegister()) {
2953       report("Non-PHI, non-early clobber def must be at a register slot", MBB);
2954       report_context(LR, Reg, LaneMask);
2955       report_context(*VNI);
2956     }
2957   }
2958 }
2959 
2960 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
2961                                              const LiveRange::const_iterator I,
2962                                              Register Reg,
2963                                              LaneBitmask LaneMask) {
2964   const LiveRange::Segment &S = *I;
2965   const VNInfo *VNI = S.valno;
2966   assert(VNI && "Live segment has no valno");
2967 
2968   if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
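  // Each instruction index carries four slots (Block, EarlyClobber, Register,
  // Dead); several of the checks below distinguish which slot a segment
  // boundary uses.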
2969     report("Foreign valno in live segment", MF);
2970     report_context(LR, Reg, LaneMask);
2971     report_context(S);
2972     report_context(*VNI);
2973   }
2974 
2975   if (VNI->isUnused()) {
2976     report("Live segment valno is marked unused", MF);
2977     report_context(LR, Reg, LaneMask);
2978     report_context(S);
2979   }
2980 
2981   const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
2982   if (!MBB) {
2983     report("Bad start of live segment, no basic block", MF);
2984     report_context(LR, Reg, LaneMask);
2985     report_context(S);
2986     return;
2987   }
2988   SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
2989   if (S.start != MBBStartIdx && S.start != VNI->def) {
2990     report("Live segment must begin at MBB entry or valno def", MBB);
2991     report_context(LR, Reg, LaneMask);
2992     report_context(S);
2993   }
2994 
2995   const MachineBasicBlock *EndMBB =
2996     LiveInts->getMBBFromIndex(S.end.getPrevSlot());
2997   if (!EndMBB) {
2998     report("Bad end of live segment, no basic block", MF);
2999     report_context(LR, Reg, LaneMask);
3000     report_context(S);
3001     return;
3002   }
3003 
3004   // No more checks for live-out segments.
3005   if (S.end == LiveInts->getMBBEndIdx(EndMBB))
3006     return;
3007 
3008   // RegUnit intervals are allowed to contain dead PHI-defs.
3009   if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() &&
3010       S.start == VNI->def && S.end == VNI->def.getDeadSlot())
3011     return;
3012 
3013   // The live segment is ending inside EndMBB
3014   const MachineInstr *MI =
3015     LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
3016   if (!MI) {
3017     report("Live segment doesn't end at a valid instruction", EndMBB);
3018     report_context(LR, Reg, LaneMask);
3019     report_context(S);
3020     return;
3021   }
3022 
3023   // A segment ending inside a block must not end at a block slot.
3024   if (S.end.isBlock()) {
3025     report("Live segment ends at B slot of an instruction", EndMBB);
3026     report_context(LR, Reg, LaneMask);
3027     report_context(S);
3028   }
3029 
3030   if (S.end.isDead()) {
3031     // Segment ends on the dead slot.
3032     // That means there must be a dead def.
3033     if (!SlotIndex::isSameInstr(S.start, S.end)) {
3034       report("Live segment ending at dead slot spans instructions", EndMBB);
3035       report_context(LR, Reg, LaneMask);
3036       report_context(S);
3037     }
3038   }
3039 
3040   // After tied operands are rewritten, a live segment can only end at an
3041   // early-clobber slot if it is being redefined by an early-clobber def.
3042   // TODO: Before tied operands are rewritten, a live segment can only end at an
3043   // early-clobber slot if the last use is tied to an early-clobber def.
3044   if (MF->getProperties().hasProperty(
3045           MachineFunctionProperties::Property::TiedOpsRewritten) &&
3046       S.end.isEarlyClobber()) {
3047     if (I+1 == LR.end() || (I+1)->start != S.end) {
3048       report("Live segment ending at early clobber slot must be "
3049              "redefined by an EC def in the same instruction", EndMBB);
3050       report_context(LR, Reg, LaneMask);
3051       report_context(S);
3052     }
3053   }
3054 
3055   // The following checks only apply to virtual registers. Physreg liveness
3056   // is too weird to check.
3057   if (Register::isVirtualRegister(Reg)) {
3058     // A live segment can end with either a redefinition, a kill flag on a
3059     // use, or a dead flag on a def.
3060     bool hasRead = false;
3061     bool hasSubRegDef = false;
3062     bool hasDeadDef = false;
3063     for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
3064       if (!MOI->isReg() || MOI->getReg() != Reg)
3065         continue;
3066       unsigned Sub = MOI->getSubReg();
3067       LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
3068                                  : LaneBitmask::getAll();
3069       if (MOI->isDef()) {
3070         if (Sub != 0) {
3071           hasSubRegDef = true;
3072           // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3073           // mask for subregister defs. Read-undef defs will be handled by
3074           // readsReg below.
3075           SLM = ~SLM;
3076         }
3077         if (MOI->isDead())
3078           hasDeadDef = true;
3079       }
3080       if (LaneMask.any() && (LaneMask & SLM).none())
3081         continue;
3082       if (MOI->readsReg())
3083         hasRead = true;
3084     }
3085     if (S.end.isDead()) {
3086       // Make sure that the corresponding machine operand for a "dead" live
3087       // range has the dead flag. We cannot perform this check for subregister
3088       // liveranges as partially dead values are allowed.
3089       if (LaneMask.none() && !hasDeadDef) {
3090         report("Instruction ending live segment on dead slot has no dead flag",
3091                MI);
3092         report_context(LR, Reg, LaneMask);
3093         report_context(S);
3094       }
3095     } else {
3096       if (!hasRead) {
3097         // When tracking subregister liveness, the main range must start new
3098         // values on partial register writes, even if there is no read.
3099         if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3100             !hasSubRegDef) {
3101           report("Instruction ending live segment doesn't read the register",
3102                  MI);
3103           report_context(LR, Reg, LaneMask);
3104           report_context(S);
3105         }
3106       }
3107     }
3108   }
3109 
3110   // Now check all the basic blocks in this live segment.
3111   MachineFunction::const_iterator MFI = MBB->getIterator();
3112   // Is this live segment the beginning of a non-PHIDef VN?
3113   if (S.start == VNI->def && !VNI->isPHIDef()) {
3114     // Not live-in to any blocks.
3115     if (MBB == EndMBB)
3116       return;
3117     // Skip this block.
3118     ++MFI;
3119   }
3120 
3121   SmallVector<SlotIndex, 4> Undefs;
3122   if (LaneMask.any()) {
3123     LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3124     OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3125   }
3126 
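  // Walk every block covered by [S.start, S.end) and check that the value is
  // live out of each CFG predecessor, modulo the EH-pad, undef and PHI-def
  // exceptions handled below.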
3127   while (true) {
3128     assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3129     // We don't know how to track physregs into a landing pad.
3130     if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) {
3131       if (&*MFI == EndMBB)
3132         break;
3133       ++MFI;
3134       continue;
3135     }
3136 
3137     // Is VNI a PHI-def in the current block?
3138     bool IsPHI = VNI->isPHIDef() &&
3139       VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3140 
3141     // Check that VNI is live-out of all predecessors.
3142     for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3143       SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3144       // A landing pad's predecessors only need the value live out at their last call.
3145       if (MFI->isEHPad()) {
3146         for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3147           if (MI.isCall()) {
3148             PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3149             break;
3150           }
3151         }
3152       }
3153       const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3154 
3155       // All predecessors must have a live-out value. However, for a phi
3156       // instruction with subregister intervals, only one of the
3157       // subregisters (not necessarily the current one) needs to be
3158       // defined.
3159       if (!PVNI && (LaneMask.none() || !IsPHI)) {
3160         if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3161           continue;
3162         report("Register not marked live out of predecessor", Pred);
3163         report_context(LR, Reg, LaneMask);
3164         report_context(*VNI);
3165         errs() << " live into " << printMBBReference(*MFI) << '@'
3166                << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3167                << PEnd << '\n';
3168         continue;
3169       }
3170 
3171       // Only PHI-defs can take different predecessor values.
3172       if (!IsPHI && PVNI != VNI) {
3173         report("Different value live out of predecessor", Pred);
3174         report_context(LR, Reg, LaneMask);
3175         errs() << "Valno #" << PVNI->id << " live out of "
3176                << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3177                << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3178                << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3179       }
3180     }
3181     if (&*MFI == EndMBB)
3182       break;
3183     ++MFI;
3184   }
3185 }
3186 
3187 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3188                                       LaneBitmask LaneMask) {
3189   for (const VNInfo *VNI : LR.valnos)
3190     verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3191 
3192   for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3193     verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3194 }
3195 
3196 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3197   Register Reg = LI.reg();
3198   assert(Register::isVirtualRegister(Reg));
3199   verifyLiveRange(LI, Reg);
3200 
3201   LaneBitmask Mask;
3202   LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3203   for (const LiveInterval::SubRange &SR : LI.subranges()) {
3204     if ((Mask & SR.LaneMask).any()) {
3205       report("Lane masks of sub ranges overlap in live interval", MF);
3206       report_context(LI);
3207     }
3208     if ((SR.LaneMask & ~MaxMask).any()) {
3209       report("Subrange lanemask is invalid", MF);
3210       report_context(LI);
3211     }
3212     if (SR.empty()) {
3213       report("Subrange must not be empty", MF);
3214       report_context(SR, LI.reg(), SR.LaneMask);
3215     }
3216     Mask |= SR.LaneMask;
3217     verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3218     if (!LI.covers(SR)) {
3219       report("A Subrange is not covered by the main range", MF);
3220       report_context(LI);
3221     }
3222   }
3223 
3224   // Check the LI only has one connected component.
3225   ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3226   unsigned NumComp = ConEQ.Classify(LI);
3227   if (NumComp > 1) {
3228     report("Multiple connected components in live interval", MF);
3229     report_context(LI);
3230     for (unsigned comp = 0; comp != NumComp; ++comp) {
3231       errs() << comp << ": valnos";
3232       for (const VNInfo *I : LI.valnos)
3233         if (comp == ConEQ.getEqClass(I))
3234           errs() << ' ' << I->id;
3235       errs() << '\n';
3236     }
3237   }
3238 }
3239 
3240 namespace {
3241 
3242   // FrameSetup and FrameDestroy can both have a zero adjustment, so a single
3243   // integer cannot tell whether the block is between a FrameSetup and its
3244   // FrameDestroy when the value is zero.
3245   // We therefore use a bool alongside the integer to capture the stack state.
3246   struct StackStateOfBB {
3247     StackStateOfBB() = default;
3248     StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3249       EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3250       ExitIsSetup(ExitSetup) {}
3251 
3252     // Can be negative, which means we are setting up a frame.
3253     int EntryValue = 0;
3254     int ExitValue = 0;
3255     bool EntryIsSetup = false;
3256     bool ExitIsSetup = false;
3257   };
3258 
3259 } // end anonymous namespace
3260 
3261 /// Make sure on every path through the CFG, a FrameSetup <n> is always followed
3262 /// by a FrameDestroy <n>, stack adjustments are identical on all
3263 /// CFG edges to a merge point, and the frame is destroyed at the end of a return block.
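/// For example, a well-formed call sequence
///   ADJCALLSTACKDOWN 16   (FrameSetup)
///   ...
///   ADJCALLSTACKUP 16     (FrameDestroy)
/// restores the tracked adjustment to its pre-setup value; the pseudo opcode
/// names are target-defined and serve only as an illustration here.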
3264 void MachineVerifier::verifyStackFrame() {
3265   unsigned FrameSetupOpcode   = TII->getCallFrameSetupOpcode();
3266   unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3267   if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3268     return;
3269 
3270   SmallVector<StackStateOfBB, 8> SPState;
3271   SPState.resize(MF->getNumBlockIDs());
3272   df_iterator_default_set<const MachineBasicBlock*> Reachable;
3273 
3274   // Visit the MBBs in DFS order.
3275   for (df_ext_iterator<const MachineFunction *,
3276                        df_iterator_default_set<const MachineBasicBlock *>>
3277        DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3278        DFI != DFE; ++DFI) {
3279     const MachineBasicBlock *MBB = *DFI;
3280 
3281     StackStateOfBB BBState;
3282     // Check the exit state of the DFS stack predecessor.
3283     if (DFI.getPathLength() >= 2) {
3284       const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3285       assert(Reachable.count(StackPred) &&
3286              "DFS stack predecessor is already visited.\n");
3287       BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3288       BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3289       BBState.ExitValue = BBState.EntryValue;
3290       BBState.ExitIsSetup = BBState.EntryIsSetup;
3291     }
3292 
3293     // Update stack state by checking contents of MBB.
3294     for (const auto &I : *MBB) {
3295       if (I.getOpcode() == FrameSetupOpcode) {
3296         if (BBState.ExitIsSetup)
3297           report("FrameSetup is after another FrameSetup", &I);
3298         BBState.ExitValue -= TII->getFrameTotalSize(I);
3299         BBState.ExitIsSetup = true;
3300       }
3301 
3302       if (I.getOpcode() == FrameDestroyOpcode) {
3303         int Size = TII->getFrameTotalSize(I);
3304         if (!BBState.ExitIsSetup)
3305           report("FrameDestroy is not after a FrameSetup", &I);
3306         int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3307                                                BBState.ExitValue;
3308         if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3309           report("FrameDestroy <n> is after FrameSetup <m>", &I);
3310           errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3311                  << AbsSPAdj << ">.\n";
3312         }
3313         BBState.ExitValue += Size;
3314         BBState.ExitIsSetup = false;
3315       }
3316     }
3317     SPState[MBB->getNumber()] = BBState;
3318 
3319     // Make sure the exit state of any predecessor is consistent with the entry
3320     // state.
3321     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3322       if (Reachable.count(Pred) &&
3323           (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3324            SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3325         report("The exit stack state of a predecessor is inconsistent.", MBB);
3326         errs() << "Predecessor " << printMBBReference(*Pred)
3327                << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3328                << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3329                << printMBBReference(*MBB) << " has entry state ("
3330                << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3331       }
3332     }
3333 
3334     // Make sure the entry state of any successor is consistent with the exit
3335     // state.
3336     for (const MachineBasicBlock *Succ : MBB->successors()) {
3337       if (Reachable.count(Succ) &&
3338           (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3339            SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3340         report("The entry stack state of a successor is inconsistent.", MBB);
3341         errs() << "Successor " << printMBBReference(*Succ)
3342                << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3343                << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3344                << printMBBReference(*MBB) << " has exit state ("
3345                << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3346       }
3347     }
3348 
3349     // Make sure a basic block with return ends with zero stack adjustment.
3350     if (!MBB->empty() && MBB->back().isReturn()) {
3351       if (BBState.ExitIsSetup)
3352         report("A return block ends with a FrameSetup.", MBB);
3353       if (BBState.ExitValue)
3354         report("A return block ends with a nonzero stack adjustment.", MBB);
3355     }
3356   }
3357 }
3358