1 //===- MachineVerifier.cpp - Machine Code Verifier ------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Pass to verify generated machine code. The following is checked: 10 // 11 // Operand counts: All explicit operands must be present. 12 // 13 // Register classes: All physical and virtual register operands must be 14 // compatible with the register class required by the instruction descriptor. 15 // 16 // Register live intervals: Registers must be defined only once, and must be 17 // defined before use. 18 // 19 // The machine code verifier is enabled with the command-line option 20 // -verify-machineinstrs. 21 //===----------------------------------------------------------------------===// 22 23 #include "llvm/ADT/BitVector.h" 24 #include "llvm/ADT/DenseMap.h" 25 #include "llvm/ADT/DenseSet.h" 26 #include "llvm/ADT/DepthFirstIterator.h" 27 #include "llvm/ADT/PostOrderIterator.h" 28 #include "llvm/ADT/STLExtras.h" 29 #include "llvm/ADT/SetOperations.h" 30 #include "llvm/ADT/SmallPtrSet.h" 31 #include "llvm/ADT/SmallVector.h" 32 #include "llvm/ADT/StringRef.h" 33 #include "llvm/ADT/Twine.h" 34 #include "llvm/Analysis/EHPersonalities.h" 35 #include "llvm/CodeGen/GlobalISel/RegisterBank.h" 36 #include "llvm/CodeGen/LiveInterval.h" 37 #include "llvm/CodeGen/LiveIntervalCalc.h" 38 #include "llvm/CodeGen/LiveIntervals.h" 39 #include "llvm/CodeGen/LiveStacks.h" 40 #include "llvm/CodeGen/LiveVariables.h" 41 #include "llvm/CodeGen/MachineBasicBlock.h" 42 #include "llvm/CodeGen/MachineFrameInfo.h" 43 #include "llvm/CodeGen/MachineFunction.h" 44 #include "llvm/CodeGen/MachineFunctionPass.h" 45 #include "llvm/CodeGen/MachineInstr.h" 46 #include "llvm/CodeGen/MachineInstrBundle.h" 47 #include "llvm/CodeGen/MachineMemOperand.h" 48 #include "llvm/CodeGen/MachineOperand.h" 49 #include "llvm/CodeGen/MachineRegisterInfo.h" 50 #include "llvm/CodeGen/PseudoSourceValue.h" 51 #include "llvm/CodeGen/SlotIndexes.h" 52 #include "llvm/CodeGen/StackMaps.h" 53 #include "llvm/CodeGen/TargetInstrInfo.h" 54 #include "llvm/CodeGen/TargetOpcodes.h" 55 #include "llvm/CodeGen/TargetRegisterInfo.h" 56 #include "llvm/CodeGen/TargetSubtargetInfo.h" 57 #include "llvm/IR/BasicBlock.h" 58 #include "llvm/IR/Function.h" 59 #include "llvm/IR/InlineAsm.h" 60 #include "llvm/IR/Instructions.h" 61 #include "llvm/InitializePasses.h" 62 #include "llvm/MC/LaneBitmask.h" 63 #include "llvm/MC/MCAsmInfo.h" 64 #include "llvm/MC/MCInstrDesc.h" 65 #include "llvm/MC/MCRegisterInfo.h" 66 #include "llvm/MC/MCTargetOptions.h" 67 #include "llvm/Pass.h" 68 #include "llvm/Support/Casting.h" 69 #include "llvm/Support/ErrorHandling.h" 70 #include "llvm/Support/LowLevelTypeImpl.h" 71 #include "llvm/Support/MathExtras.h" 72 #include "llvm/Support/raw_ostream.h" 73 #include "llvm/Target/TargetMachine.h" 74 #include <algorithm> 75 #include <cassert> 76 #include <cstddef> 77 #include <cstdint> 78 #include <iterator> 79 #include <string> 80 #include <utility> 81 82 using namespace llvm; 83 84 namespace { 85 86 struct MachineVerifier { 87 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {} 88 89 unsigned verify(const MachineFunction &MF); 90 91 Pass *const PASS; 92 const char *Banner; 93 const MachineFunction *MF; 94 const TargetMachine *TM; 95 const 
TargetInstrInfo *TII; 96 const TargetRegisterInfo *TRI; 97 const MachineRegisterInfo *MRI; 98 99 unsigned foundErrors; 100 101 // Avoid querying the MachineFunctionProperties for each operand. 102 bool isFunctionRegBankSelected; 103 bool isFunctionSelected; 104 105 using RegVector = SmallVector<Register, 16>; 106 using RegMaskVector = SmallVector<const uint32_t *, 4>; 107 using RegSet = DenseSet<Register>; 108 using RegMap = DenseMap<Register, const MachineInstr *>; 109 using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>; 110 111 const MachineInstr *FirstNonPHI; 112 const MachineInstr *FirstTerminator; 113 BlockSet FunctionBlocks; 114 115 BitVector regsReserved; 116 RegSet regsLive; 117 RegVector regsDefined, regsDead, regsKilled; 118 RegMaskVector regMasks; 119 120 SlotIndex lastIndex; 121 122 // Add Reg and any sub-registers to RV 123 void addRegWithSubRegs(RegVector &RV, Register Reg) { 124 RV.push_back(Reg); 125 if (Reg.isPhysical()) 126 append_range(RV, TRI->subregs(Reg.asMCReg())); 127 } 128 129 struct BBInfo { 130 // Is this MBB reachable from the MF entry point? 131 bool reachable = false; 132 133 // Vregs that must be live in because they are used without being 134 // defined. Map value is the user. vregsLiveIn doesn't include regs 135 // that only are used by PHI nodes. 136 RegMap vregsLiveIn; 137 138 // Regs killed in MBB. They may be defined again, and will then be in both 139 // regsKilled and regsLiveOut. 140 RegSet regsKilled; 141 142 // Regs defined in MBB and live out. Note that vregs passing through may 143 // be live out without being mentioned here. 144 RegSet regsLiveOut; 145 146 // Vregs that pass through MBB untouched. This set is disjoint from 147 // regsKilled and regsLiveOut. 148 RegSet vregsPassed; 149 150 // Vregs that must pass through MBB because they are needed by a successor 151 // block. This set is disjoint from regsLiveOut. 152 RegSet vregsRequired; 153 154 // Set versions of block's predecessor and successor lists. 155 BlockSet Preds, Succs; 156 157 BBInfo() = default; 158 159 // Add register to vregsRequired if it belongs there. Return true if 160 // anything changed. 161 bool addRequired(Register Reg) { 162 if (!Reg.isVirtual()) 163 return false; 164 if (regsLiveOut.count(Reg)) 165 return false; 166 return vregsRequired.insert(Reg).second; 167 } 168 169 // Same for a full set. 170 bool addRequired(const RegSet &RS) { 171 bool Changed = false; 172 for (Register Reg : RS) 173 Changed |= addRequired(Reg); 174 return Changed; 175 } 176 177 // Same for a full map. 178 bool addRequired(const RegMap &RM) { 179 bool Changed = false; 180 for (const auto &I : RM) 181 Changed |= addRequired(I.first); 182 return Changed; 183 } 184 185 // Live-out registers are either in regsLiveOut or vregsPassed. 186 bool isLiveOut(Register Reg) const { 187 return regsLiveOut.count(Reg) || vregsPassed.count(Reg); 188 } 189 }; 190 191 // Extra register info per MBB. 
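// Looked up with operator[], so a default-constructed BBInfo (reachable == false) is created on first access.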
192 DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap; 193 194 bool isReserved(Register Reg) { 195 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id()); 196 } 197 198 bool isAllocatable(Register Reg) const { 199 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) && 200 !regsReserved.test(Reg.id()); 201 } 202 203 // Analysis information if available 204 LiveVariables *LiveVars; 205 LiveIntervals *LiveInts; 206 LiveStacks *LiveStks; 207 SlotIndexes *Indexes; 208 209 void visitMachineFunctionBefore(); 210 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB); 211 void visitMachineBundleBefore(const MachineInstr *MI); 212 213 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI); 214 void verifyPreISelGenericInstruction(const MachineInstr *MI); 215 void visitMachineInstrBefore(const MachineInstr *MI); 216 void visitMachineOperand(const MachineOperand *MO, unsigned MONum); 217 void visitMachineBundleAfter(const MachineInstr *MI); 218 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB); 219 void visitMachineFunctionAfter(); 220 221 void report(const char *msg, const MachineFunction *MF); 222 void report(const char *msg, const MachineBasicBlock *MBB); 223 void report(const char *msg, const MachineInstr *MI); 224 void report(const char *msg, const MachineOperand *MO, unsigned MONum, 225 LLT MOVRegType = LLT{}); 226 227 void report_context(const LiveInterval &LI) const; 228 void report_context(const LiveRange &LR, Register VRegUnit, 229 LaneBitmask LaneMask) const; 230 void report_context(const LiveRange::Segment &S) const; 231 void report_context(const VNInfo &VNI) const; 232 void report_context(SlotIndex Pos) const; 233 void report_context(MCPhysReg PhysReg) const; 234 void report_context_liverange(const LiveRange &LR) const; 235 void report_context_lanemask(LaneBitmask LaneMask) const; 236 void report_context_vreg(Register VReg) const; 237 void report_context_vreg_regunit(Register VRegOrUnit) const; 238 239 void verifyInlineAsm(const MachineInstr *MI); 240 241 void checkLiveness(const MachineOperand *MO, unsigned MONum); 242 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum, 243 SlotIndex UseIdx, const LiveRange &LR, 244 Register VRegOrUnit, 245 LaneBitmask LaneMask = LaneBitmask::getNone()); 246 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum, 247 SlotIndex DefIdx, const LiveRange &LR, 248 Register VRegOrUnit, bool SubRangeCheck = false, 249 LaneBitmask LaneMask = LaneBitmask::getNone()); 250 251 void markReachable(const MachineBasicBlock *MBB); 252 void calcRegsPassed(); 253 void checkPHIOps(const MachineBasicBlock &MBB); 254 255 void calcRegsRequired(); 256 void verifyLiveVariables(); 257 void verifyLiveIntervals(); 258 void verifyLiveInterval(const LiveInterval&); 259 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register, 260 LaneBitmask); 261 void verifyLiveRangeSegment(const LiveRange &, 262 const LiveRange::const_iterator I, Register, 263 LaneBitmask); 264 void verifyLiveRange(const LiveRange &, Register, 265 LaneBitmask LaneMask = LaneBitmask::getNone()); 266 267 void verifyStackFrame(); 268 269 void verifySlotIndexes() const; 270 void verifyProperties(const MachineFunction &MF); 271 }; 272 273 struct MachineVerifierPass : public MachineFunctionPass { 274 static char ID; // Pass ID, replacement for typeid 275 276 const std::string Banner; 277 278 MachineVerifierPass(std::string banner = std::string()) 279 : MachineFunctionPass(ID), Banner(std::move(banner)) { 280 
initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry()); 281 } 282 283 void getAnalysisUsage(AnalysisUsage &AU) const override { 284 AU.setPreservesAll(); 285 MachineFunctionPass::getAnalysisUsage(AU); 286 } 287 288 bool runOnMachineFunction(MachineFunction &MF) override { 289 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF); 290 if (FoundErrors) 291 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors."); 292 return false; 293 } 294 }; 295 296 } // end anonymous namespace 297 298 char MachineVerifierPass::ID = 0; 299 300 INITIALIZE_PASS(MachineVerifierPass, "machineverifier", 301 "Verify generated machine code", false, false) 302 303 FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) { 304 return new MachineVerifierPass(Banner); 305 } 306 307 void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *, 308 const std::string &Banner, 309 const MachineFunction &MF) { 310 // TODO: Use MFAM after porting below analyses. 311 // LiveVariables *LiveVars; 312 // LiveIntervals *LiveInts; 313 // LiveStacks *LiveStks; 314 // SlotIndexes *Indexes; 315 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF); 316 if (FoundErrors) 317 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors."); 318 } 319 320 bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors) 321 const { 322 MachineFunction &MF = const_cast<MachineFunction&>(*this); 323 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF); 324 if (AbortOnErrors && FoundErrors) 325 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors."); 326 return FoundErrors == 0; 327 } 328 329 void MachineVerifier::verifySlotIndexes() const { 330 if (Indexes == nullptr) 331 return; 332 333 // Ensure the IdxMBB list is sorted by slot indexes. 334 SlotIndex Last; 335 for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(), 336 E = Indexes->MBBIndexEnd(); I != E; ++I) { 337 assert(!Last.isValid() || I->first > Last); 338 Last = I->first; 339 } 340 } 341 342 void MachineVerifier::verifyProperties(const MachineFunction &MF) { 343 // If a pass has introduced virtual registers without clearing the 344 // NoVRegs property (or set it without allocating the vregs) 345 // then report an error. 346 if (MF.getProperties().hasProperty( 347 MachineFunctionProperties::Property::NoVRegs) && 348 MRI->getNumVirtRegs()) 349 report("Function has NoVRegs property but there are VReg operands", &MF); 350 } 351 352 unsigned MachineVerifier::verify(const MachineFunction &MF) { 353 foundErrors = 0; 354 355 this->MF = &MF; 356 TM = &MF.getTarget(); 357 TII = MF.getSubtarget().getInstrInfo(); 358 TRI = MF.getSubtarget().getRegisterInfo(); 359 MRI = &MF.getRegInfo(); 360 361 const bool isFunctionFailedISel = MF.getProperties().hasProperty( 362 MachineFunctionProperties::Property::FailedISel); 363 364 // If we're mid-GlobalISel and we already triggered the fallback path then 365 // it's expected that the MIR is somewhat broken but that's ok since we'll 366 // reset it and clear the FailedISel attribute in ResetMachineFunctions. 
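// In that case, skip all further checks; foundErrors is still zero, so the function is reported as clean.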
367 if (isFunctionFailedISel) 368 return foundErrors; 369 370 isFunctionRegBankSelected = MF.getProperties().hasProperty( 371 MachineFunctionProperties::Property::RegBankSelected); 372 isFunctionSelected = MF.getProperties().hasProperty( 373 MachineFunctionProperties::Property::Selected); 374 375 LiveVars = nullptr; 376 LiveInts = nullptr; 377 LiveStks = nullptr; 378 Indexes = nullptr; 379 if (PASS) { 380 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>(); 381 // We don't want to verify LiveVariables if LiveIntervals is available. 382 if (!LiveInts) 383 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>(); 384 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>(); 385 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>(); 386 } 387 388 verifySlotIndexes(); 389 390 verifyProperties(MF); 391 392 visitMachineFunctionBefore(); 393 for (const MachineBasicBlock &MBB : MF) { 394 visitMachineBasicBlockBefore(&MBB); 395 // Keep track of the current bundle header. 396 const MachineInstr *CurBundle = nullptr; 397 // Do we expect the next instruction to be part of the same bundle? 398 bool InBundle = false; 399 400 for (const MachineInstr &MI : MBB.instrs()) { 401 if (MI.getParent() != &MBB) { 402 report("Bad instruction parent pointer", &MBB); 403 errs() << "Instruction: " << MI; 404 continue; 405 } 406 407 // Check for consistent bundle flags. 408 if (InBundle && !MI.isBundledWithPred()) 409 report("Missing BundledPred flag, " 410 "BundledSucc was set on predecessor", 411 &MI); 412 if (!InBundle && MI.isBundledWithPred()) 413 report("BundledPred flag is set, " 414 "but BundledSucc not set on predecessor", 415 &MI); 416 417 // Is this a bundle header? 418 if (!MI.isInsideBundle()) { 419 if (CurBundle) 420 visitMachineBundleAfter(CurBundle); 421 CurBundle = &MI; 422 visitMachineBundleBefore(CurBundle); 423 } else if (!CurBundle) 424 report("No bundle header", &MI); 425 visitMachineInstrBefore(&MI); 426 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { 427 const MachineOperand &Op = MI.getOperand(I); 428 if (Op.getParent() != &MI) { 429 // Make sure to use correct addOperand / RemoveOperand / ChangeTo 430 // functions when replacing operands of a MachineInstr. 431 report("Instruction has operand with wrong parent set", &MI); 432 } 433 434 visitMachineOperand(&Op, I); 435 } 436 437 // Was this the last bundled instruction? 438 InBundle = MI.isBundledWithSucc(); 439 } 440 if (CurBundle) 441 visitMachineBundleAfter(CurBundle); 442 if (InBundle) 443 report("BundledSucc flag set on last instruction in block", &MBB.back()); 444 visitMachineBasicBlockAfter(&MBB); 445 } 446 visitMachineFunctionAfter(); 447 448 // Clean up. 
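// Clear the per-function register sets and block info before handing back the error count.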
449 regsLive.clear(); 450 regsDefined.clear(); 451 regsDead.clear(); 452 regsKilled.clear(); 453 regMasks.clear(); 454 MBBInfoMap.clear(); 455 456 return foundErrors; 457 } 458 459 void MachineVerifier::report(const char *msg, const MachineFunction *MF) { 460 assert(MF); 461 errs() << '\n'; 462 if (!foundErrors++) { 463 if (Banner) 464 errs() << "# " << Banner << '\n'; 465 if (LiveInts != nullptr) 466 LiveInts->print(errs()); 467 else 468 MF->print(errs(), Indexes); 469 } 470 errs() << "*** Bad machine code: " << msg << " ***\n" 471 << "- function: " << MF->getName() << "\n"; 472 } 473 474 void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) { 475 assert(MBB); 476 report(msg, MBB->getParent()); 477 errs() << "- basic block: " << printMBBReference(*MBB) << ' ' 478 << MBB->getName() << " (" << (const void *)MBB << ')'; 479 if (Indexes) 480 errs() << " [" << Indexes->getMBBStartIdx(MBB) 481 << ';' << Indexes->getMBBEndIdx(MBB) << ')'; 482 errs() << '\n'; 483 } 484 485 void MachineVerifier::report(const char *msg, const MachineInstr *MI) { 486 assert(MI); 487 report(msg, MI->getParent()); 488 errs() << "- instruction: "; 489 if (Indexes && Indexes->hasIndex(*MI)) 490 errs() << Indexes->getInstructionIndex(*MI) << '\t'; 491 MI->print(errs(), /*IsStandalone=*/true); 492 } 493 494 void MachineVerifier::report(const char *msg, const MachineOperand *MO, 495 unsigned MONum, LLT MOVRegType) { 496 assert(MO); 497 report(msg, MO->getParent()); 498 errs() << "- operand " << MONum << ": "; 499 MO->print(errs(), MOVRegType, TRI); 500 errs() << "\n"; 501 } 502 503 void MachineVerifier::report_context(SlotIndex Pos) const { 504 errs() << "- at: " << Pos << '\n'; 505 } 506 507 void MachineVerifier::report_context(const LiveInterval &LI) const { 508 errs() << "- interval: " << LI << '\n'; 509 } 510 511 void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit, 512 LaneBitmask LaneMask) const { 513 report_context_liverange(LR); 514 report_context_vreg_regunit(VRegUnit); 515 if (LaneMask.any()) 516 report_context_lanemask(LaneMask); 517 } 518 519 void MachineVerifier::report_context(const LiveRange::Segment &S) const { 520 errs() << "- segment: " << S << '\n'; 521 } 522 523 void MachineVerifier::report_context(const VNInfo &VNI) const { 524 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n"; 525 } 526 527 void MachineVerifier::report_context_liverange(const LiveRange &LR) const { 528 errs() << "- liverange: " << LR << '\n'; 529 } 530 531 void MachineVerifier::report_context(MCPhysReg PReg) const { 532 errs() << "- p. register: " << printReg(PReg, TRI) << '\n'; 533 } 534 535 void MachineVerifier::report_context_vreg(Register VReg) const { 536 errs() << "- v. 
register: " << printReg(VReg, TRI) << '\n'; 537 } 538 539 void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const { 540 if (Register::isVirtualRegister(VRegOrUnit)) { 541 report_context_vreg(VRegOrUnit); 542 } else { 543 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n'; 544 } 545 } 546 547 void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const { 548 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n'; 549 } 550 551 void MachineVerifier::markReachable(const MachineBasicBlock *MBB) { 552 BBInfo &MInfo = MBBInfoMap[MBB]; 553 if (!MInfo.reachable) { 554 MInfo.reachable = true; 555 for (const MachineBasicBlock *Succ : MBB->successors()) 556 markReachable(Succ); 557 } 558 } 559 560 void MachineVerifier::visitMachineFunctionBefore() { 561 lastIndex = SlotIndex(); 562 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs() 563 : TRI->getReservedRegs(*MF); 564 565 if (!MF->empty()) 566 markReachable(&MF->front()); 567 568 // Build a set of the basic blocks in the function. 569 FunctionBlocks.clear(); 570 for (const auto &MBB : *MF) { 571 FunctionBlocks.insert(&MBB); 572 BBInfo &MInfo = MBBInfoMap[&MBB]; 573 574 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end()); 575 if (MInfo.Preds.size() != MBB.pred_size()) 576 report("MBB has duplicate entries in its predecessor list.", &MBB); 577 578 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end()); 579 if (MInfo.Succs.size() != MBB.succ_size()) 580 report("MBB has duplicate entries in its successor list.", &MBB); 581 } 582 583 // Check that the register use lists are sane. 584 MRI->verifyUseLists(); 585 586 if (!MF->empty()) 587 verifyStackFrame(); 588 } 589 590 void 591 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { 592 FirstTerminator = nullptr; 593 FirstNonPHI = nullptr; 594 595 if (!MF->getProperties().hasProperty( 596 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) { 597 // If this block has allocatable physical registers live-in, check that 598 // it is an entry block or landing pad. 599 for (const auto &LI : MBB->liveins()) { 600 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() && 601 MBB->getIterator() != MBB->getParent()->begin()) { 602 report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB); 603 report_context(LI.PhysReg); 604 } 605 } 606 } 607 608 // Count the number of landing pad successors. 609 SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs; 610 for (const auto *succ : MBB->successors()) { 611 if (succ->isEHPad()) 612 LandingPadSuccs.insert(succ); 613 if (!FunctionBlocks.count(succ)) 614 report("MBB has successor that isn't part of the function.", MBB); 615 if (!MBBInfoMap[succ].Preds.count(MBB)) { 616 report("Inconsistent CFG", MBB); 617 errs() << "MBB is not in the predecessor list of the successor " 618 << printMBBReference(*succ) << ".\n"; 619 } 620 } 621 622 // Check the predecessor list. 
623 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 624 if (!FunctionBlocks.count(Pred)) 625 report("MBB has predecessor that isn't part of the function.", MBB); 626 if (!MBBInfoMap[Pred].Succs.count(MBB)) { 627 report("Inconsistent CFG", MBB); 628 errs() << "MBB is not in the successor list of the predecessor " 629 << printMBBReference(*Pred) << ".\n"; 630 } 631 } 632 633 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo(); 634 const BasicBlock *BB = MBB->getBasicBlock(); 635 const Function &F = MF->getFunction(); 636 if (LandingPadSuccs.size() > 1 && 637 !(AsmInfo && 638 AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj && 639 BB && isa<SwitchInst>(BB->getTerminator())) && 640 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn()))) 641 report("MBB has more than one landing pad successor", MBB); 642 643 // Call analyzeBranch. If it succeeds, there are several more conditions to check. 644 MachineBasicBlock *TBB = nullptr, *FBB = nullptr; 645 SmallVector<MachineOperand, 4> Cond; 646 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB, 647 Cond)) { 648 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's 649 // check whether its answers match up with reality. 650 if (!TBB && !FBB) { 651 // Block falls through to its successor. 652 if (!MBB->empty() && MBB->back().isBarrier() && 653 !TII->isPredicated(MBB->back())) { 654 report("MBB exits via unconditional fall-through but ends with a " 655 "barrier instruction!", MBB); 656 } 657 if (!Cond.empty()) { 658 report("MBB exits via unconditional fall-through but has a condition!", 659 MBB); 660 } 661 } else if (TBB && !FBB && Cond.empty()) { 662 // Block unconditionally branches somewhere. 663 if (MBB->empty()) { 664 report("MBB exits via unconditional branch but doesn't contain " 665 "any instructions!", MBB); 666 } else if (!MBB->back().isBarrier()) { 667 report("MBB exits via unconditional branch but doesn't end with a " 668 "barrier instruction!", MBB); 669 } else if (!MBB->back().isTerminator()) { 670 report("MBB exits via unconditional branch but the branch isn't a " 671 "terminator instruction!", MBB); 672 } 673 } else if (TBB && !FBB && !Cond.empty()) { 674 // Block conditionally branches somewhere, otherwise falls through. 675 if (MBB->empty()) { 676 report("MBB exits via conditional branch/fall-through but doesn't " 677 "contain any instructions!", MBB); 678 } else if (MBB->back().isBarrier()) { 679 report("MBB exits via conditional branch/fall-through but ends with a " 680 "barrier instruction!", MBB); 681 } else if (!MBB->back().isTerminator()) { 682 report("MBB exits via conditional branch/fall-through but the branch " 683 "isn't a terminator instruction!", MBB); 684 } 685 } else if (TBB && FBB) { 686 // Block conditionally branches somewhere, otherwise branches 687 // somewhere else.
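// (analyzeBranch returned both a true and a false destination, so the block is expected to end in a conditional branch followed by an unconditional branch.)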
688 if (MBB->empty()) { 689 report("MBB exits via conditional branch/branch but doesn't " 690 "contain any instructions!", MBB); 691 } else if (!MBB->back().isBarrier()) { 692 report("MBB exits via conditional branch/branch but doesn't end with a " 693 "barrier instruction!", MBB); 694 } else if (!MBB->back().isTerminator()) { 695 report("MBB exits via conditional branch/branch but the branch " 696 "isn't a terminator instruction!", MBB); 697 } 698 if (Cond.empty()) { 699 report("MBB exits via conditional branch/branch but there's no " 700 "condition!", MBB); 701 } 702 } else { 703 report("analyzeBranch returned invalid data!", MBB); 704 } 705 706 // Now check that the successors match up with the answers reported by 707 // analyzeBranch. 708 if (TBB && !MBB->isSuccessor(TBB)) 709 report("MBB exits via jump or conditional branch, but its target isn't a " 710 "CFG successor!", 711 MBB); 712 if (FBB && !MBB->isSuccessor(FBB)) 713 report("MBB exits via conditional branch, but its target isn't a CFG " 714 "successor!", 715 MBB); 716 717 // There might be a fallthrough to the next block if there's either no 718 // unconditional true branch, or if there's a condition, and one of the 719 // branches is missing. 720 bool Fallthrough = !TBB || (!Cond.empty() && !FBB); 721 722 // A conditional fallthrough must be an actual CFG successor, not 723 // unreachable. (Conversely, an unconditional fallthrough might not really 724 // be a successor, because the block might end in unreachable.) 725 if (!Cond.empty() && !FBB) { 726 MachineFunction::const_iterator MBBI = std::next(MBB->getIterator()); 727 if (MBBI == MF->end()) { 728 report("MBB conditionally falls through out of function!", MBB); 729 } else if (!MBB->isSuccessor(&*MBBI)) 730 report("MBB exits via conditional branch/fall-through but the CFG " 731 "successors don't match the actual successors!", 732 MBB); 733 } 734 735 // Verify that there aren't any extra un-accounted-for successors. 736 for (const MachineBasicBlock *SuccMBB : MBB->successors()) { 737 // If this successor is one of the branch targets, it's okay. 738 if (SuccMBB == TBB || SuccMBB == FBB) 739 continue; 740 // If we might have a fallthrough, and the successor is the fallthrough 741 // block, that's also ok. 742 if (Fallthrough && SuccMBB == MBB->getNextNode()) 743 continue; 744 // Also accept successors which are for exception-handling or might be 745 // inlineasm_br targets. 746 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget()) 747 continue; 748 report("MBB has unexpected successors which are not branch targets, " 749 "fallthrough, EHPads, or inlineasm_br targets.", 750 MBB); 751 } 752 } 753 754 regsLive.clear(); 755 if (MRI->tracksLiveness()) { 756 for (const auto &LI : MBB->liveins()) { 757 if (!Register::isPhysicalRegister(LI.PhysReg)) { 758 report("MBB live-in list contains non-physical register", MBB); 759 continue; 760 } 761 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg)) 762 regsLive.insert(SubReg); 763 } 764 } 765 766 const MachineFrameInfo &MFI = MF->getFrameInfo(); 767 BitVector PR = MFI.getPristineRegs(*MF); 768 for (unsigned I : PR.set_bits()) { 769 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I)) 770 regsLive.insert(SubReg); 771 } 772 773 regsKilled.clear(); 774 regsDefined.clear(); 775 776 if (Indexes) 777 lastIndex = Indexes->getMBBStartIdx(MBB); 778 } 779 780 // This function gets called for all bundle headers, including normal 781 // stand-alone unbundled instructions. 
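// It checks that slot indexes are monotonically increasing and that no non-terminator appears after the first terminator.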
782 void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) { 783 if (Indexes && Indexes->hasIndex(*MI)) { 784 SlotIndex idx = Indexes->getInstructionIndex(*MI); 785 if (!(idx > lastIndex)) { 786 report("Instruction index out of order", MI); 787 errs() << "Last instruction was at " << lastIndex << '\n'; 788 } 789 lastIndex = idx; 790 } 791 792 // Ensure non-terminators don't follow terminators. 793 if (MI->isTerminator()) { 794 if (!FirstTerminator) 795 FirstTerminator = MI; 796 } else if (FirstTerminator) { 797 report("Non-terminator instruction after the first terminator", MI); 798 errs() << "First terminator was:\t" << *FirstTerminator; 799 } 800 } 801 802 // The operands on an INLINEASM instruction must follow a template. 803 // Verify that the flag operands make sense. 804 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) { 805 // The first two operands on INLINEASM are the asm string and global flags. 806 if (MI->getNumOperands() < 2) { 807 report("Too few operands on inline asm", MI); 808 return; 809 } 810 if (!MI->getOperand(0).isSymbol()) 811 report("Asm string must be an external symbol", MI); 812 if (!MI->getOperand(1).isImm()) 813 report("Asm flags must be an immediate", MI); 814 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2, 815 // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, 816 // and Extra_IsConvergent = 32. 817 if (!isUInt<6>(MI->getOperand(1).getImm())) 818 report("Unknown asm flags", &MI->getOperand(1), 1); 819 820 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed"); 821 822 unsigned OpNo = InlineAsm::MIOp_FirstOperand; 823 unsigned NumOps; 824 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) { 825 const MachineOperand &MO = MI->getOperand(OpNo); 826 // There may be implicit ops after the fixed operands. 827 if (!MO.isImm()) 828 break; 829 NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm()); 830 } 831 832 if (OpNo > MI->getNumOperands()) 833 report("Missing operands in last group", MI); 834 835 // An optional MDNode follows the groups. 836 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata()) 837 ++OpNo; 838 839 // All trailing operands must be implicit registers. 840 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) { 841 const MachineOperand &MO = MI->getOperand(OpNo); 842 if (!MO.isReg() || !MO.isImplicit()) 843 report("Expected implicit register after groups", &MO, OpNo); 844 } 845 } 846 847 /// Check that types are consistent when two operands need to have the same 848 /// number of vector elements. 849 /// \return true if the types are valid. 850 bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1, 851 const MachineInstr *MI) { 852 if (Ty0.isVector() != Ty1.isVector()) { 853 report("operand types must be all-vector or all-scalar", MI); 854 // Generally we try to report as many issues as possible at once, but in 855 // this case it's not clear what we should be comparing the size of the 856 // scalar with: the size of the whole vector or its lane. Instead of 857 // making an arbitrary choice and emitting a not-so-helpful message, let's 858 // avoid the extra noise and stop here.
859 return false; 860 } 861 862 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) { 863 report("operand types must preserve number of vector elements", MI); 864 return false; 865 } 866 867 return true; 868 } 869 870 void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) { 871 if (isFunctionSelected) 872 report("Unexpected generic instruction in a Selected function", MI); 873 874 const MCInstrDesc &MCID = MI->getDesc(); 875 unsigned NumOps = MI->getNumOperands(); 876 877 // Branches must reference a basic block if they are not indirect 878 if (MI->isBranch() && !MI->isIndirectBranch()) { 879 bool HasMBB = false; 880 for (const MachineOperand &Op : MI->operands()) { 881 if (Op.isMBB()) { 882 HasMBB = true; 883 break; 884 } 885 } 886 887 if (!HasMBB) { 888 report("Branch instruction is missing a basic block operand or " 889 "isIndirectBranch property", 890 MI); 891 } 892 } 893 894 // Check types. 895 SmallVector<LLT, 4> Types; 896 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps); 897 I != E; ++I) { 898 if (!MCID.OpInfo[I].isGenericType()) 899 continue; 900 // Generic instructions specify type equality constraints between some of 901 // their operands. Make sure these are consistent. 902 size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex(); 903 Types.resize(std::max(TypeIdx + 1, Types.size())); 904 905 const MachineOperand *MO = &MI->getOperand(I); 906 if (!MO->isReg()) { 907 report("generic instruction must use register operands", MI); 908 continue; 909 } 910 911 LLT OpTy = MRI->getType(MO->getReg()); 912 // Don't report a type mismatch if there is no actual mismatch, only a 913 // type missing, to reduce noise: 914 if (OpTy.isValid()) { 915 // Only the first valid type for a type index will be printed: don't 916 // overwrite it later so it's always clear which type was expected: 917 if (!Types[TypeIdx].isValid()) 918 Types[TypeIdx] = OpTy; 919 else if (Types[TypeIdx] != OpTy) 920 report("Type mismatch in generic instruction", MO, I, OpTy); 921 } else { 922 // Generic instructions must have types attached to their operands. 923 report("Generic instruction is missing a virtual register type", MO, I); 924 } 925 } 926 927 // Generic opcodes must not have physical register operands. 928 for (unsigned I = 0; I < MI->getNumOperands(); ++I) { 929 const MachineOperand *MO = &MI->getOperand(I); 930 if (MO->isReg() && Register::isPhysicalRegister(MO->getReg())) 931 report("Generic instruction cannot have physical register", MO, I); 932 } 933 934 // Avoid out of bounds in checks below. This was already reported earlier. 
935 if (MI->getNumOperands() < MCID.getNumOperands()) 936 return; 937 938 StringRef ErrorInfo; 939 if (!TII->verifyInstruction(*MI, ErrorInfo)) 940 report(ErrorInfo.data(), MI); 941 942 // Verify properties of various specific instruction types 943 switch (MI->getOpcode()) { 944 case TargetOpcode::G_CONSTANT: 945 case TargetOpcode::G_FCONSTANT: { 946 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 947 if (DstTy.isVector()) 948 report("Instruction cannot use a vector result type", MI); 949 950 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) { 951 if (!MI->getOperand(1).isCImm()) { 952 report("G_CONSTANT operand must be cimm", MI); 953 break; 954 } 955 956 const ConstantInt *CI = MI->getOperand(1).getCImm(); 957 if (CI->getBitWidth() != DstTy.getSizeInBits()) 958 report("inconsistent constant size", MI); 959 } else { 960 if (!MI->getOperand(1).isFPImm()) { 961 report("G_FCONSTANT operand must be fpimm", MI); 962 break; 963 } 964 const ConstantFP *CF = MI->getOperand(1).getFPImm(); 965 966 if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) != 967 DstTy.getSizeInBits()) { 968 report("inconsistent constant size", MI); 969 } 970 } 971 972 break; 973 } 974 case TargetOpcode::G_LOAD: 975 case TargetOpcode::G_STORE: 976 case TargetOpcode::G_ZEXTLOAD: 977 case TargetOpcode::G_SEXTLOAD: { 978 LLT ValTy = MRI->getType(MI->getOperand(0).getReg()); 979 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); 980 if (!PtrTy.isPointer()) 981 report("Generic memory instruction must access a pointer", MI); 982 983 // Generic loads and stores must have a single MachineMemOperand 984 // describing that access. 985 if (!MI->hasOneMemOperand()) { 986 report("Generic instruction accessing memory must have one mem operand", 987 MI); 988 } else { 989 const MachineMemOperand &MMO = **MI->memoperands_begin(); 990 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD || 991 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) { 992 if (MMO.getSizeInBits() >= ValTy.getSizeInBits()) 993 report("Generic extload must have a narrower memory type", MI); 994 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) { 995 if (MMO.getSize() > ValTy.getSizeInBytes()) 996 report("load memory size cannot exceed result size", MI); 997 } else if (MI->getOpcode() == TargetOpcode::G_STORE) { 998 if (ValTy.getSizeInBytes() < MMO.getSize()) 999 report("store memory size cannot exceed value size", MI); 1000 } 1001 } 1002 1003 break; 1004 } 1005 case TargetOpcode::G_PHI: { 1006 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1007 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()), 1008 [this, &DstTy](const MachineOperand &MO) { 1009 if (!MO.isReg()) 1010 return true; 1011 LLT Ty = MRI->getType(MO.getReg()); 1012 if (!Ty.isValid() || (Ty != DstTy)) 1013 return false; 1014 return true; 1015 })) 1016 report("Generic Instruction G_PHI has operands with incompatible/missing " 1017 "types", 1018 MI); 1019 break; 1020 } 1021 case TargetOpcode::G_BITCAST: { 1022 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1023 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1024 if (!DstTy.isValid() || !SrcTy.isValid()) 1025 break; 1026 1027 if (SrcTy.isPointer() != DstTy.isPointer()) 1028 report("bitcast cannot convert between pointers and other types", MI); 1029 1030 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits()) 1031 report("bitcast sizes must match", MI); 1032 1033 if (SrcTy == DstTy) 1034 report("bitcast must change the type", MI); 1035 1036 break; 1037 } 1038 case TargetOpcode::G_INTTOPTR: 1039 case TargetOpcode::G_PTRTOINT: 
1040 case TargetOpcode::G_ADDRSPACE_CAST: { 1041 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1042 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1043 if (!DstTy.isValid() || !SrcTy.isValid()) 1044 break; 1045 1046 verifyVectorElementMatch(DstTy, SrcTy, MI); 1047 1048 DstTy = DstTy.getScalarType(); 1049 SrcTy = SrcTy.getScalarType(); 1050 1051 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) { 1052 if (!DstTy.isPointer()) 1053 report("inttoptr result type must be a pointer", MI); 1054 if (SrcTy.isPointer()) 1055 report("inttoptr source type must not be a pointer", MI); 1056 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) { 1057 if (!SrcTy.isPointer()) 1058 report("ptrtoint source type must be a pointer", MI); 1059 if (DstTy.isPointer()) 1060 report("ptrtoint result type must not be a pointer", MI); 1061 } else { 1062 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST); 1063 if (!SrcTy.isPointer() || !DstTy.isPointer()) 1064 report("addrspacecast types must be pointers", MI); 1065 else { 1066 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace()) 1067 report("addrspacecast must convert different address spaces", MI); 1068 } 1069 } 1070 1071 break; 1072 } 1073 case TargetOpcode::G_PTR_ADD: { 1074 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1075 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); 1076 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg()); 1077 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid()) 1078 break; 1079 1080 if (!PtrTy.getScalarType().isPointer()) 1081 report("gep first operand must be a pointer", MI); 1082 1083 if (OffsetTy.getScalarType().isPointer()) 1084 report("gep offset operand must not be a pointer", MI); 1085 1086 // TODO: Is the offset allowed to be a scalar with a vector? 1087 break; 1088 } 1089 case TargetOpcode::G_PTRMASK: { 1090 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1091 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1092 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg()); 1093 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid()) 1094 break; 1095 1096 if (!DstTy.getScalarType().isPointer()) 1097 report("ptrmask result type must be a pointer", MI); 1098 1099 if (!MaskTy.getScalarType().isScalar()) 1100 report("ptrmask mask type must be an integer", MI); 1101 1102 verifyVectorElementMatch(DstTy, MaskTy, MI); 1103 break; 1104 } 1105 case TargetOpcode::G_SEXT: 1106 case TargetOpcode::G_ZEXT: 1107 case TargetOpcode::G_ANYEXT: 1108 case TargetOpcode::G_TRUNC: 1109 case TargetOpcode::G_FPEXT: 1110 case TargetOpcode::G_FPTRUNC: { 1111 // Number of operands and presence of types are already checked (and 1112 // reported in case of any issues), so no need to report them again.
As 1113 // we're trying to report as many issues as possible at once, however, the 1114 // instructions aren't guaranteed to have the right number of operands or 1115 // types attached to them at this point 1116 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}"); 1117 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1118 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1119 if (!DstTy.isValid() || !SrcTy.isValid()) 1120 break; 1121 1122 LLT DstElTy = DstTy.getScalarType(); 1123 LLT SrcElTy = SrcTy.getScalarType(); 1124 if (DstElTy.isPointer() || SrcElTy.isPointer()) 1125 report("Generic extend/truncate can not operate on pointers", MI); 1126 1127 verifyVectorElementMatch(DstTy, SrcTy, MI); 1128 1129 unsigned DstSize = DstElTy.getSizeInBits(); 1130 unsigned SrcSize = SrcElTy.getSizeInBits(); 1131 switch (MI->getOpcode()) { 1132 default: 1133 if (DstSize <= SrcSize) 1134 report("Generic extend has destination type no larger than source", MI); 1135 break; 1136 case TargetOpcode::G_TRUNC: 1137 case TargetOpcode::G_FPTRUNC: 1138 if (DstSize >= SrcSize) 1139 report("Generic truncate has destination type no smaller than source", 1140 MI); 1141 break; 1142 } 1143 break; 1144 } 1145 case TargetOpcode::G_SELECT: { 1146 LLT SelTy = MRI->getType(MI->getOperand(0).getReg()); 1147 LLT CondTy = MRI->getType(MI->getOperand(1).getReg()); 1148 if (!SelTy.isValid() || !CondTy.isValid()) 1149 break; 1150 1151 // Scalar condition select on a vector is valid. 1152 if (CondTy.isVector()) 1153 verifyVectorElementMatch(SelTy, CondTy, MI); 1154 break; 1155 } 1156 case TargetOpcode::G_MERGE_VALUES: { 1157 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar, 1158 // e.g. s2N = MERGE sN, sN 1159 // Merging multiple scalars into a vector is not allowed, should use 1160 // G_BUILD_VECTOR for that. 1161 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1162 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1163 if (DstTy.isVector() || SrcTy.isVector()) 1164 report("G_MERGE_VALUES cannot operate on vectors", MI); 1165 1166 const unsigned NumOps = MI->getNumOperands(); 1167 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1)) 1168 report("G_MERGE_VALUES result size is inconsistent", MI); 1169 1170 for (unsigned I = 2; I != NumOps; ++I) { 1171 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy) 1172 report("G_MERGE_VALUES source types do not match", MI); 1173 } 1174 1175 break; 1176 } 1177 case TargetOpcode::G_UNMERGE_VALUES: { 1178 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1179 LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg()); 1180 // For now G_UNMERGE can split vectors. 1181 for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) { 1182 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) 1183 report("G_UNMERGE_VALUES destination types do not match", MI); 1184 } 1185 if (SrcTy.getSizeInBits() != 1186 (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) { 1187 report("G_UNMERGE_VALUES source operand does not cover dest operands", 1188 MI); 1189 } 1190 break; 1191 } 1192 case TargetOpcode::G_BUILD_VECTOR: { 1193 // Source types must be scalars, dest type a vector. Total size of scalars 1194 // must match the dest vector size. 
1195 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1196 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg()); 1197 if (!DstTy.isVector() || SrcEltTy.isVector()) { 1198 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI); 1199 break; 1200 } 1201 1202 if (DstTy.getElementType() != SrcEltTy) 1203 report("G_BUILD_VECTOR result element type must match source type", MI); 1204 1205 if (DstTy.getNumElements() != MI->getNumOperands() - 1) 1206 report("G_BUILD_VECTOR must have an operand for each element", MI); 1207 1208 for (unsigned i = 2; i < MI->getNumOperands(); ++i) { 1209 if (MRI->getType(MI->getOperand(1).getReg()) != 1210 MRI->getType(MI->getOperand(i).getReg())) 1211 report("G_BUILD_VECTOR source operand types are not homogeneous", MI); 1212 } 1213 1214 break; 1215 } 1216 case TargetOpcode::G_BUILD_VECTOR_TRUNC: { 1217 // Source types must be scalars, dest type a vector. Scalar types must be 1218 // larger than the dest vector elt type, as this is a truncating operation. 1219 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1220 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg()); 1221 if (!DstTy.isVector() || SrcEltTy.isVector()) 1222 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands", 1223 MI); 1224 for (unsigned i = 2; i < MI->getNumOperands(); ++i) { 1225 if (MRI->getType(MI->getOperand(1).getReg()) != 1226 MRI->getType(MI->getOperand(i).getReg())) 1227 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous", 1228 MI); 1229 } 1230 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits()) 1231 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than " 1232 "dest elt type", 1233 MI); 1234 break; 1235 } 1236 case TargetOpcode::G_CONCAT_VECTORS: { 1237 // Source types should be vectors, and total size should match the dest 1238 // vector size.
1239 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1240 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1241 if (!DstTy.isVector() || !SrcTy.isVector()) 1242 report("G_CONCAT_VECTOR requires vector source and destination operands", 1243 MI); 1244 for (unsigned i = 2; i < MI->getNumOperands(); ++i) { 1245 if (MRI->getType(MI->getOperand(1).getReg()) != 1246 MRI->getType(MI->getOperand(i).getReg())) 1247 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI); 1248 } 1249 if (DstTy.getNumElements() != 1250 SrcTy.getNumElements() * (MI->getNumOperands() - 1)) 1251 report("G_CONCAT_VECTOR num dest and source elements should match", MI); 1252 break; 1253 } 1254 case TargetOpcode::G_ICMP: 1255 case TargetOpcode::G_FCMP: { 1256 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1257 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg()); 1258 1259 if ((DstTy.isVector() != SrcTy.isVector()) || 1260 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements())) 1261 report("Generic vector icmp/fcmp must preserve number of lanes", MI); 1262 1263 break; 1264 } 1265 case TargetOpcode::G_EXTRACT: { 1266 const MachineOperand &SrcOp = MI->getOperand(1); 1267 if (!SrcOp.isReg()) { 1268 report("extract source must be a register", MI); 1269 break; 1270 } 1271 1272 const MachineOperand &OffsetOp = MI->getOperand(2); 1273 if (!OffsetOp.isImm()) { 1274 report("extract offset must be a constant", MI); 1275 break; 1276 } 1277 1278 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits(); 1279 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits(); 1280 if (SrcSize == DstSize) 1281 report("extract source must be larger than result", MI); 1282 1283 if (DstSize + OffsetOp.getImm() > SrcSize) 1284 report("extract reads past end of register", MI); 1285 break; 1286 } 1287 case TargetOpcode::G_INSERT: { 1288 const MachineOperand &SrcOp = MI->getOperand(2); 1289 if (!SrcOp.isReg()) { 1290 report("insert source must be a register", MI); 1291 break; 1292 } 1293 1294 const MachineOperand &OffsetOp = MI->getOperand(3); 1295 if (!OffsetOp.isImm()) { 1296 report("insert offset must be a constant", MI); 1297 break; 1298 } 1299 1300 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits(); 1301 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits(); 1302 1303 if (DstSize <= SrcSize) 1304 report("inserted size must be smaller than total register", MI); 1305 1306 if (SrcSize + OffsetOp.getImm() > DstSize) 1307 report("insert writes past end of register", MI); 1308 1309 break; 1310 } 1311 case TargetOpcode::G_JUMP_TABLE: { 1312 if (!MI->getOperand(1).isJTI()) 1313 report("G_JUMP_TABLE source operand must be a jump table index", MI); 1314 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1315 if (!DstTy.isPointer()) 1316 report("G_JUMP_TABLE dest operand must have a pointer type", MI); 1317 break; 1318 } 1319 case TargetOpcode::G_BRJT: { 1320 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer()) 1321 report("G_BRJT src operand 0 must be a pointer type", MI); 1322 1323 if (!MI->getOperand(1).isJTI()) 1324 report("G_BRJT src operand 1 must be a jump table index", MI); 1325 1326 const auto &IdxOp = MI->getOperand(2); 1327 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer()) 1328 report("G_BRJT src operand 2 must be a scalar reg type", MI); 1329 break; 1330 } 1331 case TargetOpcode::G_INTRINSIC: 1332 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: { 1333 // TODO: Should verify number of def and use operands, but the current 
1334 // interface requires passing in IR types for mangling. 1335 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs()); 1336 if (!IntrIDOp.isIntrinsicID()) { 1337 report("G_INTRINSIC first src operand must be an intrinsic ID", MI); 1338 break; 1339 } 1340 1341 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC; 1342 unsigned IntrID = IntrIDOp.getIntrinsicID(); 1343 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) { 1344 AttributeList Attrs 1345 = Intrinsic::getAttributes(MF->getFunction().getContext(), 1346 static_cast<Intrinsic::ID>(IntrID)); 1347 bool DeclHasSideEffects = !Attrs.hasFnAttribute(Attribute::ReadNone); 1348 if (NoSideEffects && DeclHasSideEffects) { 1349 report("G_INTRINSIC used with intrinsic that accesses memory", MI); 1350 break; 1351 } 1352 if (!NoSideEffects && !DeclHasSideEffects) { 1353 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI); 1354 break; 1355 } 1356 } 1357 1358 break; 1359 } 1360 case TargetOpcode::G_SEXT_INREG: { 1361 if (!MI->getOperand(2).isImm()) { 1362 report("G_SEXT_INREG expects an immediate operand #2", MI); 1363 break; 1364 } 1365 1366 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1367 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1368 verifyVectorElementMatch(DstTy, SrcTy, MI); 1369 1370 int64_t Imm = MI->getOperand(2).getImm(); 1371 if (Imm <= 0) 1372 report("G_SEXT_INREG size must be >= 1", MI); 1373 if (Imm >= SrcTy.getScalarSizeInBits()) 1374 report("G_SEXT_INREG size must be less than source bit width", MI); 1375 break; 1376 } 1377 case TargetOpcode::G_SHUFFLE_VECTOR: { 1378 const MachineOperand &MaskOp = MI->getOperand(3); 1379 if (!MaskOp.isShuffleMask()) { 1380 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI); 1381 break; 1382 } 1383 1384 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1385 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg()); 1386 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg()); 1387 1388 if (Src0Ty != Src1Ty) 1389 report("Source operands must be the same type", MI); 1390 1391 if (Src0Ty.getScalarType() != DstTy.getScalarType()) 1392 report("G_SHUFFLE_VECTOR cannot change element type", MI); 1393 1394 // Don't check that all operands are vector because scalars are used in 1395 // place of 1 element vectors. 1396 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1; 1397 int DstNumElts = DstTy.isVector() ? 
DstTy.getNumElements() : 1; 1398 1399 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask(); 1400 1401 if (static_cast<int>(MaskIdxes.size()) != DstNumElts) 1402 report("Wrong result type for shufflemask", MI); 1403 1404 for (int Idx : MaskIdxes) { 1405 if (Idx < 0) 1406 continue; 1407 1408 if (Idx >= 2 * SrcNumElts) 1409 report("Out of bounds shuffle index", MI); 1410 } 1411 1412 break; 1413 } 1414 case TargetOpcode::G_DYN_STACKALLOC: { 1415 const MachineOperand &DstOp = MI->getOperand(0); 1416 const MachineOperand &AllocOp = MI->getOperand(1); 1417 const MachineOperand &AlignOp = MI->getOperand(2); 1418 1419 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) { 1420 report("dst operand 0 must be a pointer type", MI); 1421 break; 1422 } 1423 1424 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) { 1425 report("src operand 1 must be a scalar reg type", MI); 1426 break; 1427 } 1428 1429 if (!AlignOp.isImm()) { 1430 report("src operand 2 must be an immediate type", MI); 1431 break; 1432 } 1433 break; 1434 } 1435 case TargetOpcode::G_MEMCPY: 1436 case TargetOpcode::G_MEMMOVE: { 1437 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands(); 1438 if (MMOs.size() != 2) { 1439 report("memcpy/memmove must have 2 memory operands", MI); 1440 break; 1441 } 1442 1443 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) || 1444 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) { 1445 report("wrong memory operand types", MI); 1446 break; 1447 } 1448 1449 if (MMOs[0]->getSize() != MMOs[1]->getSize()) 1450 report("inconsistent memory operand sizes", MI); 1451 1452 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1453 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg()); 1454 1455 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) { 1456 report("memory instruction operand must be a pointer", MI); 1457 break; 1458 } 1459 1460 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1461 report("inconsistent store address space", MI); 1462 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace()) 1463 report("inconsistent load address space", MI); 1464 1465 break; 1466 } 1467 case TargetOpcode::G_MEMSET: { 1468 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands(); 1469 if (MMOs.size() != 1) { 1470 report("memset must have 1 memory operand", MI); 1471 break; 1472 } 1473 1474 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) { 1475 report("memset memory operand must be a store", MI); 1476 break; 1477 } 1478 1479 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1480 if (!DstPtrTy.isPointer()) { 1481 report("memset operand must be a pointer", MI); 1482 break; 1483 } 1484 1485 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1486 report("inconsistent memset address space", MI); 1487 1488 break; 1489 } 1490 case TargetOpcode::G_VECREDUCE_SEQ_FADD: 1491 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: { 1492 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1493 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1494 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1495 if (!DstTy.isScalar()) 1496 report("Vector reduction requires a scalar destination type", MI); 1497 if (!Src1Ty.isScalar()) 1498 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI); 1499 if (!Src2Ty.isVector()) 1500 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI); 1501 break; 1502 } 1503 case TargetOpcode::G_VECREDUCE_FADD: 1504 case TargetOpcode::G_VECREDUCE_FMUL: 1505 case TargetOpcode::G_VECREDUCE_FMAX: 1506 case 
TargetOpcode::G_VECREDUCE_FMIN: 1507 case TargetOpcode::G_VECREDUCE_ADD: 1508 case TargetOpcode::G_VECREDUCE_MUL: 1509 case TargetOpcode::G_VECREDUCE_AND: 1510 case TargetOpcode::G_VECREDUCE_OR: 1511 case TargetOpcode::G_VECREDUCE_XOR: 1512 case TargetOpcode::G_VECREDUCE_SMAX: 1513 case TargetOpcode::G_VECREDUCE_SMIN: 1514 case TargetOpcode::G_VECREDUCE_UMAX: 1515 case TargetOpcode::G_VECREDUCE_UMIN: { 1516 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1517 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1518 if (!DstTy.isScalar()) 1519 report("Vector reduction requires a scalar destination type", MI); 1520 if (!SrcTy.isVector()) 1521 report("Vector reduction requires a vector source", MI); 1522 break; 1523 } 1524 default: 1525 break; 1526 } 1527 } 1528 1529 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { 1530 const MCInstrDesc &MCID = MI->getDesc(); 1531 if (MI->getNumOperands() < MCID.getNumOperands()) { 1532 report("Too few operands", MI); 1533 errs() << MCID.getNumOperands() << " operands expected, but " 1534 << MI->getNumOperands() << " given.\n"; 1535 } 1536 1537 if (MI->isPHI()) { 1538 if (MF->getProperties().hasProperty( 1539 MachineFunctionProperties::Property::NoPHIs)) 1540 report("Found PHI instruction with NoPHIs property set", MI); 1541 1542 if (FirstNonPHI) 1543 report("Found PHI instruction after non-PHI", MI); 1544 } else if (FirstNonPHI == nullptr) 1545 FirstNonPHI = MI; 1546 1547 // Check the tied operands. 1548 if (MI->isInlineAsm()) 1549 verifyInlineAsm(MI); 1550 1551 // Check that unspillable terminators define a reg and have at most one use. 1552 if (TII->isUnspillableTerminator(MI)) { 1553 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef()) 1554 report("Unspillable Terminator does not define a reg", MI); 1555 Register Def = MI->getOperand(0).getReg(); 1556 if (Def.isVirtual() && 1557 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1) 1558 report("Unspillable Terminator expected to have at most one use!", MI); 1559 } 1560 1561 // A fully-formed DBG_VALUE must have a location. Ignore partially formed 1562 // DBG_VALUEs: these are convenient to use in tests, but should never get 1563 // generated. 1564 if (MI->isDebugValue() && MI->getNumOperands() == 4) 1565 if (!MI->getDebugLoc()) 1566 report("Missing DebugLoc for debug instruction", MI); 1567 1568 // Meta instructions should never be the subject of debug value tracking; 1569 // they don't create a value in the output program at all. 1570 if (MI->isMetaInstruction() && MI->peekDebugInstrNum()) 1571 report("Metadata instruction should not have a value tracking number", MI); 1572 1573 // Check the MachineMemOperands for basic consistency. 1574 for (MachineMemOperand *Op : MI->memoperands()) { 1575 if (Op->isLoad() && !MI->mayLoad()) 1576 report("Missing mayLoad flag", MI); 1577 if (Op->isStore() && !MI->mayStore()) 1578 report("Missing mayStore flag", MI); 1579 } 1580 1581 // Debug values must not have a slot index. 1582 // Other instructions must have one, unless they are inside a bundle.
1583 if (LiveInts) { 1584 bool mapped = !LiveInts->isNotInMIMap(*MI); 1585 if (MI->isDebugInstr()) { 1586 if (mapped) 1587 report("Debug instruction has a slot index", MI); 1588 } else if (MI->isInsideBundle()) { 1589 if (mapped) 1590 report("Instruction inside bundle has a slot index", MI); 1591 } else { 1592 if (!mapped) 1593 report("Missing slot index", MI); 1594 } 1595 } 1596 1597 if (isPreISelGenericOpcode(MCID.getOpcode())) { 1598 verifyPreISelGenericInstruction(MI); 1599 return; 1600 } 1601 1602 StringRef ErrorInfo; 1603 if (!TII->verifyInstruction(*MI, ErrorInfo)) 1604 report(ErrorInfo.data(), MI); 1605 1606 // Verify properties of various specific instruction types 1607 switch (MI->getOpcode()) { 1608 case TargetOpcode::COPY: { 1609 if (foundErrors) 1610 break; 1611 const MachineOperand &DstOp = MI->getOperand(0); 1612 const MachineOperand &SrcOp = MI->getOperand(1); 1613 LLT DstTy = MRI->getType(DstOp.getReg()); 1614 LLT SrcTy = MRI->getType(SrcOp.getReg()); 1615 if (SrcTy.isValid() && DstTy.isValid()) { 1616 // If both types are valid, check that the types are the same. 1617 if (SrcTy != DstTy) { 1618 report("Copy Instruction is illegal with mismatching types", MI); 1619 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n"; 1620 } 1621 } 1622 if (SrcTy.isValid() || DstTy.isValid()) { 1623 // If one of them have valid types, let's just check they have the same 1624 // size. 1625 unsigned SrcSize = TRI->getRegSizeInBits(SrcOp.getReg(), *MRI); 1626 unsigned DstSize = TRI->getRegSizeInBits(DstOp.getReg(), *MRI); 1627 assert(SrcSize && "Expecting size here"); 1628 assert(DstSize && "Expecting size here"); 1629 if (SrcSize != DstSize) 1630 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) { 1631 report("Copy Instruction is illegal with mismatching sizes", MI); 1632 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize 1633 << "\n"; 1634 } 1635 } 1636 break; 1637 } 1638 case TargetOpcode::STATEPOINT: { 1639 StatepointOpers SO(MI); 1640 if (!MI->getOperand(SO.getIDPos()).isImm() || 1641 !MI->getOperand(SO.getNBytesPos()).isImm() || 1642 !MI->getOperand(SO.getNCallArgsPos()).isImm()) { 1643 report("meta operands to STATEPOINT not constant!", MI); 1644 break; 1645 } 1646 1647 auto VerifyStackMapConstant = [&](unsigned Offset) { 1648 if (Offset >= MI->getNumOperands()) { 1649 report("stack map constant to STATEPOINT is out of range!", MI); 1650 return; 1651 } 1652 if (!MI->getOperand(Offset - 1).isImm() || 1653 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp || 1654 !MI->getOperand(Offset).isImm()) 1655 report("stack map constant to STATEPOINT not well formed!", MI); 1656 }; 1657 VerifyStackMapConstant(SO.getCCIdx()); 1658 VerifyStackMapConstant(SO.getFlagsIdx()); 1659 VerifyStackMapConstant(SO.getNumDeoptArgsIdx()); 1660 VerifyStackMapConstant(SO.getNumGCPtrIdx()); 1661 VerifyStackMapConstant(SO.getNumAllocaIdx()); 1662 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx()); 1663 1664 // Verify that all explicit statepoint defs are tied to gc operands as 1665 // they are expected to be a relocation of gc operands. 
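// In other words, every explicit def of a STATEPOINT is the relocated counterpart of one of the instruction's GC pointer operands, so each def's tied use must land in the GC pointer section of the operand list, i.e. in the range [FirstGCPtrIdx, LastGCPtrIdx] computed below.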
1666 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx(); 1667 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2; 1668 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) { 1669 unsigned UseOpIdx; 1670 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) { 1671 report("STATEPOINT defs expected to be tied", MI); 1672 break; 1673 } 1674 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) { 1675 report("STATEPOINT def tied to non-gc operand", MI); 1676 break; 1677 } 1678 } 1679 1680 // TODO: verify we have properly encoded deopt arguments 1681 } break; 1682 } 1683 } 1684 1685 void 1686 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { 1687 const MachineInstr *MI = MO->getParent(); 1688 const MCInstrDesc &MCID = MI->getDesc(); 1689 unsigned NumDefs = MCID.getNumDefs(); 1690 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT) 1691 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0; 1692 1693 // The first MCID.NumDefs operands must be explicit register defines 1694 if (MONum < NumDefs) { 1695 const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; 1696 if (!MO->isReg()) 1697 report("Explicit definition must be a register", MO, MONum); 1698 else if (!MO->isDef() && !MCOI.isOptionalDef()) 1699 report("Explicit definition marked as use", MO, MONum); 1700 else if (MO->isImplicit()) 1701 report("Explicit definition marked as implicit", MO, MONum); 1702 } else if (MONum < MCID.getNumOperands()) { 1703 const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; 1704 // Don't check if it's the last operand in a variadic instruction. See, 1705 // e.g., LDM_RET in the arm back end. Check non-variadic operands only. 1706 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1; 1707 if (!IsOptional) { 1708 if (MO->isReg()) { 1709 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs()) 1710 report("Explicit operand marked as def", MO, MONum); 1711 if (MO->isImplicit()) 1712 report("Explicit operand marked as implicit", MO, MONum); 1713 } 1714 1715 // Check that an instruction has register operands only as expected. 1716 if (MCOI.OperandType == MCOI::OPERAND_REGISTER && 1717 !MO->isReg() && !MO->isFI()) 1718 report("Expected a register operand.", MO, MONum); 1719 if ((MCOI.OperandType == MCOI::OPERAND_IMMEDIATE || 1720 MCOI.OperandType == MCOI::OPERAND_PCREL) && MO->isReg()) 1721 report("Expected a non-register operand.", MO, MONum); 1722 } 1723 1724 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO); 1725 if (TiedTo != -1) { 1726 if (!MO->isReg()) 1727 report("Tied use must be a register", MO, MONum); 1728 else if (!MO->isTied()) 1729 report("Operand should be tied", MO, MONum); 1730 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum)) 1731 report("Tied def doesn't match MCInstrDesc", MO, MONum); 1732 else if (Register::isPhysicalRegister(MO->getReg())) { 1733 const MachineOperand &MOTied = MI->getOperand(TiedTo); 1734 if (!MOTied.isReg()) 1735 report("Tied counterpart must be a register", &MOTied, TiedTo); 1736 else if (Register::isPhysicalRegister(MOTied.getReg()) && 1737 MO->getReg() != MOTied.getReg()) 1738 report("Tied physical registers must match.", &MOTied, TiedTo); 1739 } 1740 } else if (MO->isReg() && MO->isTied()) 1741 report("Explicit operand should not be tied", MO, MONum); 1742 } else { 1743 // ARM adds %reg0 operands to indicate predicates. We'll allow that. 
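// (Hence the check below only fires when the extra explicit operand names a real register; a %noreg placeholder operand is tolerated.)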
1744 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg()) 1745 report("Extra explicit operand on non-variadic instruction", MO, MONum); 1746 } 1747 1748 switch (MO->getType()) { 1749 case MachineOperand::MO_Register: { 1750 const Register Reg = MO->getReg(); 1751 if (!Reg) 1752 return; 1753 if (MRI->tracksLiveness() && !MI->isDebugValue()) 1754 checkLiveness(MO, MONum); 1755 1756 // Verify the consistency of tied operands. 1757 if (MO->isTied()) { 1758 unsigned OtherIdx = MI->findTiedOperandIdx(MONum); 1759 const MachineOperand &OtherMO = MI->getOperand(OtherIdx); 1760 if (!OtherMO.isReg()) 1761 report("Must be tied to a register", MO, MONum); 1762 if (!OtherMO.isTied()) 1763 report("Missing tie flags on tied operand", MO, MONum); 1764 if (MI->findTiedOperandIdx(OtherIdx) != MONum) 1765 report("Inconsistent tie links", MO, MONum); 1766 if (MONum < MCID.getNumDefs()) { 1767 if (OtherIdx < MCID.getNumOperands()) { 1768 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO)) 1769 report("Explicit def tied to explicit use without tie constraint", 1770 MO, MONum); 1771 } else { 1772 if (!OtherMO.isImplicit()) 1773 report("Explicit def should be tied to implicit use", MO, MONum); 1774 } 1775 } 1776 } 1777 1778 // Verify two-address constraints after the twoaddressinstruction pass. 1779 // Both the twoaddressinstruction pass and the phi-node-elimination pass 1780 // call MRI->leaveSSA() to mark MF as NoSSA, but this check is only valid 1781 // after the twoaddressinstruction pass, not after phi-node-elimination. 1782 // So instead of keying on NoSSA, base the two-address verification on the 1783 // TiedOpsRewritten property, which is set by the twoaddressinstruction 1784 // pass. 1785 unsigned DefIdx; 1786 if (MF->getProperties().hasProperty( 1787 MachineFunctionProperties::Property::TiedOpsRewritten) && 1788 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) && 1789 Reg != MI->getOperand(DefIdx).getReg()) 1790 report("Two-address instruction operands must be identical", MO, MONum); 1791 1792 // Check register classes. 1793 unsigned SubIdx = MO->getSubReg(); 1794 1795 if (Register::isPhysicalRegister(Reg)) { 1796 if (SubIdx) { 1797 report("Illegal subregister index for physical register", MO, MONum); 1798 return; 1799 } 1800 if (MONum < MCID.getNumOperands()) { 1801 if (const TargetRegisterClass *DRC = 1802 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1803 if (!DRC->contains(Reg)) { 1804 report("Illegal physical register for instruction", MO, MONum); 1805 errs() << printReg(Reg, TRI) << " is not a " 1806 << TRI->getRegClassName(DRC) << " register.\n"; 1807 } 1808 } 1809 } 1810 if (MO->isRenamable()) { 1811 if (MRI->isReserved(Reg)) { 1812 report("isRenamable set on reserved register", MO, MONum); 1813 return; 1814 } 1815 } 1816 if (MI->isDebugValue() && MO->isUse() && !MO->isDebug()) { 1817 report("Use-reg is not IsDebug in a DBG_VALUE", MO, MONum); 1818 return; 1819 } 1820 } else { 1821 // Virtual register. 1822 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg); 1823 if (!RC) { 1824 // This is a generic virtual register. 1825 1826 // Do not allow undef uses for generic virtual registers. This ensures 1827 // getVRegDef can never fail and return null on a generic register. 1828 // 1829 // FIXME: This restriction should probably be broadened to all SSA 1830 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still 1831 // run on the SSA function just before phi elimination. 
1832 if (MO->isUndef()) 1833 report("Generic virtual register use cannot be undef", MO, MONum); 1834 1835 // If we're post-Select, we can't have gvregs anymore. 1836 if (isFunctionSelected) { 1837 report("Generic virtual register invalid in a Selected function", 1838 MO, MONum); 1839 return; 1840 } 1841 1842 // The gvreg must have a type and it must not have a SubIdx. 1843 LLT Ty = MRI->getType(Reg); 1844 if (!Ty.isValid()) { 1845 report("Generic virtual register must have a valid type", MO, 1846 MONum); 1847 return; 1848 } 1849 1850 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg); 1851 1852 // If we're post-RegBankSelect, the gvreg must have a bank. 1853 if (!RegBank && isFunctionRegBankSelected) { 1854 report("Generic virtual register must have a bank in a " 1855 "RegBankSelected function", 1856 MO, MONum); 1857 return; 1858 } 1859 1860 // Make sure the register fits into its register bank if any. 1861 if (RegBank && Ty.isValid() && 1862 RegBank->getSize() < Ty.getSizeInBits()) { 1863 report("Register bank is too small for virtual register", MO, 1864 MONum); 1865 errs() << "Register bank " << RegBank->getName() << " too small(" 1866 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits() 1867 << "-bits\n"; 1868 return; 1869 } 1870 if (SubIdx) { 1871 report("Generic virtual register does not allow subregister index", MO, 1872 MONum); 1873 return; 1874 } 1875 1876 // If this is a target specific instruction and this operand 1877 // has register class constraint, the virtual register must 1878 // comply to it. 1879 if (!isPreISelGenericOpcode(MCID.getOpcode()) && 1880 MONum < MCID.getNumOperands() && 1881 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1882 report("Virtual register does not match instruction constraint", MO, 1883 MONum); 1884 errs() << "Expect register class " 1885 << TRI->getRegClassName( 1886 TII->getRegClass(MCID, MONum, TRI, *MF)) 1887 << " but got nothing\n"; 1888 return; 1889 } 1890 1891 break; 1892 } 1893 if (SubIdx) { 1894 const TargetRegisterClass *SRC = 1895 TRI->getSubClassWithSubReg(RC, SubIdx); 1896 if (!SRC) { 1897 report("Invalid subregister index for virtual register", MO, MONum); 1898 errs() << "Register class " << TRI->getRegClassName(RC) 1899 << " does not support subreg index " << SubIdx << "\n"; 1900 return; 1901 } 1902 if (RC != SRC) { 1903 report("Invalid register class for subregister index", MO, MONum); 1904 errs() << "Register class " << TRI->getRegClassName(RC) 1905 << " does not fully support subreg index " << SubIdx << "\n"; 1906 return; 1907 } 1908 } 1909 if (MONum < MCID.getNumOperands()) { 1910 if (const TargetRegisterClass *DRC = 1911 TII->getRegClass(MCID, MONum, TRI, *MF)) { 1912 if (SubIdx) { 1913 const TargetRegisterClass *SuperRC = 1914 TRI->getLargestLegalSuperClass(RC, *MF); 1915 if (!SuperRC) { 1916 report("No largest legal super class exists.", MO, MONum); 1917 return; 1918 } 1919 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx); 1920 if (!DRC) { 1921 report("No matching super-reg register class.", MO, MONum); 1922 return; 1923 } 1924 } 1925 if (!RC->hasSuperClassEq(DRC)) { 1926 report("Illegal virtual register for instruction", MO, MONum); 1927 errs() << "Expected a " << TRI->getRegClassName(DRC) 1928 << " register, but got a " << TRI->getRegClassName(RC) 1929 << " register\n"; 1930 } 1931 } 1932 } 1933 } 1934 break; 1935 } 1936 1937 case MachineOperand::MO_RegisterMask: 1938 regMasks.push_back(MO->getRegMask()); 1939 break; 1940 1941 case MachineOperand::MO_MachineBasicBlock: 1942 if (MI->isPHI() && 
!MO->getMBB()->isSuccessor(MI->getParent())) 1943 report("PHI operand is not in the CFG", MO, MONum); 1944 break; 1945 1946 case MachineOperand::MO_FrameIndex: 1947 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) && 1948 LiveInts && !LiveInts->isNotInMIMap(*MI)) { 1949 int FI = MO->getIndex(); 1950 LiveInterval &LI = LiveStks->getInterval(FI); 1951 SlotIndex Idx = LiveInts->getInstructionIndex(*MI); 1952 1953 bool stores = MI->mayStore(); 1954 bool loads = MI->mayLoad(); 1955 // For a memory-to-memory move, we need to check if the frame 1956 // index is used for storing or loading, by inspecting the 1957 // memory operands. 1958 if (stores && loads) { 1959 for (auto *MMO : MI->memoperands()) { 1960 const PseudoSourceValue *PSV = MMO->getPseudoValue(); 1961 if (PSV == nullptr) continue; 1962 const FixedStackPseudoSourceValue *Value = 1963 dyn_cast<FixedStackPseudoSourceValue>(PSV); 1964 if (Value == nullptr) continue; 1965 if (Value->getFrameIndex() != FI) continue; 1966 1967 if (MMO->isStore()) 1968 loads = false; 1969 else 1970 stores = false; 1971 break; 1972 } 1973 if (loads == stores) 1974 report("Missing fixed stack memoperand.", MI); 1975 } 1976 if (loads && !LI.liveAt(Idx.getRegSlot(true))) { 1977 report("Instruction loads from dead spill slot", MO, MONum); 1978 errs() << "Live stack: " << LI << '\n'; 1979 } 1980 if (stores && !LI.liveAt(Idx.getRegSlot())) { 1981 report("Instruction stores to dead spill slot", MO, MONum); 1982 errs() << "Live stack: " << LI << '\n'; 1983 } 1984 } 1985 break; 1986 1987 default: 1988 break; 1989 } 1990 } 1991 1992 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO, 1993 unsigned MONum, SlotIndex UseIdx, 1994 const LiveRange &LR, 1995 Register VRegOrUnit, 1996 LaneBitmask LaneMask) { 1997 LiveQueryResult LRQ = LR.Query(UseIdx); 1998 // Check if we have a segment at the use, note however that we only need one 1999 // live subregister range, the others may be dead. 2000 if (!LRQ.valueIn() && LaneMask.none()) { 2001 report("No live segment at use", MO, MONum); 2002 report_context_liverange(LR); 2003 report_context_vreg_regunit(VRegOrUnit); 2004 report_context(UseIdx); 2005 } 2006 if (MO->isKill() && !LRQ.isKill()) { 2007 report("Live range continues after kill flag", MO, MONum); 2008 report_context_liverange(LR); 2009 report_context_vreg_regunit(VRegOrUnit); 2010 if (LaneMask.any()) 2011 report_context_lanemask(LaneMask); 2012 report_context(UseIdx); 2013 } 2014 } 2015 2016 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO, 2017 unsigned MONum, SlotIndex DefIdx, 2018 const LiveRange &LR, 2019 Register VRegOrUnit, 2020 bool SubRangeCheck, 2021 LaneBitmask LaneMask) { 2022 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) { 2023 assert(VNI && "NULL valno is not allowed"); 2024 if (VNI->def != DefIdx) { 2025 report("Inconsistent valno->def", MO, MONum); 2026 report_context_liverange(LR); 2027 report_context_vreg_regunit(VRegOrUnit); 2028 if (LaneMask.any()) 2029 report_context_lanemask(LaneMask); 2030 report_context(*VNI); 2031 report_context(DefIdx); 2032 } 2033 } else { 2034 report("No live segment at def", MO, MONum); 2035 report_context_liverange(LR); 2036 report_context_vreg_regunit(VRegOrUnit); 2037 if (LaneMask.any()) 2038 report_context_lanemask(LaneMask); 2039 report_context(DefIdx); 2040 } 2041 // Check that, if the dead def flag is present, LiveInts agree. 
2042 if (MO->isDead()) { 2043 LiveQueryResult LRQ = LR.Query(DefIdx); 2044 if (!LRQ.isDeadDef()) { 2045 assert(Register::isVirtualRegister(VRegOrUnit) && 2046 "Expecting a virtual register."); 2047 // A dead subreg def only tells us that the specific subreg is dead. There 2048 // could be other non-dead defs of other subregs, or we could have other 2049 // parts of the register being live through the instruction. So unless we 2050 // are checking liveness for a subrange it is ok for the live range to 2051 // continue, given that we have a dead def of a subregister. 2052 if (SubRangeCheck || MO->getSubReg() == 0) { 2053 report("Live range continues after dead def flag", MO, MONum); 2054 report_context_liverange(LR); 2055 report_context_vreg_regunit(VRegOrUnit); 2056 if (LaneMask.any()) 2057 report_context_lanemask(LaneMask); 2058 } 2059 } 2060 } 2061 } 2062 2063 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) { 2064 const MachineInstr *MI = MO->getParent(); 2065 const Register Reg = MO->getReg(); 2066 2067 // Both use and def operands can read a register. 2068 if (MO->readsReg()) { 2069 if (MO->isKill()) 2070 addRegWithSubRegs(regsKilled, Reg); 2071 2072 // Check that LiveVars knows this kill. 2073 if (LiveVars && Register::isVirtualRegister(Reg) && MO->isKill()) { 2074 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 2075 if (!is_contained(VI.Kills, MI)) 2076 report("Kill missing from LiveVariables", MO, MONum); 2077 } 2078 2079 // Check LiveInts liveness and kill. 2080 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2081 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI); 2082 // Check the cached regunit intervals. 2083 if (Reg.isPhysical() && !isReserved(Reg)) { 2084 for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid(); 2085 ++Units) { 2086 if (MRI->isReservedRegUnit(*Units)) 2087 continue; 2088 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units)) 2089 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units); 2090 } 2091 } 2092 2093 if (Register::isVirtualRegister(Reg)) { 2094 if (LiveInts->hasInterval(Reg)) { 2095 // This is a virtual register interval. 2096 const LiveInterval &LI = LiveInts->getInterval(Reg); 2097 checkLivenessAtUse(MO, MONum, UseIdx, LI, Reg); 2098 2099 if (LI.hasSubRanges() && !MO->isDef()) { 2100 unsigned SubRegIdx = MO->getSubReg(); 2101 LaneBitmask MOMask = SubRegIdx != 0 2102 ? TRI->getSubRegIndexLaneMask(SubRegIdx) 2103 : MRI->getMaxLaneMaskForVReg(Reg); 2104 LaneBitmask LiveInMask; 2105 for (const LiveInterval::SubRange &SR : LI.subranges()) { 2106 if ((MOMask & SR.LaneMask).none()) 2107 continue; 2108 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask); 2109 LiveQueryResult LRQ = SR.Query(UseIdx); 2110 if (LRQ.valueIn()) 2111 LiveInMask |= SR.LaneMask; 2112 } 2113 // At least parts of the register has to be live at the use. 2114 if ((LiveInMask & MOMask).none()) { 2115 report("No live subrange at use", MO, MONum); 2116 report_context(LI); 2117 report_context(UseIdx); 2118 } 2119 } 2120 } else { 2121 report("Virtual register has no live interval", MO, MONum); 2122 } 2123 } 2124 } 2125 2126 // Use of a dead register. 2127 if (!regsLive.count(Reg)) { 2128 if (Register::isPhysicalRegister(Reg)) { 2129 // Reserved registers may be used even when 'dead'. 2130 bool Bad = !isReserved(Reg); 2131 // We are fine if just any subregister has a defined value. 
2132 if (Bad) { 2133 2134 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) { 2135 if (regsLive.count(SubReg)) { 2136 Bad = false; 2137 break; 2138 } 2139 } 2140 } 2141 // If there is an additional implicit-use of a super register we stop 2142 // here. By definition we are fine if the super register is not 2143 // (completely) dead, if the complete super register is dead we will 2144 // get a report for its operand. 2145 if (Bad) { 2146 for (const MachineOperand &MOP : MI->uses()) { 2147 if (!MOP.isReg() || !MOP.isImplicit()) 2148 continue; 2149 2150 if (!Register::isPhysicalRegister(MOP.getReg())) 2151 continue; 2152 2153 for (const MCPhysReg &SubReg : TRI->subregs(MOP.getReg())) { 2154 if (SubReg == Reg) { 2155 Bad = false; 2156 break; 2157 } 2158 } 2159 } 2160 } 2161 if (Bad) 2162 report("Using an undefined physical register", MO, MONum); 2163 } else if (MRI->def_empty(Reg)) { 2164 report("Reading virtual register without a def", MO, MONum); 2165 } else { 2166 BBInfo &MInfo = MBBInfoMap[MI->getParent()]; 2167 // We don't know which virtual registers are live in, so only complain 2168 // if vreg was killed in this MBB. Otherwise keep track of vregs that 2169 // must be live in. PHI instructions are handled separately. 2170 if (MInfo.regsKilled.count(Reg)) 2171 report("Using a killed virtual register", MO, MONum); 2172 else if (!MI->isPHI()) 2173 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI)); 2174 } 2175 } 2176 } 2177 2178 if (MO->isDef()) { 2179 // Register defined. 2180 // TODO: verify that earlyclobber ops are not used. 2181 if (MO->isDead()) 2182 addRegWithSubRegs(regsDead, Reg); 2183 else 2184 addRegWithSubRegs(regsDefined, Reg); 2185 2186 // Verify SSA form. 2187 if (MRI->isSSA() && Register::isVirtualRegister(Reg) && 2188 std::next(MRI->def_begin(Reg)) != MRI->def_end()) 2189 report("Multiple virtual register defs in SSA form", MO, MONum); 2190 2191 // Check LiveInts for a live segment, but only for virtual registers. 2192 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) { 2193 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI); 2194 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber()); 2195 2196 if (Register::isVirtualRegister(Reg)) { 2197 if (LiveInts->hasInterval(Reg)) { 2198 const LiveInterval &LI = LiveInts->getInterval(Reg); 2199 checkLivenessAtDef(MO, MONum, DefIdx, LI, Reg); 2200 2201 if (LI.hasSubRanges()) { 2202 unsigned SubRegIdx = MO->getSubReg(); 2203 LaneBitmask MOMask = SubRegIdx != 0 2204 ? TRI->getSubRegIndexLaneMask(SubRegIdx) 2205 : MRI->getMaxLaneMaskForVReg(Reg); 2206 for (const LiveInterval::SubRange &SR : LI.subranges()) { 2207 if ((SR.LaneMask & MOMask).none()) 2208 continue; 2209 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask); 2210 } 2211 } 2212 } else { 2213 report("Virtual register has no Live interval", MO, MONum); 2214 } 2215 } 2216 } 2217 } 2218 } 2219 2220 // This function gets called after visiting all instructions in a bundle. The 2221 // argument points to the bundle header. 2222 // Normal stand-alone instructions are also considered 'bundles', and this 2223 // function is called for all of them. 2224 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) { 2225 BBInfo &MInfo = MBBInfoMap[MI->getParent()]; 2226 set_union(MInfo.regsKilled, regsKilled); 2227 set_subtract(regsLive, regsKilled); regsKilled.clear(); 2228 // Kill any masked registers. 
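// A register mask operand (such as the preserved-register mask of a call) has a clear bit for every physical register it clobbers; MachineOperand::clobbersPhysReg reports exactly those, and any clobbered register still in regsLive is moved to regsDead below.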
2229 while (!regMasks.empty()) { 2230 const uint32_t *Mask = regMasks.pop_back_val(); 2231 for (Register Reg : regsLive) 2232 if (Reg.isPhysical() && 2233 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg())) 2234 regsDead.push_back(Reg); 2235 } 2236 set_subtract(regsLive, regsDead); regsDead.clear(); 2237 set_union(regsLive, regsDefined); regsDefined.clear(); 2238 } 2239 2240 void 2241 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) { 2242 MBBInfoMap[MBB].regsLiveOut = regsLive; 2243 regsLive.clear(); 2244 2245 if (Indexes) { 2246 SlotIndex stop = Indexes->getMBBEndIdx(MBB); 2247 if (!(stop > lastIndex)) { 2248 report("Block ends before last instruction index", MBB); 2249 errs() << "Block ends at " << stop 2250 << " last instruction was at " << lastIndex << '\n'; 2251 } 2252 lastIndex = stop; 2253 } 2254 } 2255 2256 namespace { 2257 // This implements a set of registers that serves as a filter: it can filter other 2258 // sets by passing through elements not in the filter and blocking those that 2259 // are. Any filter implicitly includes the full set of physical registers upon 2260 // creation, thus filtering them all out. The filter itself as a set only grows, 2261 // and needs to be as efficient as possible. 2262 struct VRegFilter { 2263 // Add elements to the filter itself. \pre Input set \p FromRegSet must have 2264 // no duplicates. Both virtual and physical registers are fine. 2265 template <typename RegSetT> void add(const RegSetT &FromRegSet) { 2266 SmallVector<Register, 0> VRegsBuffer; 2267 filterAndAdd(FromRegSet, VRegsBuffer); 2268 } 2269 // Filter \p FromRegSet through the filter and append passed elements into \p 2270 // ToVRegs. All elements appended are then added to the filter itself. 2271 // \returns true if anything changed. 2272 template <typename RegSetT> 2273 bool filterAndAdd(const RegSetT &FromRegSet, 2274 SmallVectorImpl<Register> &ToVRegs) { 2275 unsigned SparseUniverse = Sparse.size(); 2276 unsigned NewSparseUniverse = SparseUniverse; 2277 unsigned NewDenseSize = Dense.size(); 2278 size_t Begin = ToVRegs.size(); 2279 for (Register Reg : FromRegSet) { 2280 if (!Reg.isVirtual()) 2281 continue; 2282 unsigned Index = Register::virtReg2Index(Reg); 2283 if (Index < SparseUniverseMax) { 2284 if (Index < SparseUniverse && Sparse.test(Index)) 2285 continue; 2286 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1); 2287 } else { 2288 if (Dense.count(Reg)) 2289 continue; 2290 ++NewDenseSize; 2291 } 2292 ToVRegs.push_back(Reg); 2293 } 2294 size_t End = ToVRegs.size(); 2295 if (Begin == End) 2296 return false; 2297 // Reserving space in sets once performs better than doing so continuously 2298 // and pays easily for double look-ups (even in Dense with SparseUniverseMax 2299 // tuned all the way down) and double iteration (the second one is over a 2300 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector). 2301 Sparse.resize(NewSparseUniverse); 2302 Dense.reserve(NewDenseSize); 2303 for (unsigned I = Begin; I < End; ++I) { 2304 Register Reg = ToVRegs[I]; 2305 unsigned Index = Register::virtReg2Index(Reg); 2306 if (Index < SparseUniverseMax) 2307 Sparse.set(Index); 2308 else 2309 Dense.insert(Reg); 2310 } 2311 return true; 2312 } 2313 2314 private: 2315 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8; 2316 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond 2317 // are tracked by Dense. 
The only purpose of the threshold and the Dense set 2318 // is to have a reasonably growing memory usage in pathological cases (large 2319 // number of very sparse VRegFilter instances live at the same time). In 2320 // practice even in the worst-by-execution-time cases having all elements 2321 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more 2322 // space efficient than if tracked by Dense. The threshold is set to keep the 2323 // worst-case memory usage within 2x of figures determined empirically for 2324 // the "all Dense" scenario in such worst-by-execution-time cases. 2325 BitVector Sparse; 2326 DenseSet<unsigned> Dense; 2327 }; 2328 2329 // Implements both a transfer function and a (binary, in-place) join operator 2330 // for a dataflow over register sets with set union join and filtering transfer 2331 // (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time. 2332 // Maintains out_b as its state, allowing for O(n) iteration over it at any 2333 // time, where n is the size of the set (as opposed to O(U) where U is the 2334 // universe). filter_b implicitly contains all physical registers at all times. 2335 class FilteringVRegSet { 2336 VRegFilter Filter; 2337 SmallVector<Register, 0> VRegs; 2338 2339 public: 2340 // Set up the filter_b. \pre Input register set \p RS must have no duplicates. 2341 // Both virtual and physical registers are fine. 2342 template <typename RegSetT> void addToFilter(const RegSetT &RS) { 2343 Filter.add(RS); 2344 } 2345 // Passes \p RS through the filter_b (transfer function) and adds what's left 2346 // to itself (out_b). 2347 template <typename RegSetT> bool add(const RegSetT &RS) { 2348 // Double-duty the Filter: to keep VRegs a set (and the join operation 2349 // a set union) just add everything being added here to the Filter as well. 2350 return Filter.filterAndAdd(RS, VRegs); 2351 } 2352 using const_iterator = decltype(VRegs)::const_iterator; 2353 const_iterator begin() const { return VRegs.begin(); } 2354 const_iterator end() const { return VRegs.end(); } 2355 size_t size() const { return VRegs.size(); } 2356 }; 2357 } // namespace 2358 2359 // Calculate the largest possible vregsPassed sets. These are the registers that 2360 // can pass through an MBB live, but may not be live every time. It is assumed 2361 // that all vregsPassed sets are empty before the call. 2362 void MachineVerifier::calcRegsPassed() { 2363 if (MF->empty()) 2364 // ReversePostOrderTraversal doesn't handle empty functions. 2365 return; 2366 2367 for (const MachineBasicBlock *MB : 2368 ReversePostOrderTraversal<const MachineFunction *>(MF)) { 2369 FilteringVRegSet VRegs; 2370 BBInfo &Info = MBBInfoMap[MB]; 2371 assert(Info.reachable); 2372 2373 VRegs.addToFilter(Info.regsKilled); 2374 VRegs.addToFilter(Info.regsLiveOut); 2375 for (const MachineBasicBlock *Pred : MB->predecessors()) { 2376 const BBInfo &PredInfo = MBBInfoMap[Pred]; 2377 if (!PredInfo.reachable) 2378 continue; 2379 2380 VRegs.add(PredInfo.regsLiveOut); 2381 VRegs.add(PredInfo.vregsPassed); 2382 } 2383 Info.vregsPassed.reserve(VRegs.size()); 2384 Info.vregsPassed.insert(VRegs.begin(), VRegs.end()); 2385 } 2386 } 2387 2388 // Calculate the set of virtual registers that must be passed through each basic 2389 // block in order to satisfy the requirements of successor blocks. This is very 2390 // similar to calcRegsPassed, only backwards. 2391 void MachineVerifier::calcRegsRequired() { 2392 // First push live-in regs to predecessors' vregsRequired. 
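// A predecessor whose vregsRequired set actually grew is put on the worklist so the requirement can be propagated further backwards in the loop that follows.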
2393 SmallPtrSet<const MachineBasicBlock*, 8> todo; 2394 for (const auto &MBB : *MF) { 2395 BBInfo &MInfo = MBBInfoMap[&MBB]; 2396 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 2397 BBInfo &PInfo = MBBInfoMap[Pred]; 2398 if (PInfo.addRequired(MInfo.vregsLiveIn)) 2399 todo.insert(Pred); 2400 } 2401 2402 // Handle the PHI node. 2403 for (const MachineInstr &MI : MBB.phis()) { 2404 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { 2405 // Skip those Operands which are undef regs or not regs. 2406 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg()) 2407 continue; 2408 2409 // Get register and predecessor for one PHI edge. 2410 Register Reg = MI.getOperand(i).getReg(); 2411 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB(); 2412 2413 BBInfo &PInfo = MBBInfoMap[Pred]; 2414 if (PInfo.addRequired(Reg)) 2415 todo.insert(Pred); 2416 } 2417 } 2418 } 2419 2420 // Iteratively push vregsRequired to predecessors. This will converge to the 2421 // same final state regardless of DenseSet iteration order. 2422 while (!todo.empty()) { 2423 const MachineBasicBlock *MBB = *todo.begin(); 2424 todo.erase(MBB); 2425 BBInfo &MInfo = MBBInfoMap[MBB]; 2426 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 2427 if (Pred == MBB) 2428 continue; 2429 BBInfo &SInfo = MBBInfoMap[Pred]; 2430 if (SInfo.addRequired(MInfo.vregsRequired)) 2431 todo.insert(Pred); 2432 } 2433 } 2434 } 2435 2436 // Check PHI instructions at the beginning of MBB. It is assumed that 2437 // calcRegsPassed has been run so BBInfo::isLiveOut is valid. 2438 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) { 2439 BBInfo &MInfo = MBBInfoMap[&MBB]; 2440 2441 SmallPtrSet<const MachineBasicBlock*, 8> seen; 2442 for (const MachineInstr &Phi : MBB) { 2443 if (!Phi.isPHI()) 2444 break; 2445 seen.clear(); 2446 2447 const MachineOperand &MODef = Phi.getOperand(0); 2448 if (!MODef.isReg() || !MODef.isDef()) { 2449 report("Expected first PHI operand to be a register def", &MODef, 0); 2450 continue; 2451 } 2452 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() || 2453 MODef.isEarlyClobber() || MODef.isDebug()) 2454 report("Unexpected flag on PHI operand", &MODef, 0); 2455 Register DefReg = MODef.getReg(); 2456 if (!Register::isVirtualRegister(DefReg)) 2457 report("Expected first PHI operand to be a virtual register", &MODef, 0); 2458 2459 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) { 2460 const MachineOperand &MO0 = Phi.getOperand(I); 2461 if (!MO0.isReg()) { 2462 report("Expected PHI operand to be a register", &MO0, I); 2463 continue; 2464 } 2465 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() || 2466 MO0.isDebug() || MO0.isTied()) 2467 report("Unexpected flag on PHI operand", &MO0, I); 2468 2469 const MachineOperand &MO1 = Phi.getOperand(I + 1); 2470 if (!MO1.isMBB()) { 2471 report("Expected PHI operand to be a basic block", &MO1, I + 1); 2472 continue; 2473 } 2474 2475 const MachineBasicBlock &Pre = *MO1.getMBB(); 2476 if (!Pre.isSuccessor(&MBB)) { 2477 report("PHI input is not a predecessor block", &MO1, I + 1); 2478 continue; 2479 } 2480 2481 if (MInfo.reachable) { 2482 seen.insert(&Pre); 2483 BBInfo &PrInfo = MBBInfoMap[&Pre]; 2484 if (!MO0.isUndef() && PrInfo.reachable && 2485 !PrInfo.isLiveOut(MO0.getReg())) 2486 report("PHI operand is not live-out from predecessor", &MO0, I); 2487 } 2488 } 2489 2490 // Did we see all predecessors? 
2491 if (MInfo.reachable) { 2492 for (MachineBasicBlock *Pred : MBB.predecessors()) { 2493 if (!seen.count(Pred)) { 2494 report("Missing PHI operand", &Phi); 2495 errs() << printMBBReference(*Pred) 2496 << " is a predecessor according to the CFG.\n"; 2497 } 2498 } 2499 } 2500 } 2501 } 2502 2503 void MachineVerifier::visitMachineFunctionAfter() { 2504 calcRegsPassed(); 2505 2506 for (const MachineBasicBlock &MBB : *MF) 2507 checkPHIOps(MBB); 2508 2509 // Now check liveness info if available 2510 calcRegsRequired(); 2511 2512 // Check for killed virtual registers that should be live out. 2513 for (const auto &MBB : *MF) { 2514 BBInfo &MInfo = MBBInfoMap[&MBB]; 2515 for (Register VReg : MInfo.vregsRequired) 2516 if (MInfo.regsKilled.count(VReg)) { 2517 report("Virtual register killed in block, but needed live out.", &MBB); 2518 errs() << "Virtual register " << printReg(VReg) 2519 << " is used after the block.\n"; 2520 } 2521 } 2522 2523 if (!MF->empty()) { 2524 BBInfo &MInfo = MBBInfoMap[&MF->front()]; 2525 for (Register VReg : MInfo.vregsRequired) { 2526 report("Virtual register defs don't dominate all uses.", MF); 2527 report_context_vreg(VReg); 2528 } 2529 } 2530 2531 if (LiveVars) 2532 verifyLiveVariables(); 2533 if (LiveInts) 2534 verifyLiveIntervals(); 2535 2536 // Check live-in list of each MBB. If a register is live into MBB, check 2537 // that the register is in regsLiveOut of each predecessor block. Since 2538 // this must come from a definition in the predecesssor or its live-in 2539 // list, this will catch a live-through case where the predecessor does not 2540 // have the register in its live-in list. This currently only checks 2541 // registers that have no aliases, are not allocatable and are not 2542 // reserved, which could mean a condition code register for instance. 2543 if (MRI->tracksLiveness()) 2544 for (const auto &MBB : *MF) 2545 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) { 2546 MCPhysReg LiveInReg = P.PhysReg; 2547 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid(); 2548 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg)) 2549 continue; 2550 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 2551 BBInfo &PInfo = MBBInfoMap[Pred]; 2552 if (!PInfo.regsLiveOut.count(LiveInReg)) { 2553 report("Live in register not found to be live out from predecessor.", 2554 &MBB); 2555 errs() << TRI->getName(LiveInReg) 2556 << " not found to be live out from " 2557 << printMBBReference(*Pred) << "\n"; 2558 } 2559 } 2560 } 2561 2562 for (auto CSInfo : MF->getCallSitesInfo()) 2563 if (!CSInfo.first->isCall()) 2564 report("Call site info referencing instruction that is not call", MF); 2565 2566 // If there's debug-info, check that we don't have any duplicate value 2567 // tracking numbers. 
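// Debug instruction numbers are how DBG_INSTR_REFs refer back to a defining instruction, so a number attached to two different instructions would make such references ambiguous.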
2568 if (MF->getFunction().getSubprogram()) { 2569 DenseSet<unsigned> SeenNumbers; 2570 for (auto &MBB : *MF) { 2571 for (auto &MI : MBB) { 2572 if (auto Num = MI.peekDebugInstrNum()) { 2573 auto Result = SeenNumbers.insert((unsigned)Num); 2574 if (!Result.second) 2575 report("Instruction has a duplicated value tracking number", &MI); 2576 } 2577 } 2578 } 2579 } 2580 } 2581 2582 void MachineVerifier::verifyLiveVariables() { 2583 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars"); 2584 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) { 2585 Register Reg = Register::index2VirtReg(I); 2586 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 2587 for (const auto &MBB : *MF) { 2588 BBInfo &MInfo = MBBInfoMap[&MBB]; 2589 2590 // Our vregsRequired should be identical to LiveVariables' AliveBlocks 2591 if (MInfo.vregsRequired.count(Reg)) { 2592 if (!VI.AliveBlocks.test(MBB.getNumber())) { 2593 report("LiveVariables: Block missing from AliveBlocks", &MBB); 2594 errs() << "Virtual register " << printReg(Reg) 2595 << " must be live through the block.\n"; 2596 } 2597 } else { 2598 if (VI.AliveBlocks.test(MBB.getNumber())) { 2599 report("LiveVariables: Block should not be in AliveBlocks", &MBB); 2600 errs() << "Virtual register " << printReg(Reg) 2601 << " is not needed live through the block.\n"; 2602 } 2603 } 2604 } 2605 } 2606 } 2607 2608 void MachineVerifier::verifyLiveIntervals() { 2609 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts"); 2610 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) { 2611 Register Reg = Register::index2VirtReg(I); 2612 2613 // Spilling and splitting may leave unused registers around. Skip them. 2614 if (MRI->reg_nodbg_empty(Reg)) 2615 continue; 2616 2617 if (!LiveInts->hasInterval(Reg)) { 2618 report("Missing live interval for virtual register", MF); 2619 errs() << printReg(Reg, TRI) << " still has defs or uses\n"; 2620 continue; 2621 } 2622 2623 const LiveInterval &LI = LiveInts->getInterval(Reg); 2624 assert(Reg == LI.reg() && "Invalid reg to interval mapping"); 2625 verifyLiveInterval(LI); 2626 } 2627 2628 // Verify all the cached regunit intervals. 2629 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i) 2630 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i)) 2631 verifyLiveRange(*LR, i); 2632 } 2633 2634 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR, 2635 const VNInfo *VNI, Register Reg, 2636 LaneBitmask LaneMask) { 2637 if (VNI->isUnused()) 2638 return; 2639 2640 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def); 2641 2642 if (!DefVNI) { 2643 report("Value not live at VNInfo def and not marked unused", MF); 2644 report_context(LR, Reg, LaneMask); 2645 report_context(*VNI); 2646 return; 2647 } 2648 2649 if (DefVNI != VNI) { 2650 report("Live segment at def has different VNInfo", MF); 2651 report_context(LR, Reg, LaneMask); 2652 report_context(*VNI); 2653 return; 2654 } 2655 2656 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def); 2657 if (!MBB) { 2658 report("Invalid VNInfo definition index", MF); 2659 report_context(LR, Reg, LaneMask); 2660 report_context(*VNI); 2661 return; 2662 } 2663 2664 if (VNI->isPHIDef()) { 2665 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) { 2666 report("PHIDef VNInfo is not defined at MBB start", MBB); 2667 report_context(LR, Reg, LaneMask); 2668 report_context(*VNI); 2669 } 2670 return; 2671 } 2672 2673 // Non-PHI def. 
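// A non-PHI value must be defined by a real instruction at its def index, and (when a register is given) that instruction must actually write the register or one of its register units; early-clobber defs must sit at early-clobber slots, all other defs at register slots.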
2674 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def); 2675 if (!MI) { 2676 report("No instruction at VNInfo def index", MBB); 2677 report_context(LR, Reg, LaneMask); 2678 report_context(*VNI); 2679 return; 2680 } 2681 2682 if (Reg != 0) { 2683 bool hasDef = false; 2684 bool isEarlyClobber = false; 2685 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) { 2686 if (!MOI->isReg() || !MOI->isDef()) 2687 continue; 2688 if (Register::isVirtualRegister(Reg)) { 2689 if (MOI->getReg() != Reg) 2690 continue; 2691 } else { 2692 if (!Register::isPhysicalRegister(MOI->getReg()) || 2693 !TRI->hasRegUnit(MOI->getReg(), Reg)) 2694 continue; 2695 } 2696 if (LaneMask.any() && 2697 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none()) 2698 continue; 2699 hasDef = true; 2700 if (MOI->isEarlyClobber()) 2701 isEarlyClobber = true; 2702 } 2703 2704 if (!hasDef) { 2705 report("Defining instruction does not modify register", MI); 2706 report_context(LR, Reg, LaneMask); 2707 report_context(*VNI); 2708 } 2709 2710 // Early clobber defs begin at USE slots, but other defs must begin at 2711 // DEF slots. 2712 if (isEarlyClobber) { 2713 if (!VNI->def.isEarlyClobber()) { 2714 report("Early clobber def must be at an early-clobber slot", MBB); 2715 report_context(LR, Reg, LaneMask); 2716 report_context(*VNI); 2717 } 2718 } else if (!VNI->def.isRegister()) { 2719 report("Non-PHI, non-early clobber def must be at a register slot", MBB); 2720 report_context(LR, Reg, LaneMask); 2721 report_context(*VNI); 2722 } 2723 } 2724 } 2725 2726 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR, 2727 const LiveRange::const_iterator I, 2728 Register Reg, 2729 LaneBitmask LaneMask) { 2730 const LiveRange::Segment &S = *I; 2731 const VNInfo *VNI = S.valno; 2732 assert(VNI && "Live segment has no valno"); 2733 2734 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) { 2735 report("Foreign valno in live segment", MF); 2736 report_context(LR, Reg, LaneMask); 2737 report_context(S); 2738 report_context(*VNI); 2739 } 2740 2741 if (VNI->isUnused()) { 2742 report("Live segment valno is marked unused", MF); 2743 report_context(LR, Reg, LaneMask); 2744 report_context(S); 2745 } 2746 2747 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start); 2748 if (!MBB) { 2749 report("Bad start of live segment, no basic block", MF); 2750 report_context(LR, Reg, LaneMask); 2751 report_context(S); 2752 return; 2753 } 2754 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB); 2755 if (S.start != MBBStartIdx && S.start != VNI->def) { 2756 report("Live segment must begin at MBB entry or valno def", MBB); 2757 report_context(LR, Reg, LaneMask); 2758 report_context(S); 2759 } 2760 2761 const MachineBasicBlock *EndMBB = 2762 LiveInts->getMBBFromIndex(S.end.getPrevSlot()); 2763 if (!EndMBB) { 2764 report("Bad end of live segment, no basic block", MF); 2765 report_context(LR, Reg, LaneMask); 2766 report_context(S); 2767 return; 2768 } 2769 2770 // No more checks for live-out segments. 2771 if (S.end == LiveInts->getMBBEndIdx(EndMBB)) 2772 return; 2773 2774 // RegUnit intervals are allowed dead phis. 
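// That is, a regunit range may carry a PHI value that is dead on arrival: such a segment starts at the block start (the PHI def) and ends at the dead slot of the same index, and needs no further checking.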
2775 if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() && 2776 S.start == VNI->def && S.end == VNI->def.getDeadSlot()) 2777 return; 2778 2779 // The live segment is ending inside EndMBB 2780 const MachineInstr *MI = 2781 LiveInts->getInstructionFromIndex(S.end.getPrevSlot()); 2782 if (!MI) { 2783 report("Live segment doesn't end at a valid instruction", EndMBB); 2784 report_context(LR, Reg, LaneMask); 2785 report_context(S); 2786 return; 2787 } 2788 2789 // The block slot must refer to a basic block boundary. 2790 if (S.end.isBlock()) { 2791 report("Live segment ends at B slot of an instruction", EndMBB); 2792 report_context(LR, Reg, LaneMask); 2793 report_context(S); 2794 } 2795 2796 if (S.end.isDead()) { 2797 // Segment ends on the dead slot. 2798 // That means there must be a dead def. 2799 if (!SlotIndex::isSameInstr(S.start, S.end)) { 2800 report("Live segment ending at dead slot spans instructions", EndMBB); 2801 report_context(LR, Reg, LaneMask); 2802 report_context(S); 2803 } 2804 } 2805 2806 // A live segment can only end at an early-clobber slot if it is being 2807 // redefined by an early-clobber def. 2808 if (S.end.isEarlyClobber()) { 2809 if (I+1 == LR.end() || (I+1)->start != S.end) { 2810 report("Live segment ending at early clobber slot must be " 2811 "redefined by an EC def in the same instruction", EndMBB); 2812 report_context(LR, Reg, LaneMask); 2813 report_context(S); 2814 } 2815 } 2816 2817 // The following checks only apply to virtual registers. Physreg liveness 2818 // is too weird to check. 2819 if (Register::isVirtualRegister(Reg)) { 2820 // A live segment can end with either a redefinition, a kill flag on a 2821 // use, or a dead flag on a def. 2822 bool hasRead = false; 2823 bool hasSubRegDef = false; 2824 bool hasDeadDef = false; 2825 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) { 2826 if (!MOI->isReg() || MOI->getReg() != Reg) 2827 continue; 2828 unsigned Sub = MOI->getSubReg(); 2829 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) 2830 : LaneBitmask::getAll(); 2831 if (MOI->isDef()) { 2832 if (Sub != 0) { 2833 hasSubRegDef = true; 2834 // An operand %0:sub0 reads %0:sub1..n. Invert the lane 2835 // mask for subregister defs. Read-undef defs will be handled by 2836 // readsReg below. 2837 SLM = ~SLM; 2838 } 2839 if (MOI->isDead()) 2840 hasDeadDef = true; 2841 } 2842 if (LaneMask.any() && (LaneMask & SLM).none()) 2843 continue; 2844 if (MOI->readsReg()) 2845 hasRead = true; 2846 } 2847 if (S.end.isDead()) { 2848 // Make sure that the corresponding machine operand for a "dead" live 2849 // range has the dead flag. We cannot perform this check for subregister 2850 // liveranges as partially dead values are allowed. 2851 if (LaneMask.none() && !hasDeadDef) { 2852 report("Instruction ending live segment on dead slot has no dead flag", 2853 MI); 2854 report_context(LR, Reg, LaneMask); 2855 report_context(S); 2856 } 2857 } else { 2858 if (!hasRead) { 2859 // When tracking subregister liveness, the main range must start new 2860 // values on partial register writes, even if there is no read. 2861 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() || 2862 !hasSubRegDef) { 2863 report("Instruction ending live segment doesn't read the register", 2864 MI); 2865 report_context(LR, Reg, LaneMask); 2866 report_context(S); 2867 } 2868 } 2869 } 2870 } 2871 2872 // Now check all the basic blocks in this live segment. 
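// Walk from the block containing the segment start to EndMBB, skipping the first block when the value is defined there rather than live-in, and check that the value is live out of every predecessor of each visited block (modulo PHI-defs and lanes proven undef for subregister ranges).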
2873 MachineFunction::const_iterator MFI = MBB->getIterator(); 2874 // Is this live segment the beginning of a non-PHIDef VN? 2875 if (S.start == VNI->def && !VNI->isPHIDef()) { 2876 // Not live-in to any blocks. 2877 if (MBB == EndMBB) 2878 return; 2879 // Skip this block. 2880 ++MFI; 2881 } 2882 2883 SmallVector<SlotIndex, 4> Undefs; 2884 if (LaneMask.any()) { 2885 LiveInterval &OwnerLI = LiveInts->getInterval(Reg); 2886 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes); 2887 } 2888 2889 while (true) { 2890 assert(LiveInts->isLiveInToMBB(LR, &*MFI)); 2891 // We don't know how to track physregs into a landing pad. 2892 if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) { 2893 if (&*MFI == EndMBB) 2894 break; 2895 ++MFI; 2896 continue; 2897 } 2898 2899 // Is VNI a PHI-def in the current block? 2900 bool IsPHI = VNI->isPHIDef() && 2901 VNI->def == LiveInts->getMBBStartIdx(&*MFI); 2902 2903 // Check that VNI is live-out of all predecessors. 2904 for (const MachineBasicBlock *Pred : MFI->predecessors()) { 2905 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred); 2906 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd); 2907 2908 // All predecessors must have a live-out value. However for a phi 2909 // instruction with subregister intervals 2910 // only one of the subregisters (not necessarily the current one) needs to 2911 // be defined. 2912 if (!PVNI && (LaneMask.none() || !IsPHI)) { 2913 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes)) 2914 continue; 2915 report("Register not marked live out of predecessor", Pred); 2916 report_context(LR, Reg, LaneMask); 2917 report_context(*VNI); 2918 errs() << " live into " << printMBBReference(*MFI) << '@' 2919 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " 2920 << PEnd << '\n'; 2921 continue; 2922 } 2923 2924 // Only PHI-defs can take different predecessor values. 
2925 if (!IsPHI && PVNI != VNI) { 2926 report("Different value live out of predecessor", Pred); 2927 report_context(LR, Reg, LaneMask); 2928 errs() << "Valno #" << PVNI->id << " live out of " 2929 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #" 2930 << VNI->id << " live into " << printMBBReference(*MFI) << '@' 2931 << LiveInts->getMBBStartIdx(&*MFI) << '\n'; 2932 } 2933 } 2934 if (&*MFI == EndMBB) 2935 break; 2936 ++MFI; 2937 } 2938 } 2939 2940 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg, 2941 LaneBitmask LaneMask) { 2942 for (const VNInfo *VNI : LR.valnos) 2943 verifyLiveRangeValue(LR, VNI, Reg, LaneMask); 2944 2945 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I) 2946 verifyLiveRangeSegment(LR, I, Reg, LaneMask); 2947 } 2948 2949 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) { 2950 Register Reg = LI.reg(); 2951 assert(Register::isVirtualRegister(Reg)); 2952 verifyLiveRange(LI, Reg); 2953 2954 LaneBitmask Mask; 2955 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg); 2956 for (const LiveInterval::SubRange &SR : LI.subranges()) { 2957 if ((Mask & SR.LaneMask).any()) { 2958 report("Lane masks of sub ranges overlap in live interval", MF); 2959 report_context(LI); 2960 } 2961 if ((SR.LaneMask & ~MaxMask).any()) { 2962 report("Subrange lanemask is invalid", MF); 2963 report_context(LI); 2964 } 2965 if (SR.empty()) { 2966 report("Subrange must not be empty", MF); 2967 report_context(SR, LI.reg(), SR.LaneMask); 2968 } 2969 Mask |= SR.LaneMask; 2970 verifyLiveRange(SR, LI.reg(), SR.LaneMask); 2971 if (!LI.covers(SR)) { 2972 report("A Subrange is not covered by the main range", MF); 2973 report_context(LI); 2974 } 2975 } 2976 2977 // Check the LI only has one connected component. 2978 ConnectedVNInfoEqClasses ConEQ(*LiveInts); 2979 unsigned NumComp = ConEQ.Classify(LI); 2980 if (NumComp > 1) { 2981 report("Multiple connected components in live interval", MF); 2982 report_context(LI); 2983 for (unsigned comp = 0; comp != NumComp; ++comp) { 2984 errs() << comp << ": valnos"; 2985 for (const VNInfo *I : LI.valnos) 2986 if (comp == ConEQ.getEqClass(I)) 2987 errs() << ' ' << I->id; 2988 errs() << '\n'; 2989 } 2990 } 2991 } 2992 2993 namespace { 2994 2995 // FrameSetup and FrameDestroy can have zero adjustment, so using a single 2996 // integer, we can't tell whether it is a FrameSetup or FrameDestroy if the 2997 // value is zero. 2998 // We use a bool plus an integer to capture the stack state. 2999 struct StackStateOfBB { 3000 StackStateOfBB() = default; 3001 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) : 3002 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup), 3003 ExitIsSetup(ExitSetup) {} 3004 3005 // Can be negative, which means we are setting up a frame. 3006 int EntryValue = 0; 3007 int ExitValue = 0; 3008 bool EntryIsSetup = false; 3009 bool ExitIsSetup = false; 3010 }; 3011 3012 } // end anonymous namespace 3013 3014 /// Make sure on every path through the CFG, a FrameSetup <n> is always followed 3015 /// by a FrameDestroy <n>, stack adjustments are identical on all 3016 /// CFG edges to a merge point, and frame is destroyed at end of a return block. 
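/// For example (illustrative only): a call-frame setup pseudo of size 16 makes the tracked exit value -16 with the setup flag raised, and the matching call-frame destroy of size 16 must bring it back to 0; a block ending in a return must finish with a zero adjustment and no pending setup.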
3017 void MachineVerifier::verifyStackFrame() { 3018 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode(); 3019 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode(); 3020 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u) 3021 return; 3022 3023 SmallVector<StackStateOfBB, 8> SPState; 3024 SPState.resize(MF->getNumBlockIDs()); 3025 df_iterator_default_set<const MachineBasicBlock*> Reachable; 3026 3027 // Visit the MBBs in DFS order. 3028 for (df_ext_iterator<const MachineFunction *, 3029 df_iterator_default_set<const MachineBasicBlock *>> 3030 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable); 3031 DFI != DFE; ++DFI) { 3032 const MachineBasicBlock *MBB = *DFI; 3033 3034 StackStateOfBB BBState; 3035 // Check the exit state of the DFS stack predecessor. 3036 if (DFI.getPathLength() >= 2) { 3037 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2); 3038 assert(Reachable.count(StackPred) && 3039 "DFS stack predecessor is already visited.\n"); 3040 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue; 3041 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup; 3042 BBState.ExitValue = BBState.EntryValue; 3043 BBState.ExitIsSetup = BBState.EntryIsSetup; 3044 } 3045 3046 // Update stack state by checking contents of MBB. 3047 for (const auto &I : *MBB) { 3048 if (I.getOpcode() == FrameSetupOpcode) { 3049 if (BBState.ExitIsSetup) 3050 report("FrameSetup is after another FrameSetup", &I); 3051 BBState.ExitValue -= TII->getFrameTotalSize(I); 3052 BBState.ExitIsSetup = true; 3053 } 3054 3055 if (I.getOpcode() == FrameDestroyOpcode) { 3056 int Size = TII->getFrameTotalSize(I); 3057 if (!BBState.ExitIsSetup) 3058 report("FrameDestroy is not after a FrameSetup", &I); 3059 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue : 3060 BBState.ExitValue; 3061 if (BBState.ExitIsSetup && AbsSPAdj != Size) { 3062 report("FrameDestroy <n> is after FrameSetup <m>", &I); 3063 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <" 3064 << AbsSPAdj << ">.\n"; 3065 } 3066 BBState.ExitValue += Size; 3067 BBState.ExitIsSetup = false; 3068 } 3069 } 3070 SPState[MBB->getNumber()] = BBState; 3071 3072 // Make sure the exit state of any predecessor is consistent with the entry 3073 // state. 3074 for (const MachineBasicBlock *Pred : MBB->predecessors()) { 3075 if (Reachable.count(Pred) && 3076 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue || 3077 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) { 3078 report("The exit stack state of a predecessor is inconsistent.", MBB); 3079 errs() << "Predecessor " << printMBBReference(*Pred) 3080 << " has exit state (" << SPState[Pred->getNumber()].ExitValue 3081 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while " 3082 << printMBBReference(*MBB) << " has entry state (" 3083 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; 3084 } 3085 } 3086 3087 // Make sure the entry state of any successor is consistent with the exit 3088 // state. 
3089 for (const MachineBasicBlock *Succ : MBB->successors()) { 3090 if (Reachable.count(Succ) && 3091 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue || 3092 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) { 3093 report("The entry stack state of a successor is inconsistent.", MBB); 3094 errs() << "Successor " << printMBBReference(*Succ) 3095 << " has entry state (" << SPState[Succ->getNumber()].EntryValue 3096 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while " 3097 << printMBBReference(*MBB) << " has exit state (" 3098 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; 3099 } 3100 } 3101 3102 // Make sure a basic block with return ends with zero stack adjustment. 3103 if (!MBB->empty() && MBB->back().isReturn()) { 3104 if (BBState.ExitIsSetup) 3105 report("A return block ends with a FrameSetup.", MBB); 3106 if (BBState.ExitValue) 3107 report("A return block ends with a nonzero stack adjustment.", MBB); 3108 } 3109 } 3110 } 3111