//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

struct MachineVerifier {
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  unsigned verify(const MachineFunction &MF);
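
  // Note: verify() returns the number of errors found; callers such as
  // MachineVerifierPass (below) treat a non-zero count as fatal.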

  Pass *const PASS;
  const char *Banner;
  const MachineFunction *MF;
  const TargetMachine *TM;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;
  const RegisterBankInfo *RBI;

  unsigned foundErrors;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected;
  bool isFunctionSelected;
  bool isFunctionTracksDebugUserValues;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI;
  const MachineInstr *FirstTerminator;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that are only used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };
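
  // Informal sketch of the dataflow these sets support: a vreg used in a
  // block before any local def lands in that block's vregsLiveIn, and
  // calcRegsRequired() (declared below) propagates it backwards through
  // predecessors via addRequired(), stopping at blocks that define it,
  // i.e. blocks whose regsLiveOut already contains it.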

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars;
  LiveIntervals *LiveInts;
  LiveStacks *LiveStks;
  SlotIndexes *Indexes;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);
  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval&);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};
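
// Example (sketch): the verifier normally runs via -verify-machineinstrs,
// which schedules it after each pass, but a pipeline can also add it
// explicitly with an identifying banner:
//
//   PM.add(createMachineVerifierPass("After MyPass"));
//
// ("MyPass" is just an illustrative name.)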

struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacks>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found " + Twine(FoundErrors) +
                         " machine code errors.");
    return false;
  }
};

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner,
                             bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return FoundErrors == 0;
}
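
// Informal usage note: MachineFunction::verify above is also handy to call
// by hand while debugging, e.g.
//
//   bool Clean = MF.verify(nullptr, "manual check", /*AbortOnErrors=*/false);
//
// which prints any problems and returns true iff no errors were found.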

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
           << ';' << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (Register::isVirtualRegister(VRegOrUnit)) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}
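
// Taken together, report() and the report_context* helpers print one block
// per error, shaped roughly like this (informal example, details elided):
//
//   *** Bad machine code: Too few operands ***
//   - function: foo
//   - basic block: %bb.0 entry (0x...)
//   - instruction: %0:gpr32 = ADDWrr
//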

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoPHIs) &&
      MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin()) {
        report("MBB has allocatable live-in, but isn't entry or landing-pad.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions
  // to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }
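
  // Recap of the shapes accepted above: a successfully analyzed block is one
  // of fallthrough (no TBB/FBB), unconditional branch (TBB, empty Cond),
  // conditional branch plus fallthrough (TBB and Cond), or a conditional/
  // unconditional pair (TBB, FBB, and Cond); anything else is invalid
  // analyzeBranch data.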

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    report("Non-terminator instruction after the first terminator", MI);
    errs() << "First terminator was:\t" << *FirstTerminator;
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }
}
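
// For reference (informal sketch of the layout walked above): an INLINEASM
// instruction looks like
//
//   INLINEASM &"<asm string>", <extra flags imm>,
//             <group flag imm>, <group operands...>, ...,
//             [optional !srcloc metadata], [implicit operands...]
//
// where each group's flag immediate encodes the operand kind and register
// count that InlineAsm::getNumOperandRegisters() decodes.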

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}
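
// Informal examples of the two failure modes above: comparing s32 with
// <4 x s32> trips the all-vector-or-all-scalar check, while comparing
// <2 x s32> with <4 x s32> trips the element-count check.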
"G_ASSERT_ZEXT" : "G_ASSERT_SEXT"; 991 if (!MI->getOperand(2).isImm()) { 992 report(Twine(OpcName, " expects an immediate operand #2"), MI); 993 break; 994 } 995 996 Register Dst = MI->getOperand(0).getReg(); 997 Register Src = MI->getOperand(1).getReg(); 998 LLT SrcTy = MRI->getType(Src); 999 int64_t Imm = MI->getOperand(2).getImm(); 1000 if (Imm <= 0) { 1001 report(Twine(OpcName, " size must be >= 1"), MI); 1002 break; 1003 } 1004 1005 if (Imm >= SrcTy.getScalarSizeInBits()) { 1006 report(Twine(OpcName, " size must be less than source bit width"), MI); 1007 break; 1008 } 1009 1010 const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI); 1011 const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI); 1012 1013 // Allow only the source bank to be set. 1014 if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) { 1015 report(Twine(OpcName, " cannot change register bank"), MI); 1016 break; 1017 } 1018 1019 // Don't allow a class change. Do allow member class->regbank. 1020 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst); 1021 if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) { 1022 report( 1023 Twine(OpcName, " source and destination register classes must match"), 1024 MI); 1025 break; 1026 } 1027 1028 break; 1029 } 1030 1031 case TargetOpcode::G_CONSTANT: 1032 case TargetOpcode::G_FCONSTANT: { 1033 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1034 if (DstTy.isVector()) 1035 report("Instruction cannot use a vector result type", MI); 1036 1037 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) { 1038 if (!MI->getOperand(1).isCImm()) { 1039 report("G_CONSTANT operand must be cimm", MI); 1040 break; 1041 } 1042 1043 const ConstantInt *CI = MI->getOperand(1).getCImm(); 1044 if (CI->getBitWidth() != DstTy.getSizeInBits()) 1045 report("inconsistent constant size", MI); 1046 } else { 1047 if (!MI->getOperand(1).isFPImm()) { 1048 report("G_FCONSTANT operand must be fpimm", MI); 1049 break; 1050 } 1051 const ConstantFP *CF = MI->getOperand(1).getFPImm(); 1052 1053 if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) != 1054 DstTy.getSizeInBits()) { 1055 report("inconsistent constant size", MI); 1056 } 1057 } 1058 1059 break; 1060 } 1061 case TargetOpcode::G_LOAD: 1062 case TargetOpcode::G_STORE: 1063 case TargetOpcode::G_ZEXTLOAD: 1064 case TargetOpcode::G_SEXTLOAD: { 1065 LLT ValTy = MRI->getType(MI->getOperand(0).getReg()); 1066 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); 1067 if (!PtrTy.isPointer()) 1068 report("Generic memory instruction must access a pointer", MI); 1069 1070 // Generic loads and stores must have a single MachineMemOperand 1071 // describing that access. 
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
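  // Informal example: "%0:_(p1) = G_ADDRSPACE_CAST %1:_(p0)" is well-formed,
  // whereas a cast whose source and destination share an address space is
  // reported above.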
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
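  // Informal example: "%0:_(s64) = G_ZEXT %1:_(s32)" is a valid extend, while
  // a G_ZEXT from s64 to s32 (or one with a pointer operand) trips the checks
  // above.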
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger
    // scalar, e.g. s2N = MERGE sN, sN.
    // Merging multiple scalars into a vector is not allowed; use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands() - 1).getReg());
    // For now G_UNMERGE can split vectors.
    for (unsigned i = 0; i < MI->getNumOperands() - 1; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
        report("G_UNMERGE_VALUES destination types do not match", MI);
    }
    if (SrcTy.getSizeInBits() !=
        (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
      report("G_UNMERGE_VALUES source operand does not cover dest operands",
             MI);
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
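  // Informal example: "%0:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %c, %d" with
  // four s32 sources is the canonical shape the checks above accept.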
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTOR requires vector source and destination operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTOR requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
    if (DstTy.getNumElements() !=
        SrcTy.getNumElements() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTOR num dest and source elements should match", MI);
    break;
  }
  case TargetOpcode::G_ICMP:
  case TargetOpcode::G_FCMP: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());

    if ((DstTy.isVector() != SrcTy.isVector()) ||
        (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
      report("Generic vector icmp/fcmp must preserve number of lanes", MI);

    break;
  }
  case TargetOpcode::G_EXTRACT: {
    const MachineOperand &SrcOp = MI->getOperand(1);
    if (!SrcOp.isReg()) {
      report("extract source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(2);
    if (!OffsetOp.isImm()) {
      report("extract offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
    if (SrcSize == DstSize)
      report("extract source must be larger than result", MI);

    if (DstSize + OffsetOp.getImm() > SrcSize)
      report("extract reads past end of register", MI);
    break;
  }
  case TargetOpcode::G_INSERT: {
    const MachineOperand &SrcOp = MI->getOperand(2);
    if (!SrcOp.isReg()) {
      report("insert source must be a register", MI);
      break;
    }

    const MachineOperand &OffsetOp = MI->getOperand(3);
    if (!OffsetOp.isImm()) {
      report("insert offset must be a constant", MI);
      break;
    }

    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();

    if (DstSize <= SrcSize)
      report("inserted size must be smaller than total register", MI);

    if (SrcSize + OffsetOp.getImm() > DstSize)
      report("insert writes past end of register", MI);

    break;
  }
  case TargetOpcode::G_JUMP_TABLE: {
    if (!MI->getOperand(1).isJTI())
      report("G_JUMP_TABLE source operand must be a jump table index", MI);
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isPointer())
      report("G_JUMP_TABLE dest operand must have a pointer type", MI);
    break;
  }
  case TargetOpcode::G_BRJT: {
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("G_BRJT src operand 0 must be a pointer type", MI);

    if (!MI->getOperand(1).isJTI())
      report("G_BRJT src operand 1 must be a jump table index", MI);

    const auto &IdxOp = MI->getOperand(2);
    if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
      report("G_BRJT src operand 2 must be a scalar reg type", MI);
    break;
  }
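  // Informal example of the jump table pattern checked above:
  //   %jt:_(p0) = G_JUMP_TABLE %jump-table.0
  //   G_BRJT %jt(p0), %jump-table.0, %idx(s64)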
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
    // TODO: Should verify number of def and use operands, but the current
    // interface requires passing in IR types for mangling.
    const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
    if (!IntrIDOp.isIntrinsicID()) {
      report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
      break;
    }

    bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
    unsigned IntrID = IntrIDOp.getIntrinsicID();
    if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
      AttributeList Attrs =
          Intrinsic::getAttributes(MF->getFunction().getContext(),
                                   static_cast<Intrinsic::ID>(IntrID));
      bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
      if (NoSideEffects && DeclHasSideEffects) {
        report("G_INTRINSIC used with intrinsic that accesses memory", MI);
        break;
      }
      if (!NoSideEffects && !DeclHasSideEffects) {
        report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
        break;
      }
    }

    break;
  }
  case TargetOpcode::G_SEXT_INREG: {
    if (!MI->getOperand(2).isImm()) {
      report("G_SEXT_INREG expects an immediate operand #2", MI);
      break;
    }

    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0)
      report("G_SEXT_INREG size must be >= 1", MI);
    if (Imm >= SrcTy.getScalarSizeInBits())
      report("G_SEXT_INREG size must be less than source bit width", MI);
    break;
  }
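  // Informal example: "%0:_(s32) = G_SEXT_INREG %1:_(s32), 8" sign-extends
  // the low 8 bits; the immediate must lie in [1, bit width - 1], as checked
  // above.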
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    const MachineOperand &MaskOp = MI->getOperand(3);
    if (!MaskOp.isShuffleMask()) {
      report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
      break;
    }

    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());

    if (Src0Ty != Src1Ty)
      report("Source operands must be the same type", MI);

    if (Src0Ty.getScalarType() != DstTy.getScalarType())
      report("G_SHUFFLE_VECTOR cannot change element type", MI);

    // Don't check that all operands are vector because scalars are used in
    // place of 1-element vectors.
    int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
    int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;

    ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();

    if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
      report("Wrong result type for shufflemask", MI);

    for (int Idx : MaskIdxes) {
      if (Idx < 0)
        continue;

      if (Idx >= 2 * SrcNumElts)
        report("Out of bounds shuffle index", MI);
    }

    break;
  }
  case TargetOpcode::G_DYN_STACKALLOC: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &AllocOp = MI->getOperand(1);
    const MachineOperand &AlignOp = MI->getOperand(2);

    if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
      report("dst operand 0 must be a pointer type", MI);
      break;
    }

    if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
      report("src operand 1 must be a scalar reg type", MI);
      break;
    }

    if (!AlignOp.isImm()) {
      report("src operand 2 must be an immediate type", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_MEMCPY_INLINE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    if (MMOs.size() != 2) {
      report("memcpy/memmove must have 2 memory operands", MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
        (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
      report("wrong memory operand types", MI);
      break;
    }

    if (MMOs[0]->getSize() != MMOs[1]->getSize())
      report("inconsistent memory operand sizes", MI);

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());

    if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
      report("memory instruction operand must be a pointer", MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent store address space", MI);
    if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
      report("inconsistent load address space", MI);

    if (Opc != TargetOpcode::G_MEMCPY_INLINE)
      if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
        report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);

    break;
  }
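  // For reference: the two memory operands above are expected in (store,
  // load) order, i.e. MMOs[0] describes the destination write and MMOs[1]
  // the source read.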
"memset" : "bzero"; 1570 if (MMOs.size() != 1) { 1571 report(Twine(Name, " must have 1 memory operand"), MI); 1572 break; 1573 } 1574 1575 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) { 1576 report(Twine(Name, " memory operand must be a store"), MI); 1577 break; 1578 } 1579 1580 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1581 if (!DstPtrTy.isPointer()) { 1582 report(Twine(Name, " operand must be a pointer"), MI); 1583 break; 1584 } 1585 1586 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1587 report("inconsistent " + Twine(Name, " address space"), MI); 1588 1589 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() || 1590 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL)) 1591 report("'tail' flag (last operand) must be an immediate 0 or 1", MI); 1592 1593 break; 1594 } 1595 case TargetOpcode::G_VECREDUCE_SEQ_FADD: 1596 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: { 1597 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1598 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1599 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1600 if (!DstTy.isScalar()) 1601 report("Vector reduction requires a scalar destination type", MI); 1602 if (!Src1Ty.isScalar()) 1603 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI); 1604 if (!Src2Ty.isVector()) 1605 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI); 1606 break; 1607 } 1608 case TargetOpcode::G_VECREDUCE_FADD: 1609 case TargetOpcode::G_VECREDUCE_FMUL: 1610 case TargetOpcode::G_VECREDUCE_FMAX: 1611 case TargetOpcode::G_VECREDUCE_FMIN: 1612 case TargetOpcode::G_VECREDUCE_ADD: 1613 case TargetOpcode::G_VECREDUCE_MUL: 1614 case TargetOpcode::G_VECREDUCE_AND: 1615 case TargetOpcode::G_VECREDUCE_OR: 1616 case TargetOpcode::G_VECREDUCE_XOR: 1617 case TargetOpcode::G_VECREDUCE_SMAX: 1618 case TargetOpcode::G_VECREDUCE_SMIN: 1619 case TargetOpcode::G_VECREDUCE_UMAX: 1620 case TargetOpcode::G_VECREDUCE_UMIN: { 1621 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1622 if (!DstTy.isScalar()) 1623 report("Vector reduction requires a scalar destination type", MI); 1624 break; 1625 } 1626 1627 case TargetOpcode::G_SBFX: 1628 case TargetOpcode::G_UBFX: { 1629 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1630 if (DstTy.isVector()) { 1631 report("Bitfield extraction is not supported on vectors", MI); 1632 break; 1633 } 1634 break; 1635 } 1636 case TargetOpcode::G_SHL: 1637 case TargetOpcode::G_LSHR: 1638 case TargetOpcode::G_ASHR: 1639 case TargetOpcode::G_ROTR: 1640 case TargetOpcode::G_ROTL: { 1641 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1642 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1643 if (Src1Ty.isVector() != Src2Ty.isVector()) { 1644 report("Shifts and rotates require operands to be either all scalars or " 1645 "all vectors", 1646 MI); 1647 break; 1648 } 1649 break; 1650 } 1651 case TargetOpcode::G_LLROUND: 1652 case TargetOpcode::G_LROUND: { 1653 verifyAllRegOpsScalar(*MI, *MRI); 1654 break; 1655 } 1656 case TargetOpcode::G_IS_FPCLASS: { 1657 LLT DestTy = MRI->getType(MI->getOperand(0).getReg()); 1658 LLT DestEltTy = DestTy.getScalarType(); 1659 if (!DestEltTy.isScalar()) { 1660 report("Destination must be a scalar or vector of scalars", MI); 1661 break; 1662 } 1663 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1664 LLT SrcEltTy = SrcTy.getScalarType(); 1665 if (!SrcEltTy.isScalar()) { 1666 report("Source must be a scalar or vector of scalars", MI); 1667 break; 1668 } 1669 if 
        (!verifyVectorElementMatch(DestTy, SrcTy, MI))
      break;
    const MachineOperand &TestMO = MI->getOperand(2);
    if (!TestMO.isImm()) {
      report("floating-point class set (operand 2) must be an immediate", MI);
      break;
    }
    int64_t Test = TestMO.getImm();
    if (Test < 0 || Test > fcAllFlags) {
      report("Incorrect floating-point class set (operand 2)", MI);
      break;
    }
    const MachineOperand &SemanticsMO = MI->getOperand(3);
    if (!SemanticsMO.isImm()) {
      report("floating-point semantics (operand 3) must be an immediate", MI);
      break;
    }
    int64_t Semantics = SemanticsMO.getImm();
    if (Semantics < 0 || Semantics > APFloat::S_MaxSemantics) {
      report("Incorrect floating-point semantics (operand 3)", MI);
      break;
    }
    break;
  }
  default:
    break;
  }
}

void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (MI->getNumOperands() < MCID.getNumOperands()) {
    report("Too few operands", MI);
    errs() << MCID.getNumOperands() << " operands expected, but "
           << MI->getNumOperands() << " given.\n";
  }

  if (MI->isPHI()) {
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::NoPHIs))
      report("Found PHI instruction with NoPHIs property set", MI);

    if (FirstNonPHI)
      report("Found PHI instruction after non-PHI", MI);
  } else if (FirstNonPHI == nullptr)
    FirstNonPHI = MI;

  // Check the tied operands.
  if (MI->isInlineAsm())
    verifyInlineAsm(MI);

  // Check that unspillable terminators define a reg and have at most one use.
  if (TII->isUnspillableTerminator(MI)) {
    if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
      report("Unspillable Terminator does not define a reg", MI);
    Register Def = MI->getOperand(0).getReg();
    if (Def.isVirtual() &&
        !MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::NoPHIs) &&
        std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
      report("Unspillable Terminator expected to have at most one use!", MI);
  }

  // A fully-formed DBG_VALUE must have a location. Ignore partially formed
  // DBG_VALUEs: these are convenient to use in tests, but should never get
  // generated.
  if (MI->isDebugValue() && MI->getNumOperands() == 4)
    if (!MI->getDebugLoc())
      report("Missing DebugLoc for debug instruction", MI);

  // Meta instructions should never be the subject of debug value tracking;
  // they don't create a value in the output program at all.
  if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
    report("Metadata instruction should not have a value tracking number", MI);

  // Check the MachineMemOperands for basic consistency.
  for (MachineMemOperand *Op : MI->memoperands()) {
    if (Op->isLoad() && !MI->mayLoad())
      report("Missing mayLoad flag", MI);
    if (Op->isStore() && !MI->mayStore())
      report("Missing mayStore flag", MI);
  }

  // Debug values must not have a slot index.
  // Other instructions must have one, unless they are inside a bundle.
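  // (SlotIndexes only numbers bundle headers and stand-alone instructions;
  // instructions inside a bundle share the header's index, so they must not
  // appear in the map themselves.)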
  if (LiveInts) {
    bool mapped = !LiveInts->isNotInMIMap(*MI);
    if (MI->isDebugOrPseudoInstr()) {
      if (mapped)
        report("Debug instruction has a slot index", MI);
    } else if (MI->isInsideBundle()) {
      if (mapped)
        report("Instruction inside bundle has a slot index", MI);
    } else {
      if (!mapped)
        report("Missing slot index", MI);
    }
  }

  unsigned Opc = MCID.getOpcode();
  if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
    verifyPreISelGenericInstruction(MI);
    return;
  }

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &SrcOp = MI->getOperand(1);
    const Register SrcReg = SrcOp.getReg();
    const Register DstReg = DstOp.getReg();

    LLT DstTy = MRI->getType(DstReg);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isValid() && DstTy.isValid()) {
      // If both types are valid, check that the types are the same.
      if (SrcTy != DstTy) {
        report("Copy Instruction is illegal with mismatching types", MI);
        errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
      }

      break;
    }

    if (!SrcTy.isValid() && !DstTy.isValid())
      break;

    // If we have only one valid type, this is likely a copy between a virtual
    // and physical register.
    unsigned SrcSize = 0;
    unsigned DstSize = 0;
    if (SrcReg.isPhysical() && DstTy.isValid()) {
      const TargetRegisterClass *SrcRC =
          TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
      if (SrcRC)
        SrcSize = TRI->getRegSizeInBits(*SrcRC);
    }

    if (SrcSize == 0)
      SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);

    if (DstReg.isPhysical() && SrcTy.isValid()) {
      const TargetRegisterClass *DstRC =
          TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
      if (DstRC)
        DstSize = TRI->getRegSizeInBits(*DstRC);
    }

    if (DstSize == 0)
      DstSize = TRI->getRegSizeInBits(DstReg, *MRI);

    if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
      if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
        report("Copy Instruction is illegal with mismatching sizes", MI);
        errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
               << "\n";
      }
    }
    break;
  }
  case TargetOpcode::STATEPOINT: {
    StatepointOpers SO(MI);
    if (!MI->getOperand(SO.getIDPos()).isImm() ||
        !MI->getOperand(SO.getNBytesPos()).isImm() ||
        !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
      report("meta operands to STATEPOINT not constant!", MI);
      break;
    }

    auto VerifyStackMapConstant = [&](unsigned Offset) {
      if (Offset >= MI->getNumOperands()) {
        report("stack map constant to STATEPOINT is out of range!", MI);
        return;
      }
      if (!MI->getOperand(Offset - 1).isImm() ||
          MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
          !MI->getOperand(Offset).isImm())
        report("stack map constant to STATEPOINT not well formed!", MI);
    };
    VerifyStackMapConstant(SO.getCCIdx());
    VerifyStackMapConstant(SO.getFlagsIdx());
    VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
    VerifyStackMapConstant(SO.getNumGCPtrIdx());
    VerifyStackMapConstant(SO.getNumAllocaIdx());
    VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());

    // Verify that all explicit statepoint defs are tied to gc operands as
    // they are expected to be a relocation of gc operands.
    unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
    unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
    for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
      unsigned UseOpIdx;
      if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
        report("STATEPOINT defs expected to be tied", MI);
        break;
      }
      if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
        report("STATEPOINT def tied to non-gc operand", MI);
        break;
      }
    }

    // TODO: verify we have properly encoded deopt arguments
  } break;
  case TargetOpcode::INSERT_SUBREG: {
    unsigned InsertedSize;
    if (unsigned SubIdx = MI->getOperand(2).getSubReg())
      InsertedSize = TRI->getSubRegIdxSize(SubIdx);
    else
      InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
    unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
    if (SubRegSize < InsertedSize) {
      report("INSERT_SUBREG expected inserted value to have equal or lesser "
             "size than the subreg it was inserted into", MI);
      break;
    }
  } break;
  }
}

void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumDefs = MCID.getNumDefs();
  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;

  // The first MCID.NumDefs operands must be explicit register defines.
  if (MONum < NumDefs) {
    const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
    if (!MO->isReg())
      report("Explicit definition must be a register", MO, MONum);
    else if (!MO->isDef() && !MCOI.isOptionalDef())
      report("Explicit definition marked as use", MO, MONum);
    else if (MO->isImplicit())
      report("Explicit definition marked as implicit", MO, MONum);
  } else if (MONum < MCID.getNumOperands()) {
    const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the ARM back end. Check non-variadic operands only.
    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
    if (!IsOptional) {
      if (MO->isReg()) {
        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
          report("Explicit operand marked as def", MO, MONum);
        if (MO->isImplicit())
          report("Explicit operand marked as implicit", MO, MONum);
      }

      // Check that an instruction has register operands only as expected.
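      // (That is, flag an immediate-typed slot holding a register, or a
      // register-typed slot holding something else. A frame index is
      // tolerated in a register slot since it may still stand in for a
      // register before frame index elimination.)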
      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
          !MO->isReg() && !MO->isFI())
        report("Expected a register operand.", MO, MONum);
      if (MO->isReg()) {
        if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
            (MCOI.OperandType == MCOI::OPERAND_PCREL &&
             !TII->isPCRelRegisterOperandLegal(*MO)))
          report("Expected a non-register operand.", MO, MONum);
      }
    }

    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
    if (TiedTo != -1) {
      if (!MO->isReg())
        report("Tied use must be a register", MO, MONum);
      else if (!MO->isTied())
        report("Operand should be tied", MO, MONum);
      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
        report("Tied def doesn't match MCInstrDesc", MO, MONum);
      else if (Register::isPhysicalRegister(MO->getReg())) {
        const MachineOperand &MOTied = MI->getOperand(TiedTo);
        if (!MOTied.isReg())
          report("Tied counterpart must be a register", &MOTied, TiedTo);
        else if (Register::isPhysicalRegister(MOTied.getReg()) &&
                 MO->getReg() != MOTied.getReg())
          report("Tied physical registers must match.", &MOTied, TiedTo);
      }
    } else if (MO->isReg() && MO->isTied())
      report("Explicit operand should not be tied", MO, MONum);
  } else {
    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
    if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
      report("Extra explicit operand on non-variadic instruction", MO, MONum);
  }

  switch (MO->getType()) {
  case MachineOperand::MO_Register: {
    // Verify debug flag on debug instructions. Check this first because reg0
    // indicates an undefined debug value.
    if (MI->isDebugInstr() && MO->isUse()) {
      if (!MO->isDebug())
        report("Register operand must be marked debug", MO, MONum);
    } else if (MO->isDebug()) {
      report("Register operand must not be marked debug", MO, MONum);
    }

    const Register Reg = MO->getReg();
    if (!Reg)
      return;
    if (MRI->tracksLiveness() && !MI->isDebugInstr())
      checkLiveness(MO, MONum);

    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
        MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO,
             MONum);

    // Verify the consistency of tied operands.
    if (MO->isTied()) {
      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
      if (!OtherMO.isReg())
        report("Must be tied to a register", MO, MONum);
      if (!OtherMO.isTied())
        report("Missing tie flags on tied operand", MO, MONum);
      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
        report("Inconsistent tie links", MO, MONum);
      if (MONum < MCID.getNumDefs()) {
        if (OtherIdx < MCID.getNumOperands()) {
          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
            report("Explicit def tied to explicit use without tie constraint",
                   MO, MONum);
        } else {
          if (!OtherMO.isImplicit())
            report("Explicit def should be tied to implicit use", MO, MONum);
        }
      }
    }

    // Verify two-address constraints after the twoaddressinstruction pass.
    // Both the twoaddressinstruction and phi-node-elimination passes call
    // MRI->leaveSSA() to set the MF as NoSSA, but this verification should
    // run after twoaddressinstruction, not after phi-node-elimination.
    // So we shouldn't use NoSSA as the condition; instead, verify two-address
    // constraints based on the TiedOpsRewritten property, which is set by the
    // twoaddressinstruction pass.
    unsigned DefIdx;
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
        Reg != MI->getOperand(DefIdx).getReg())
      report("Two-address instruction operands must be identical", MO, MONum);

    // Check register classes.
    unsigned SubIdx = MO->getSubReg();

    if (Register::isPhysicalRegister(Reg)) {
      if (SubIdx) {
        report("Illegal subregister index for physical register", MO, MONum);
        return;
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
                TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (!DRC->contains(Reg)) {
            report("Illegal physical register for instruction", MO, MONum);
            errs() << printReg(Reg, TRI) << " is not a "
                   << TRI->getRegClassName(DRC) << " register.\n";
          }
        }
      }
      if (MO->isRenamable()) {
        if (MRI->isReserved(Reg)) {
          report("isRenamable set on reserved register", MO, MONum);
          return;
        }
      }
    } else {
      // Virtual register.
      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
      if (!RC) {
        // This is a generic virtual register.

        // Do not allow undef uses for generic virtual registers. This ensures
        // getVRegDef can never fail and return null on a generic register.
        //
        // FIXME: This restriction should probably be broadened to all SSA
        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
        // run on the SSA function just before phi elimination.
        if (MO->isUndef())
          report("Generic virtual register use cannot be undef", MO, MONum);

        // Debug value instructions are permitted to use undefined vregs.
        // This is a performance measure to skip the overhead of immediately
        // pruning unused debug operands. The final undef substitution occurs
        // when debug values are allocated in LDVImpl::handleDebugValue, so
        // these verifications always apply after this pass.
        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
          // If we're post-Select, we can't have gvregs anymore.
          if (isFunctionSelected) {
            report("Generic virtual register invalid in a Selected function",
                   MO, MONum);
            return;
          }

          // The gvreg must have a type and it must not have a SubIdx.
          LLT Ty = MRI->getType(Reg);
          if (!Ty.isValid()) {
            report("Generic virtual register must have a valid type", MO,
                   MONum);
            return;
          }

          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);

          // If we're post-RegBankSelect, the gvreg must have a bank.
          if (!RegBank && isFunctionRegBankSelected) {
            report("Generic virtual register must have a bank in a "
                   "RegBankSelected function",
                   MO, MONum);
            return;
          }

          // Make sure the register fits into its register bank, if any.
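          // For example (hypothetical MIR), an s64 vreg such as
          //   %0:fpr(s64) = COPY ...
          // assigned to a bank that is only 32 bits wide would be rejected
          // by the size check below.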
          if (RegBank && Ty.isValid() &&
              RegBank->getSize() < Ty.getSizeInBits()) {
            report("Register bank is too small for virtual register", MO,
                   MONum);
            errs() << "Register bank " << RegBank->getName() << " too small ("
                   << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
                   << "-bits\n";
            return;
          }
        }

        if (SubIdx) {
          report("Generic virtual register does not allow subregister index",
                 MO, MONum);
          return;
        }

        // If this is a target-specific instruction and this operand has a
        // register class constraint, the virtual register must comply with
        // it.
        if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
            MONum < MCID.getNumOperands() &&
            TII->getRegClass(MCID, MONum, TRI, *MF)) {
          report("Virtual register does not match instruction constraint", MO,
                 MONum);
          errs() << "Expect register class "
                 << TRI->getRegClassName(
                        TII->getRegClass(MCID, MONum, TRI, *MF))
                 << " but got nothing\n";
          return;
        }

        break;
      }
      if (SubIdx) {
        const TargetRegisterClass *SRC =
            TRI->getSubClassWithSubReg(RC, SubIdx);
        if (!SRC) {
          report("Invalid subregister index for virtual register", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not support subreg index " << SubIdx << "\n";
          return;
        }
        if (RC != SRC) {
          report("Invalid register class for subregister index", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not fully support subreg index " << SubIdx << "\n";
          return;
        }
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
                TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (SubIdx) {
            const TargetRegisterClass *SuperRC =
                TRI->getLargestLegalSuperClass(RC, *MF);
            if (!SuperRC) {
              report("No largest legal super class exists.", MO, MONum);
              return;
            }
            DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
            if (!DRC) {
              report("No matching super-reg register class.", MO, MONum);
              return;
            }
          }
          if (!RC->hasSuperClassEq(DRC)) {
            report("Illegal virtual register for instruction", MO, MONum);
            errs() << "Expected a " << TRI->getRegClassName(DRC)
                   << " register, but got a " << TRI->getRegClassName(RC)
                   << " register\n";
          }
        }
      }
    }
    break;
  }

  case MachineOperand::MO_RegisterMask:
    regMasks.push_back(MO->getRegMask());
    break;

  case MachineOperand::MO_MachineBasicBlock:
    if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
      report("PHI operand is not in the CFG", MO, MONum);
    break;

  case MachineOperand::MO_FrameIndex:
    if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
        LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      int FI = MO->getIndex();
      LiveInterval &LI = LiveStks->getInterval(FI);
      SlotIndex Idx = LiveInts->getInstructionIndex(*MI);

      bool stores = MI->mayStore();
      bool loads = MI->mayLoad();
      // For a memory-to-memory move, we need to check if the frame
      // index is used for storing or loading, by inspecting the
      // memory operands.
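      // (For example, a spill-to-spill copy pseudo may both load and store;
      // the FixedStackPseudoSourceValue on each memory operand tells us which
      // side touches this particular frame index.)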
      if (stores && loads) {
        for (auto *MMO : MI->memoperands()) {
          const PseudoSourceValue *PSV = MMO->getPseudoValue();
          if (PSV == nullptr) continue;
          const FixedStackPseudoSourceValue *Value =
              dyn_cast<FixedStackPseudoSourceValue>(PSV);
          if (Value == nullptr) continue;
          if (Value->getFrameIndex() != FI) continue;

          if (MMO->isStore())
            loads = false;
          else
            stores = false;
          break;
        }
        if (loads == stores)
          report("Missing fixed stack memoperand.", MI);
      }
      if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
        report("Instruction loads from dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
      if (stores && !LI.liveAt(Idx.getRegSlot())) {
        report("Instruction stores to dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
    }
    break;

  case MachineOperand::MO_CFIIndex:
    if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
      report("CFI instruction has invalid index", MO, MONum);
    break;

  default:
    break;
  }
}

void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex UseIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         LaneBitmask LaneMask) {
  LiveQueryResult LRQ = LR.Query(UseIdx);
  // Check if we have a segment at the use; note however that we only need
  // one live subregister range, the others may be dead.
  if (!LRQ.valueIn() && LaneMask.none()) {
    report("No live segment at use", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    report_context(UseIdx);
  }
  if (MO->isKill() && !LRQ.isKill()) {
    report("Live range continues after kill flag", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(UseIdx);
  }
}

void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    assert(VNI && "NULL valno is not allowed");
    if (VNI->def != DefIdx) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agrees.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(Register::isVirtualRegister(VRegOrUnit) &&
             "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead.
      // There could be other non-dead defs of other subregs, or we could
      // have other parts of the register being live through the instruction.
      // So unless we are checking liveness for a subrange it is ok for the
      // live range to continue, given that we have a dead def of a
      // subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}

void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
             ++Units) {
          if (MRI->isReservedRegUnit(*Units))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn())
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register have to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {
          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here.
        // By definition we are fine if the super register is not
        // (completely) dead; if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}

// This function gets called after visiting all instructions in a bundle. The
// argument points to the bundle header.
// Normal stand-alone instructions are also considered 'bundles', and this
// function is called for all of them.
void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
  BBInfo &MInfo = MBBInfoMap[MI->getParent()];
  set_union(MInfo.regsKilled, regsKilled);
  set_subtract(regsLive, regsKilled); regsKilled.clear();
  // Kill any masked registers.
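  // A regmask operand (typically on a call) clobbers every physical register
  // it does not preserve, so any live physreg covered by one of the collected
  // masks dies here.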
  while (!regMasks.empty()) {
    const uint32_t *Mask = regMasks.pop_back_val();
    for (Register Reg : regsLive)
      if (Reg.isPhysical() &&
          MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
        regsDead.push_back(Reg);
  }
  set_subtract(regsLive, regsDead); regsDead.clear();
  set_union(regsLive, regsDefined); regsDefined.clear();
}

void
MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
  MBBInfoMap[MBB].regsLiveOut = regsLive;
  regsLive.clear();

  if (Indexes) {
    SlotIndex stop = Indexes->getMBBEndIdx(MBB);
    if (!(stop > lastIndex)) {
      report("Block ends before last instruction index", MBB);
      errs() << "Block ends at " << stop
             << " last instruction was at " << lastIndex << '\n';
    }
    lastIndex = stop;
  }
}

namespace {
// This implements a set of registers that serves as a filter: it can filter
// other sets by passing through elements not in the filter and blocking those
// that are. Any filter implicitly includes the full set of physical registers
// upon creation, thus filtering them all out. The filter itself as a set only
// grows, and needs to be as efficient as possible.
struct VRegFilter {
  // Add elements to the filter itself. \pre Input set \p FromRegSet must have
  // no duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void add(const RegSetT &FromRegSet) {
    SmallVector<Register, 0> VRegsBuffer;
    filterAndAdd(FromRegSet, VRegsBuffer);
  }
  // Filter \p FromRegSet through the filter and append passed elements into
  // \p ToVRegs. All elements appended are then added to the filter itself.
  // \returns true if anything changed.
  template <typename RegSetT>
  bool filterAndAdd(const RegSetT &FromRegSet,
                    SmallVectorImpl<Register> &ToVRegs) {
    unsigned SparseUniverse = Sparse.size();
    unsigned NewSparseUniverse = SparseUniverse;
    unsigned NewDenseSize = Dense.size();
    size_t Begin = ToVRegs.size();
    for (Register Reg : FromRegSet) {
      if (!Reg.isVirtual())
        continue;
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax) {
        if (Index < SparseUniverse && Sparse.test(Index))
          continue;
        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
      } else {
        if (Dense.count(Reg))
          continue;
        ++NewDenseSize;
      }
      ToVRegs.push_back(Reg);
    }
    size_t End = ToVRegs.size();
    if (Begin == End)
      return false;
    // Reserving space in sets once performs better than doing so continuously
    // and pays easily for double look-ups (even in Dense with
    // SparseUniverseMax tuned all the way down) and double iteration (the
    // second one is over a SmallVector, which is a lot cheaper compared to
    // DenseSet or BitVector).
    Sparse.resize(NewSparseUniverse);
    Dense.reserve(NewDenseSize);
    for (unsigned I = Begin; I < End; ++I) {
      Register Reg = ToVRegs[I];
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax)
        Sparse.set(Index);
      else
        Dense.insert(Reg);
    }
    return true;
  }

private:
  static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
  // VRegs indexed within SparseUniverseMax are tracked by Sparse; those beyond
The only purpose of the threashold and the Dense set 2553 // is to have a reasonably growing memory usage in pathological cases (large 2554 // number of very sparse VRegFilter instances live at the same time). In 2555 // practice even in the worst-by-execution time cases having all elements 2556 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more 2557 // space efficient than if tracked by Dense. The threashold is set to keep the 2558 // worst-case memory usage within 2x of figures determined empirically for 2559 // "all Dense" scenario in such worst-by-execution-time cases. 2560 BitVector Sparse; 2561 DenseSet<unsigned> Dense; 2562 }; 2563 2564 // Implements both a transfer function and a (binary, in-place) join operator 2565 // for a dataflow over register sets with set union join and filtering transfer 2566 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time. 2567 // Maintains out_b as its state, allowing for O(n) iteration over it at any 2568 // time, where n is the size of the set (as opposed to O(U) where U is the 2569 // universe). filter_b implicitly contains all physical registers at all times. 2570 class FilteringVRegSet { 2571 VRegFilter Filter; 2572 SmallVector<Register, 0> VRegs; 2573 2574 public: 2575 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates. 2576 // Both virtual and physical registers are fine. 2577 template <typename RegSetT> void addToFilter(const RegSetT &RS) { 2578 Filter.add(RS); 2579 } 2580 // Passes \p RS through the filter_b (transfer function) and adds what's left 2581 // to itself (out_b). 2582 template <typename RegSetT> bool add(const RegSetT &RS) { 2583 // Double-duty the Filter: to maintain VRegs a set (and the join operation 2584 // a set union) just add everything being added here to the Filter as well. 2585 return Filter.filterAndAdd(RS, VRegs); 2586 } 2587 using const_iterator = decltype(VRegs)::const_iterator; 2588 const_iterator begin() const { return VRegs.begin(); } 2589 const_iterator end() const { return VRegs.end(); } 2590 size_t size() const { return VRegs.size(); } 2591 }; 2592 } // namespace 2593 2594 // Calculate the largest possible vregsPassed sets. These are the registers that 2595 // can pass through an MBB live, but may not be live every time. It is assumed 2596 // that all vregsPassed sets are empty before the call. 2597 void MachineVerifier::calcRegsPassed() { 2598 if (MF->empty()) 2599 // ReversePostOrderTraversal doesn't handle empty functions. 2600 return; 2601 2602 for (const MachineBasicBlock *MB : 2603 ReversePostOrderTraversal<const MachineFunction *>(MF)) { 2604 FilteringVRegSet VRegs; 2605 BBInfo &Info = MBBInfoMap[MB]; 2606 assert(Info.reachable); 2607 2608 VRegs.addToFilter(Info.regsKilled); 2609 VRegs.addToFilter(Info.regsLiveOut); 2610 for (const MachineBasicBlock *Pred : MB->predecessors()) { 2611 const BBInfo &PredInfo = MBBInfoMap[Pred]; 2612 if (!PredInfo.reachable) 2613 continue; 2614 2615 VRegs.add(PredInfo.regsLiveOut); 2616 VRegs.add(PredInfo.vregsPassed); 2617 } 2618 Info.vregsPassed.reserve(VRegs.size()); 2619 Info.vregsPassed.insert(VRegs.begin(), VRegs.end()); 2620 } 2621 } 2622 2623 // Calculate the set of virtual registers that must be passed through each basic 2624 // block in order to satisfy the requirements of successor blocks. This is very 2625 // similar to calcRegsPassed, only backwards. 2626 void MachineVerifier::calcRegsRequired() { 2627 // First push live-in regs to predecessors' vregsRequired. 
  SmallPtrSet<const MachineBasicBlock*, 8> todo;
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI nodes.
    for (const MachineInstr &MI : MBB.phis()) {
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip those operands which are undef regs or not regs.
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}

// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  SmallPtrSet<const MachineBasicBlock*, 8> seen;
  for (const MachineInstr &Phi : MBB) {
    if (!Phi.isPHI())
      break;
    seen.clear();

    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!Register::isVirtualRegister(DefReg))
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
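    // A PHI must carry one (register, block) pair for every CFG predecessor,
    // e.g. (hypothetical MIR):
    //   %2:gpr32 = PHI %0, %bb.0, %1, %bb.1
    // A predecessor missing from the operand list is reported below.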
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}

void MachineVerifier::visitMachineFunctionAfter() {
  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available.
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check the live-in list of each MBB. If a register is live into an MBB,
  // check that the register is in regsLiveOut of each predecessor block.
  // Since this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable and are not
  // reserved, which could mean a condition code register for instance.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
      for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
        MCPhysReg LiveInReg = P.PhysReg;
        bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
        if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
          continue;
        for (const MachineBasicBlock *Pred : MBB.predecessors()) {
          BBInfo &PInfo = MBBInfoMap[Pred];
          if (!PInfo.regsLiveOut.count(LiveInReg)) {
            report("Live in register not found to be live out from "
                   "predecessor.",
                   &MBB);
            errs() << TRI->getName(LiveInReg)
                   << " not found to be live out from "
                   << printMBBReference(*Pred) << "\n";
          }
        }
      }

  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
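  // Debug instruction numbers are handed out by
  // MachineFunction::getNewDebugInstrNum and are expected to be unique within
  // a function.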
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}

void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks.
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " is not needed live through the block.\n";
        }
      }
    }
  }
}

void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      errs() << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}

void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
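  // The value must be defined by a real instruction, and that instruction
  // must actually write Reg (or one of its register units) in a lane covered
  // by LaneMask; this is checked below.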
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Register::isVirtualRegister(Reg)) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        if (!Register::isPhysicalRegister(MOI->getReg()) ||
            !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}

void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             const LiveRange::const_iterator I,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // No more checks for live-out segments.
  if (S.end == LiveInts->getMBBEndIdx(EndMBB))
    return;

  // RegUnit intervals are allowed dead phis.
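  // That is, a regunit range may carry a PHI value that is immediately dead
  // at the block entry, so a segment covering just the PHI def's dead slot is
  // tolerated.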
  if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() &&
      S.start == VNI->def && S.end == VNI->def.getDeadSlot())
    return;

  // The live segment is ending inside EndMBB.
  const MachineInstr *MI =
      LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
  if (!MI) {
    report("Live segment doesn't end at a valid instruction", EndMBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // The block slot must refer to a basic block boundary.
  if (S.end.isBlock()) {
    report("Live segment ends at B slot of an instruction", EndMBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  if (S.end.isDead()) {
    // Segment ends on the dead slot.
    // That means there must be a dead def.
    if (!SlotIndex::isSameInstr(S.start, S.end)) {
      report("Live segment ending at dead slot spans instructions", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }
  }

  // After tied operands are rewritten, a live segment can only end at an
  // early-clobber slot if it is being redefined by an early-clobber def.
  // TODO: Before tied operands are rewritten, a live segment can only end at
  // an early-clobber slot if the last use is tied to an early-clobber def.
  if (MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::TiedOpsRewritten) &&
      S.end.isEarlyClobber()) {
    if (I + 1 == LR.end() || (I + 1)->start != S.end) {
      report("Live segment ending at early clobber slot must be "
             "redefined by an EC def in the same instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }
  }

  // The following checks only apply to virtual registers. Physreg liveness
  // is too weird to check.
  if (Register::isVirtualRegister(Reg)) {
    // A live segment can end with either a redefinition, a kill flag on a
    // use, or a dead flag on a def.
    bool hasRead = false;
    bool hasSubRegDef = false;
    bool hasDeadDef = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || MOI->getReg() != Reg)
        continue;
      unsigned Sub = MOI->getSubReg();
      LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
                                 : LaneBitmask::getAll();
      if (MOI->isDef()) {
        if (Sub != 0) {
          hasSubRegDef = true;
          // An operand %0:sub0 reads %0:sub1..n. Invert the lane
          // mask for subregister defs. Read-undef defs will be handled by
          // readsReg below.
          SLM = ~SLM;
        }
        if (MOI->isDead())
          hasDeadDef = true;
      }
      if (LaneMask.any() && (LaneMask & SLM).none())
        continue;
      if (MOI->readsReg())
        hasRead = true;
    }
    if (S.end.isDead()) {
      // Make sure that the corresponding machine operand for a "dead" live
      // range has the dead flag. We cannot perform this check for subregister
      // liveranges as partially dead values are allowed.
      if (LaneMask.none() && !hasDeadDef) {
        report("Instruction ending live segment on dead slot has no dead flag",
               MI);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    } else {
      if (!hasRead) {
        // When tracking subregister liveness, the main range must start new
        // values on partial register writes, even if there is no read.
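        // (A subregister def like %0.sub1 = ... starts a new main-range value
        // without reading %0 when subregister liveness is tracked.)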
        if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
            !hasSubRegDef) {
          report("Instruction ending live segment doesn't read the register",
                 MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  MachineFunction::const_iterator MFI = MBB->getIterator();
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // A predecessor of a landing pad is live-out at its last call.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However, for a phi
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
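      // For a normal (non-PHI) value, every predecessor must flow the very
      // same VNInfo into this block.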
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}

void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}

void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Register::isVirtualRegister(Reg));
  verifyLiveRange(LI, Reg);

  LaneBitmask Mask;
  LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((Mask & SR.LaneMask).any()) {
      report("Lane masks of sub ranges overlap in live interval", MF);
      report_context(LI);
    }
    if ((SR.LaneMask & ~MaxMask).any()) {
      report("Subrange lanemask is invalid", MF);
      report_context(LI);
    }
    if (SR.empty()) {
      report("Subrange must not be empty", MF);
      report_context(SR, LI.reg(), SR.LaneMask);
    }
    Mask |= SR.LaneMask;
    verifyLiveRange(SR, LI.reg(), SR.LaneMask);
    if (!LI.covers(SR)) {
      report("A Subrange is not covered by the main range", MF);
      report_context(LI);
    }
  }

  // Check that the LI has only one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}

namespace {

// FrameSetup and FrameDestroy can have zero adjustment, so a single integer
// cannot tell whether the last event was a FrameSetup or a FrameDestroy when
// the value is zero. We therefore use a bool plus an integer to capture the
// stack state.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
      : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  // Can be negative, which means we are setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};

} // end anonymous namespace

/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, that stack adjustments are identical on all
/// CFG edges to a merge point, and that the frame is destroyed at the end of
/// every return block.
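/// As an illustrative trace (hypothetical sizes; call-frame pseudo names vary
/// by target, e.g. ADJCALLSTACKDOWN/ADJCALLSTACKUP): a FrameSetup of 16 moves
/// the tracked state from (0, false) to (-16, true), and the matching
/// FrameDestroy of 16 must bring it back to (0, false) before the block's
/// exit state is compared with its neighbors.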
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock*> Reachable;

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
           DFI = df_ext_begin(MF, Reachable),
           DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor must already be visited");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    // Update the stack state by checking the contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj =
            BBState.ExitValue < 0 ? -BBState.ExitValue : BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the entry
    // state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
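    // Blocks are visited in DFS order, so a successor reached through an
    // earlier path may already have a recorded entry state; comparing it with
    // this block's exit state catches stack adjustments that disagree across
    // different CFG edges into the same successor.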
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}