//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

struct MachineVerifier {
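  // Typical usage, as in MachineVerifierPass::runOnMachineFunction below:
  // construct a verifier with an optional banner and call verify(), which
  // returns the number of errors found, e.g.
  //   unsigned NumErrors = MachineVerifier(this, Banner.c_str()).verify(MF);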
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes)
      : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
        Indexes(Indexes) {}

  unsigned verify(const MachineFunction &MF);

  Pass *const PASS = nullptr;
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  unsigned foundErrors = 0;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval&);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacks>();
    AU.addUsedIfAvailable<LiveVariables>();
    AU.addUsedIfAvailable<SlotIndexes>();
    AU.addUsedIfAvailable<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
    return false;
  }
};

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors =
      MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return FoundErrors == 0;
}

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
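  // That is, for consecutive entries I and J in the MBBIndex list we expect
  // I->first < J->first; the assertion in the loop below walks the list and
  // checks exactly that.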
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
           << ';' << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used by "
             "a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
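// For example, in a three-instruction bundle the header answers true only for
// isBundledWithSucc(), the middle instruction for both isBundledWithPred()
// and isBundledWithSucc(), and the last one only for isBundledWithPred().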
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      errs() << "First terminator was:\t" << *FirstTerminator;
    }
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
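    // In concrete MIR syntax: %d:_(s64) = G_MERGE_VALUES %a:_(s32), %b:_(s32)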
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() || SrcTy.getScalarType() != DstTy.getScalarType() ||
          SrcTy.getNumElements() != NumDsts * DstTy.getNumElements())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
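    // In concrete MIR syntax:
    //   %v:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %a:_(s32), %b:_(s32)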
1450 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1451 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg()); 1452 if (!DstTy.isVector() || SrcEltTy.isVector()) 1453 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands", 1454 MI); 1455 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2)) 1456 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg())) 1457 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous", 1458 MI); 1459 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits()) 1460 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than " 1461 "dest elt type", 1462 MI); 1463 break; 1464 } 1465 case TargetOpcode::G_CONCAT_VECTORS: { 1466 // Source types should be vectors, and total size should match the dest 1467 // vector size. 1468 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1469 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1470 if (!DstTy.isVector() || !SrcTy.isVector()) 1471 report("G_CONCAT_VECTOR requires vector source and destination operands", 1472 MI); 1473 1474 if (MI->getNumOperands() < 3) 1475 report("G_CONCAT_VECTOR requires at least 2 source operands", MI); 1476 1477 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2)) 1478 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg())) 1479 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI); 1480 if (DstTy.getNumElements() != 1481 SrcTy.getNumElements() * (MI->getNumOperands() - 1)) 1482 report("G_CONCAT_VECTOR num dest and source elements should match", MI); 1483 break; 1484 } 1485 case TargetOpcode::G_ICMP: 1486 case TargetOpcode::G_FCMP: { 1487 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1488 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg()); 1489 1490 if ((DstTy.isVector() != SrcTy.isVector()) || 1491 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements())) 1492 report("Generic vector icmp/fcmp must preserve number of lanes", MI); 1493 1494 break; 1495 } 1496 case TargetOpcode::G_EXTRACT: { 1497 const MachineOperand &SrcOp = MI->getOperand(1); 1498 if (!SrcOp.isReg()) { 1499 report("extract source must be a register", MI); 1500 break; 1501 } 1502 1503 const MachineOperand &OffsetOp = MI->getOperand(2); 1504 if (!OffsetOp.isImm()) { 1505 report("extract offset must be a constant", MI); 1506 break; 1507 } 1508 1509 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits(); 1510 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits(); 1511 if (SrcSize == DstSize) 1512 report("extract source must be larger than result", MI); 1513 1514 if (DstSize + OffsetOp.getImm() > SrcSize) 1515 report("extract reads past end of register", MI); 1516 break; 1517 } 1518 case TargetOpcode::G_INSERT: { 1519 const MachineOperand &SrcOp = MI->getOperand(2); 1520 if (!SrcOp.isReg()) { 1521 report("insert source must be a register", MI); 1522 break; 1523 } 1524 1525 const MachineOperand &OffsetOp = MI->getOperand(3); 1526 if (!OffsetOp.isImm()) { 1527 report("insert offset must be a constant", MI); 1528 break; 1529 } 1530 1531 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits(); 1532 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits(); 1533 1534 if (DstSize <= SrcSize) 1535 report("inserted size must be smaller than total register", MI); 1536 1537 if (SrcSize + OffsetOp.getImm() > DstSize) 1538 report("insert writes past end of register", MI); 1539 1540 break; 1541 } 1542 case 
TargetOpcode::G_JUMP_TABLE: { 1543 if (!MI->getOperand(1).isJTI()) 1544 report("G_JUMP_TABLE source operand must be a jump table index", MI); 1545 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1546 if (!DstTy.isPointer()) 1547 report("G_JUMP_TABLE dest operand must have a pointer type", MI); 1548 break; 1549 } 1550 case TargetOpcode::G_BRJT: { 1551 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer()) 1552 report("G_BRJT src operand 0 must be a pointer type", MI); 1553 1554 if (!MI->getOperand(1).isJTI()) 1555 report("G_BRJT src operand 1 must be a jump table index", MI); 1556 1557 const auto &IdxOp = MI->getOperand(2); 1558 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer()) 1559 report("G_BRJT src operand 2 must be a scalar reg type", MI); 1560 break; 1561 } 1562 case TargetOpcode::G_INTRINSIC: 1563 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 1564 case TargetOpcode::G_INTRINSIC_CONVERGENT: 1565 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: { 1566 // TODO: Should verify number of def and use operands, but the current 1567 // interface requires passing in IR types for mangling. 1568 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs()); 1569 if (!IntrIDOp.isIntrinsicID()) { 1570 report("G_INTRINSIC first src operand must be an intrinsic ID", MI); 1571 break; 1572 } 1573 1574 if (!verifyGIntrinsicSideEffects(MI)) 1575 break; 1576 if (!verifyGIntrinsicConvergence(MI)) 1577 break; 1578 1579 break; 1580 } 1581 case TargetOpcode::G_SEXT_INREG: { 1582 if (!MI->getOperand(2).isImm()) { 1583 report("G_SEXT_INREG expects an immediate operand #2", MI); 1584 break; 1585 } 1586 1587 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1588 int64_t Imm = MI->getOperand(2).getImm(); 1589 if (Imm <= 0) 1590 report("G_SEXT_INREG size must be >= 1", MI); 1591 if (Imm >= SrcTy.getScalarSizeInBits()) 1592 report("G_SEXT_INREG size must be less than source bit width", MI); 1593 break; 1594 } 1595 case TargetOpcode::G_BSWAP: { 1596 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1597 if (DstTy.getScalarSizeInBits() % 16 != 0) 1598 report("G_BSWAP size must be a multiple of 16 bits", MI); 1599 break; 1600 } 1601 case TargetOpcode::G_SHUFFLE_VECTOR: { 1602 const MachineOperand &MaskOp = MI->getOperand(3); 1603 if (!MaskOp.isShuffleMask()) { 1604 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI); 1605 break; 1606 } 1607 1608 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1609 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg()); 1610 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg()); 1611 1612 if (Src0Ty != Src1Ty) 1613 report("Source operands must be the same type", MI); 1614 1615 if (Src0Ty.getScalarType() != DstTy.getScalarType()) 1616 report("G_SHUFFLE_VECTOR cannot change element type", MI); 1617 1618 // Don't check that all operands are vector because scalars are used in 1619 // place of 1 element vectors. 1620 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1; 1621 int DstNumElts = DstTy.isVector() ? 

    ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();

    if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
      report("Wrong result type for shufflemask", MI);

    for (int Idx : MaskIdxes) {
      if (Idx < 0)
        continue;

      if (Idx >= 2 * SrcNumElts)
        report("Out of bounds shuffle index", MI);
    }

    break;
  }
  case TargetOpcode::G_DYN_STACKALLOC: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &AllocOp = MI->getOperand(1);
    const MachineOperand &AlignOp = MI->getOperand(2);

    if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
      report("dst operand 0 must be a pointer type", MI);
      break;
    }

    if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
      report("src operand 1 must be a scalar reg type", MI);
      break;
    }

    if (!AlignOp.isImm()) {
      report("src operand 2 must be an immediate type", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_MEMCPY_INLINE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    if (MMOs.size() != 2) {
      report("memcpy/memmove must have 2 memory operands", MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
        (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
      report("wrong memory operand types", MI);
      break;
    }

    if (MMOs[0]->getSize() != MMOs[1]->getSize())
      report("inconsistent memory operand sizes", MI);

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());

    if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
      report("memory instruction operand must be a pointer", MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent store address space", MI);
    if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
      report("inconsistent load address space", MI);

    if (Opc != TargetOpcode::G_MEMCPY_INLINE)
      if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
        report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);

    break;
  }
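  // Illustrative note for the memcpy/memmove checks above: the first memory
  // operand describes the destination store and the second the source load,
  // so a well-formed instruction lists its memoperands as, e.g.,
  //   :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
  // (a MIR sketch; sizes and IR value names will vary).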
"memset" : "bzero"; 1700 if (MMOs.size() != 1) { 1701 report(Twine(Name, " must have 1 memory operand"), MI); 1702 break; 1703 } 1704 1705 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) { 1706 report(Twine(Name, " memory operand must be a store"), MI); 1707 break; 1708 } 1709 1710 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg()); 1711 if (!DstPtrTy.isPointer()) { 1712 report(Twine(Name, " operand must be a pointer"), MI); 1713 break; 1714 } 1715 1716 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace()) 1717 report("inconsistent " + Twine(Name, " address space"), MI); 1718 1719 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() || 1720 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL)) 1721 report("'tail' flag (last operand) must be an immediate 0 or 1", MI); 1722 1723 break; 1724 } 1725 case TargetOpcode::G_VECREDUCE_SEQ_FADD: 1726 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: { 1727 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1728 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1729 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1730 if (!DstTy.isScalar()) 1731 report("Vector reduction requires a scalar destination type", MI); 1732 if (!Src1Ty.isScalar()) 1733 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI); 1734 if (!Src2Ty.isVector()) 1735 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI); 1736 break; 1737 } 1738 case TargetOpcode::G_VECREDUCE_FADD: 1739 case TargetOpcode::G_VECREDUCE_FMUL: 1740 case TargetOpcode::G_VECREDUCE_FMAX: 1741 case TargetOpcode::G_VECREDUCE_FMIN: 1742 case TargetOpcode::G_VECREDUCE_FMAXIMUM: 1743 case TargetOpcode::G_VECREDUCE_FMINIMUM: 1744 case TargetOpcode::G_VECREDUCE_ADD: 1745 case TargetOpcode::G_VECREDUCE_MUL: 1746 case TargetOpcode::G_VECREDUCE_AND: 1747 case TargetOpcode::G_VECREDUCE_OR: 1748 case TargetOpcode::G_VECREDUCE_XOR: 1749 case TargetOpcode::G_VECREDUCE_SMAX: 1750 case TargetOpcode::G_VECREDUCE_SMIN: 1751 case TargetOpcode::G_VECREDUCE_UMAX: 1752 case TargetOpcode::G_VECREDUCE_UMIN: { 1753 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1754 if (!DstTy.isScalar()) 1755 report("Vector reduction requires a scalar destination type", MI); 1756 break; 1757 } 1758 1759 case TargetOpcode::G_SBFX: 1760 case TargetOpcode::G_UBFX: { 1761 LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); 1762 if (DstTy.isVector()) { 1763 report("Bitfield extraction is not supported on vectors", MI); 1764 break; 1765 } 1766 break; 1767 } 1768 case TargetOpcode::G_SHL: 1769 case TargetOpcode::G_LSHR: 1770 case TargetOpcode::G_ASHR: 1771 case TargetOpcode::G_ROTR: 1772 case TargetOpcode::G_ROTL: { 1773 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg()); 1774 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg()); 1775 if (Src1Ty.isVector() != Src2Ty.isVector()) { 1776 report("Shifts and rotates require operands to be either all scalars or " 1777 "all vectors", 1778 MI); 1779 break; 1780 } 1781 break; 1782 } 1783 case TargetOpcode::G_LLROUND: 1784 case TargetOpcode::G_LROUND: { 1785 verifyAllRegOpsScalar(*MI, *MRI); 1786 break; 1787 } 1788 case TargetOpcode::G_IS_FPCLASS: { 1789 LLT DestTy = MRI->getType(MI->getOperand(0).getReg()); 1790 LLT DestEltTy = DestTy.getScalarType(); 1791 if (!DestEltTy.isScalar()) { 1792 report("Destination must be a scalar or vector of scalars", MI); 1793 break; 1794 } 1795 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); 1796 LLT SrcEltTy = SrcTy.getScalarType(); 1797 if (!SrcEltTy.isScalar()) { 1798 report("Source 
  case TargetOpcode::G_IS_FPCLASS: {
    LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
    LLT DestEltTy = DestTy.getScalarType();
    if (!DestEltTy.isScalar()) {
      report("Destination must be a scalar or vector of scalars", MI);
      break;
    }
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT SrcEltTy = SrcTy.getScalarType();
    if (!SrcEltTy.isScalar()) {
      report("Source must be a scalar or vector of scalars", MI);
      break;
    }
    if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
      break;
    const MachineOperand &TestMO = MI->getOperand(2);
    if (!TestMO.isImm()) {
      report("floating-point class set (operand 2) must be an immediate", MI);
      break;
    }
    int64_t Test = TestMO.getImm();
    if (Test < 0 || Test > fcAllFlags) {
      report("Incorrect floating-point class set (operand 2)", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_PREFETCH: {
    const MachineOperand &AddrOp = MI->getOperand(0);
    if (!AddrOp.isReg() || !MRI->getType(AddrOp.getReg()).isPointer()) {
      report("addr operand must be a pointer", &AddrOp, 0);
      break;
    }
    const MachineOperand &RWOp = MI->getOperand(1);
    if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
      report("rw operand must be an immediate 0-1", &RWOp, 1);
      break;
    }
    const MachineOperand &LocalityOp = MI->getOperand(2);
    if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
      report("locality operand must be an immediate 0-3", &LocalityOp, 2);
      break;
    }
    const MachineOperand &CacheTypeOp = MI->getOperand(3);
    if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
      report("cache type operand must be an immediate 0-1", &CacheTypeOp, 3);
      break;
    }
    break;
  }
  case TargetOpcode::G_ASSERT_ALIGN: {
    if (MI->getOperand(2).getImm() < 1)
      report("alignment immediate must be >= 1", MI);
    break;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    if (!MI->getOperand(1).isCPI())
      report("Src operand 1 must be a constant pool index", MI);
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("Dst operand 0 must be a pointer", MI);
    break;
  }
  default:
    break;
  }
}

void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (MI->getNumOperands() < MCID.getNumOperands()) {
    report("Too few operands", MI);
    errs() << MCID.getNumOperands() << " operands expected, but "
           << MI->getNumOperands() << " given.\n";
  }

  if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
    report("NoConvergent flag expected only on convergent instructions.", MI);

  if (MI->isPHI()) {
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::NoPHIs))
      report("Found PHI instruction with NoPHIs property set", MI);

    if (FirstNonPHI)
      report("Found PHI instruction after non-PHI", MI);
  } else if (FirstNonPHI == nullptr)
    FirstNonPHI = MI;

  // Check the tied operands.
  if (MI->isInlineAsm())
    verifyInlineAsm(MI);

  // Check that unspillable terminators define a reg and have at most one use.
  if (TII->isUnspillableTerminator(MI)) {
    if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
      report("Unspillable Terminator does not define a reg", MI);
    Register Def = MI->getOperand(0).getReg();
    if (Def.isVirtual() &&
        !MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::NoPHIs) &&
        std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
      report("Unspillable Terminator expected to have at most one use!", MI);
  }

  // A fully-formed DBG_VALUE must have a location. Ignore partially formed
  // DBG_VALUEs: these are convenient to use in tests, but should never get
  // generated.
  if (MI->isDebugValue() && MI->getNumOperands() == 4)
    if (!MI->getDebugLoc())
      report("Missing DebugLoc for debug instruction", MI);

  // Meta instructions should never be the subject of debug value tracking,
  // they don't create a value in the output program at all.
  if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
    report("Metadata instruction should not have a value tracking number", MI);

  // Check the MachineMemOperands for basic consistency.
  for (MachineMemOperand *Op : MI->memoperands()) {
    if (Op->isLoad() && !MI->mayLoad())
      report("Missing mayLoad flag", MI);
    if (Op->isStore() && !MI->mayStore())
      report("Missing mayStore flag", MI);
  }

  // Debug values must not have a slot index.
  // Other instructions must have one, unless they are inside a bundle.
  if (LiveInts) {
    bool mapped = !LiveInts->isNotInMIMap(*MI);
    if (MI->isDebugOrPseudoInstr()) {
      if (mapped)
        report("Debug instruction has a slot index", MI);
    } else if (MI->isInsideBundle()) {
      if (mapped)
        report("Instruction inside bundle has a slot index", MI);
    } else {
      if (!mapped)
        report("Missing slot index", MI);
    }
  }

  unsigned Opc = MCID.getOpcode();
  if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
    verifyPreISelGenericInstruction(MI);
    return;
  }

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &SrcOp = MI->getOperand(1);
    const Register SrcReg = SrcOp.getReg();
    const Register DstReg = DstOp.getReg();

    LLT DstTy = MRI->getType(DstReg);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isValid() && DstTy.isValid()) {
      // If both types are valid, check that the types are the same.
      if (SrcTy != DstTy) {
        report("Copy Instruction is illegal with mismatching types", MI);
        errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
      }

      break;
    }

    if (!SrcTy.isValid() && !DstTy.isValid())
      break;

    // If we have only one valid type, this is likely a copy between a virtual
    // and physical register.
    TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
    TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
    if (SrcReg.isPhysical() && DstTy.isValid()) {
      const TargetRegisterClass *SrcRC =
          TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
      if (SrcRC)
        SrcSize = TRI->getRegSizeInBits(*SrcRC);
    }

    if (DstReg.isPhysical() && SrcTy.isValid()) {
      const TargetRegisterClass *DstRC =
          TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
      if (DstRC)
        DstSize = TRI->getRegSizeInBits(*DstRC);
    }

    // The next two checks allow COPY between physical and virtual registers,
    // when the virtual register has a scalable size and the physical register
    // has a fixed size. These checks allow COPY between *potentially*
    // mismatched sizes.
    // However, once RegisterBankSelection occurs, MachineVerifier should
    // be able to resolve a fixed size for the scalable vector, and at that
    // point this function will know for sure whether the sizes are mismatched
    // and correctly report a size mismatch.
    if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
        !SrcSize.isScalable())
      break;
    if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
        !DstSize.isScalable())
      break;

    if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
      if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
        report("Copy Instruction is illegal with mismatching sizes", MI);
        errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
               << "\n";
      }
    }
    break;
  }
  case TargetOpcode::STATEPOINT: {
    StatepointOpers SO(MI);
    if (!MI->getOperand(SO.getIDPos()).isImm() ||
        !MI->getOperand(SO.getNBytesPos()).isImm() ||
        !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
      report("meta operands to STATEPOINT not constant!", MI);
      break;
    }

    auto VerifyStackMapConstant = [&](unsigned Offset) {
      if (Offset >= MI->getNumOperands()) {
        report("stack map constant to STATEPOINT is out of range!", MI);
        return;
      }
      if (!MI->getOperand(Offset - 1).isImm() ||
          MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
          !MI->getOperand(Offset).isImm())
        report("stack map constant to STATEPOINT not well formed!", MI);
    };
    VerifyStackMapConstant(SO.getCCIdx());
    VerifyStackMapConstant(SO.getFlagsIdx());
    VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
    VerifyStackMapConstant(SO.getNumGCPtrIdx());
    VerifyStackMapConstant(SO.getNumAllocaIdx());
    VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());

    // Verify that all explicit statepoint defs are tied to gc operands as
    // they are expected to be a relocation of gc operands.
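    // (Illustrative sketch; the exact operand layout is defined by
    // StatepointOpers. A relocated gc pointer shows up as a def tied to the
    // corresponding gc pointer use, e.g.
    //   %rel:gr64 = STATEPOINT ..., %gcptr:gr64(tied-def 0), ...)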
    unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
    unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
    for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
      unsigned UseOpIdx;
      if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
        report("STATEPOINT defs expected to be tied", MI);
        break;
      }
      if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
        report("STATEPOINT def tied to non-gc operand", MI);
        break;
      }
    }

    // TODO: verify we have properly encoded deopt arguments
  } break;
  case TargetOpcode::INSERT_SUBREG: {
    unsigned InsertedSize;
    if (unsigned SubIdx = MI->getOperand(2).getSubReg())
      InsertedSize = TRI->getSubRegIdxSize(SubIdx);
    else
      InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
    unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
    if (SubRegSize < InsertedSize) {
      report("INSERT_SUBREG expected inserted value to have equal or lesser "
             "size than the subreg it was inserted into", MI);
      break;
    }
  } break;
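  // An illustrative sketch of the shape the next case accepts (register class
  // and subregister index names are target-specific and hypothetical here):
  //   %2:bigrc = REG_SEQUENCE %0:smallrc, %subreg.sub0, %1:smallrc,
  //              %subreg.sub1
  // i.e. one def followed by (register, subregister-index) pairs, giving an
  // odd total operand count.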
  case TargetOpcode::REG_SEQUENCE: {
    unsigned NumOps = MI->getNumOperands();
    if (!(NumOps & 1)) {
      report("Invalid number of operands for REG_SEQUENCE", MI);
      break;
    }

    for (unsigned I = 1; I != NumOps; I += 2) {
      const MachineOperand &RegOp = MI->getOperand(I);
      const MachineOperand &SubRegOp = MI->getOperand(I + 1);

      if (!RegOp.isReg())
        report("Invalid register operand for REG_SEQUENCE", &RegOp, I);

      if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
          SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
        report("Invalid subregister index operand for REG_SEQUENCE",
               &SubRegOp, I + 1);
      }
    }

    Register DstReg = MI->getOperand(0).getReg();
    if (DstReg.isPhysical())
      report("REG_SEQUENCE does not support physical register results", MI);

    if (MI->getOperand(0).getSubReg())
      report("Invalid subreg result for REG_SEQUENCE", MI);

    break;
  }
  }
}

void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumDefs = MCID.getNumDefs();
  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;

  // The first MCID.NumDefs operands must be explicit register defines
  if (MONum < NumDefs) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    if (!MO->isReg())
      report("Explicit definition must be a register", MO, MONum);
    else if (!MO->isDef() && !MCOI.isOptionalDef())
      report("Explicit definition marked as use", MO, MONum);
    else if (MO->isImplicit())
      report("Explicit definition marked as implicit", MO, MONum);
  } else if (MONum < MCID.getNumOperands()) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
    if (!IsOptional) {
      if (MO->isReg()) {
        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
          report("Explicit operand marked as def", MO, MONum);
        if (MO->isImplicit())
          report("Explicit operand marked as implicit", MO, MONum);
      }

      // Check that an instruction has register operands only as expected.
      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
          !MO->isReg() && !MO->isFI())
        report("Expected a register operand.", MO, MONum);
      if (MO->isReg()) {
        if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
            (MCOI.OperandType == MCOI::OPERAND_PCREL &&
             !TII->isPCRelRegisterOperandLegal(*MO)))
          report("Expected a non-register operand.", MO, MONum);
      }
    }

    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
    if (TiedTo != -1) {
      if (!MO->isReg())
        report("Tied use must be a register", MO, MONum);
      else if (!MO->isTied())
        report("Operand should be tied", MO, MONum);
      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
        report("Tied def doesn't match MCInstrDesc", MO, MONum);
      else if (MO->getReg().isPhysical()) {
        const MachineOperand &MOTied = MI->getOperand(TiedTo);
        if (!MOTied.isReg())
          report("Tied counterpart must be a register", &MOTied, TiedTo);
        else if (MOTied.getReg().isPhysical() &&
                 MO->getReg() != MOTied.getReg())
          report("Tied physical registers must match.", &MOTied, TiedTo);
      }
    } else if (MO->isReg() && MO->isTied())
      report("Explicit operand should not be tied", MO, MONum);
  } else if (!MI->isVariadic()) {
    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
    if (!MO->isValidExcessOperand())
      report("Extra explicit operand on non-variadic instruction", MO, MONum);
  }

  switch (MO->getType()) {
  case MachineOperand::MO_Register: {
    // Verify debug flag on debug instructions. Check this first because reg0
    // indicates an undefined debug value.
    if (MI->isDebugInstr() && MO->isUse()) {
      if (!MO->isDebug())
        report("Register operand must be marked debug", MO, MONum);
    } else if (MO->isDebug()) {
      report("Register operand must not be marked debug", MO, MONum);
    }

    const Register Reg = MO->getReg();
    if (!Reg)
      return;
    if (MRI->tracksLiveness() && !MI->isDebugInstr())
      checkLiveness(MO, MONum);

    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
        MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO,
             MONum);

    // Verify the consistency of tied operands.
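    // (Illustrative sketch, using X86 names: after two-address rewriting a
    // tied def/use pair is expected to use a single register, printed as
    //   %2:gr32 = ADD32rr %2:gr32(tied-def 0), %3:gr32, implicit-def $eflags
    // where the use operand is tied back to def operand 0.)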
    if (MO->isTied()) {
      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
      if (!OtherMO.isReg())
        report("Must be tied to a register", MO, MONum);
      if (!OtherMO.isTied())
        report("Missing tie flags on tied operand", MO, MONum);
      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
        report("Inconsistent tie links", MO, MONum);
      if (MONum < MCID.getNumDefs()) {
        if (OtherIdx < MCID.getNumOperands()) {
          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
            report("Explicit def tied to explicit use without tie constraint",
                   MO, MONum);
        } else {
          if (!OtherMO.isImplicit())
            report("Explicit def should be tied to implicit use", MO, MONum);
        }
      }
    }

    // Verify two-address constraints after the twoaddressinstruction pass.
    // Both the twoaddressinstruction and phi-node-elimination passes call
    // MRI->leaveSSA() to clear the IsSSA property, but this verification
    // should run after twoaddressinstruction, not after phi-node-elimination.
    // So instead of keying on IsSSA, we check the TiedOpsRewritten property,
    // which is set by the twoaddressinstruction pass.
    unsigned DefIdx;
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
        Reg != MI->getOperand(DefIdx).getReg())
      report("Two-address instruction operands must be identical", MO, MONum);

    // Check register classes.
    unsigned SubIdx = MO->getSubReg();

    if (Reg.isPhysical()) {
      if (SubIdx) {
        report("Illegal subregister index for physical register", MO, MONum);
        return;
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
                TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (!DRC->contains(Reg)) {
            report("Illegal physical register for instruction", MO, MONum);
            errs() << printReg(Reg, TRI) << " is not a "
                   << TRI->getRegClassName(DRC) << " register.\n";
          }
        }
      }
      if (MO->isRenamable()) {
        if (MRI->isReserved(Reg)) {
          report("isRenamable set on reserved register", MO, MONum);
          return;
        }
      }
    } else {
      // Virtual register.
      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
      if (!RC) {
        // This is a generic virtual register.

        // Do not allow undef uses for generic virtual registers. This ensures
        // getVRegDef can never fail and return null on a generic register.
        //
        // FIXME: This restriction should probably be broadened to all SSA
        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
        // run on the SSA function just before phi elimination.
        if (MO->isUndef())
          report("Generic virtual register use cannot be undef", MO, MONum);

        // Debug value instruction is permitted to use undefined vregs.
        // This is a performance measure to skip the overhead of immediately
        // pruning unused debug operands. The final undef substitution occurs
        // when debug values are allocated in LDVImpl::handleDebugValue, so
        // these verifications always apply after this pass.
        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
          // If we're post-Select, we can't have gvregs anymore.
          if (isFunctionSelected) {
            report("Generic virtual register invalid in a Selected function",
                   MO, MONum);
            return;
          }

          // The gvreg must have a type and it must not have a SubIdx.
          LLT Ty = MRI->getType(Reg);
          if (!Ty.isValid()) {
            report("Generic virtual register must have a valid type", MO,
                   MONum);
            return;
          }

          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
          const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();

          // If we're post-RegBankSelect, the gvreg must have a bank.
          if (!RegBank && isFunctionRegBankSelected) {
            report("Generic virtual register must have a bank in a "
                   "RegBankSelected function",
                   MO, MONum);
            return;
          }

          // Make sure the register fits into its register bank if any.
          if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
            report("Register bank is too small for virtual register", MO,
                   MONum);
            errs() << "Register bank " << RegBank->getName() << " too small ("
                   << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
                   << Ty.getSizeInBits() << "-bits\n";
            return;
          }
        }

        if (SubIdx) {
          report("Generic virtual register does not allow subregister index",
                 MO, MONum);
          return;
        }

        // If this is a target-specific instruction and this operand has a
        // register class constraint, the virtual register must comply with
        // it.
        if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
            MONum < MCID.getNumOperands() &&
            TII->getRegClass(MCID, MONum, TRI, *MF)) {
          report("Virtual register does not match instruction constraint", MO,
                 MONum);
          errs() << "Expect register class "
                 << TRI->getRegClassName(
                        TII->getRegClass(MCID, MONum, TRI, *MF))
                 << " but got nothing\n";
          return;
        }

        break;
      }
      if (SubIdx) {
        const TargetRegisterClass *SRC =
            TRI->getSubClassWithSubReg(RC, SubIdx);
        if (!SRC) {
          report("Invalid subregister index for virtual register", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not support subreg index " << SubIdx << "\n";
          return;
        }
        if (RC != SRC) {
          report("Invalid register class for subregister index", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not fully support subreg index " << SubIdx << "\n";
          return;
        }
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
                TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (SubIdx) {
            const TargetRegisterClass *SuperRC =
                TRI->getLargestLegalSuperClass(RC, *MF);
            if (!SuperRC) {
              report("No largest legal super class exists.", MO, MONum);
              return;
            }
            DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
            if (!DRC) {
              report("No matching super-reg register class.", MO, MONum);
              return;
            }
          }
          if (!RC->hasSuperClassEq(DRC)) {
            report("Illegal virtual register for instruction", MO, MONum);
            errs() << "Expected a " << TRI->getRegClassName(DRC)
                   << " register, but got a " << TRI->getRegClassName(RC)
                   << " register\n";
          }
        }
      }
    }
    break;
  }

  case MachineOperand::MO_RegisterMask:
    regMasks.push_back(MO->getRegMask());
    break;

  case MachineOperand::MO_MachineBasicBlock:
    if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
      report("PHI operand is not in the CFG", MO, MONum);
    break;

  case MachineOperand::MO_FrameIndex:
    if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
        LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      int FI = MO->getIndex();
      LiveInterval &LI = LiveStks->getInterval(FI);
      SlotIndex Idx = LiveInts->getInstructionIndex(*MI);

      bool stores = MI->mayStore();
      bool loads = MI->mayLoad();
      // For a memory-to-memory move, we need to check if the frame
      // index is used for storing or loading, by inspecting the
      // memory operands.
      if (stores && loads) {
        for (auto *MMO : MI->memoperands()) {
          const PseudoSourceValue *PSV = MMO->getPseudoValue();
          if (PSV == nullptr) continue;
          const FixedStackPseudoSourceValue *Value =
            dyn_cast<FixedStackPseudoSourceValue>(PSV);
          if (Value == nullptr) continue;
          if (Value->getFrameIndex() != FI) continue;

          if (MMO->isStore())
            loads = false;
          else
            stores = false;
          break;
        }
        if (loads == stores)
          report("Missing fixed stack memoperand.", MI);
      }
      if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
        report("Instruction loads from dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
      if (stores && !LI.liveAt(Idx.getRegSlot())) {
        report("Instruction stores to dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
    }
    break;

  case MachineOperand::MO_CFIIndex:
    if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
      report("CFI instruction has invalid index", MO, MONum);
    break;

  default:
    break;
  }
}

void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex UseIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         LaneBitmask LaneMask) {
  const MachineInstr *MI = MO->getParent();
  LiveQueryResult LRQ = LR.Query(UseIdx);
  bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
  // Check if we have a segment at the use, note however that we only need one
  // live subregister range, the others may be dead.
  if (!HasValue && LaneMask.none()) {
    report("No live segment at use", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    report_context(UseIdx);
  }
  if (MO->isKill() && !LRQ.isKill()) {
    report("Live range continues after kill flag", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(UseIdx);
  }
}

void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO's def slot. E.g. when we check a "normal"
    // subreg MO here, another EC subreg MO in the same instruction can give
    // the whole reg an EC def slot that differs from the currently checked
    // MO's def slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // The check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter().
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead. There
      // could be other non-dead defs of other subregs, or we could have other
      // parts of the register being live through the instruction. So unless we
      // are checking liveness for a subrange it is ok for the live range to
      // continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}

void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }

  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
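        // (Sketch: given "%2:gr32 = PHI %0, %bb.1, %1, %bb.2", the use of %0
        // is checked for liveness at the end of %bb.1 rather than at the PHI
        // itself; the register class is an illustrative X86 choice.)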
        UseIdx = LiveInts->getMBBEndIdx(
            MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register have to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live.
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {
          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead; if the complete super register is dead, we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}

// This function gets called after visiting all instructions in a bundle. The
// argument points to the bundle header.
// Normal stand-alone instructions are also considered 'bundles', and this
// function is called for all of them.
void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
  BBInfo &MInfo = MBBInfoMap[MI->getParent()];
  set_union(MInfo.regsKilled, regsKilled);
  set_subtract(regsLive, regsKilled); regsKilled.clear();
  // Kill any masked registers.
  while (!regMasks.empty()) {
    const uint32_t *Mask = regMasks.pop_back_val();
    for (Register Reg : regsLive)
      if (Reg.isPhysical() &&
          MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
        regsDead.push_back(Reg);
  }
  set_subtract(regsLive, regsDead); regsDead.clear();
  set_union(regsLive, regsDefined); regsDefined.clear();
}

void
MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
  MBBInfoMap[MBB].regsLiveOut = regsLive;
  regsLive.clear();

  if (Indexes) {
    SlotIndex stop = Indexes->getMBBEndIdx(MBB);
    if (!(stop > lastIndex)) {
      report("Block ends before last instruction index", MBB);
      errs() << "Block ends at " << stop
             << " last instruction was at " << lastIndex << '\n';
    }
    lastIndex = stop;
  }
}

namespace {
// This implements a set of registers that serves as a filter: it can filter
// other sets by passing through elements not in the filter and blocking those
// that are. Any filter implicitly includes the full set of physical registers
// upon creation, thus filtering them all out. The filter itself as a set only
// grows, and needs to be as efficient as possible.
struct VRegFilter {
  // Add elements to the filter itself. \pre Input set \p FromRegSet must have
  // no duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void add(const RegSetT &FromRegSet) {
    SmallVector<Register, 0> VRegsBuffer;
    filterAndAdd(FromRegSet, VRegsBuffer);
  }
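  // Usage sketch (illustrative; see calcRegsPassed below for the real use):
  //   VRegFilter Filter;
  //   Filter.add(Info.regsKilled);                     // seed the filter
  //   SmallVector<Register, 0> NewVRegs;
  //   Filter.filterAndAdd(PredInfo.regsLiveOut, NewVRegs); // unseen vregs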
  // Filter \p FromRegSet through the filter and append passed elements into \p
  // ToVRegs. All elements appended are then added to the filter itself.
  // \returns true if anything changed.
  template <typename RegSetT>
  bool filterAndAdd(const RegSetT &FromRegSet,
                    SmallVectorImpl<Register> &ToVRegs) {
    unsigned SparseUniverse = Sparse.size();
    unsigned NewSparseUniverse = SparseUniverse;
    unsigned NewDenseSize = Dense.size();
    size_t Begin = ToVRegs.size();
    for (Register Reg : FromRegSet) {
      if (!Reg.isVirtual())
        continue;
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax) {
        if (Index < SparseUniverse && Sparse.test(Index))
          continue;
        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
      } else {
        if (Dense.count(Reg))
          continue;
        ++NewDenseSize;
      }
      ToVRegs.push_back(Reg);
    }
    size_t End = ToVRegs.size();
    if (Begin == End)
      return false;
    // Reserving space in sets once performs better than doing so continuously
    // and pays easily for double look-ups (even in Dense with SparseUniverseMax
    // tuned all the way down) and double iteration (the second one is over a
    // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
    Sparse.resize(NewSparseUniverse);
    Dense.reserve(NewDenseSize);
    for (unsigned I = Begin; I < End; ++I) {
      Register Reg = ToVRegs[I];
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax)
        Sparse.set(Index);
      else
        Dense.insert(Reg);
    }
    return true;
  }

private:
  static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
  // are tracked by Dense. The only purpose of the threshold and the Dense set
  // is to have a reasonably growing memory usage in pathological cases (large
  // number of very sparse VRegFilter instances live at the same time). In
  // practice even in the worst-by-execution-time cases having all elements
  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
  // space efficient than if tracked by Dense. The threshold is set to keep the
  // worst-case memory usage within 2x of figures determined empirically for
  // the "all Dense" scenario in such worst-by-execution-time cases.
  BitVector Sparse;
  DenseSet<unsigned> Dense;
};

// Implements both a transfer function and a (binary, in-place) join operator
// for a dataflow over register sets with set union join and filtering transfer
// (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time.
// Maintains out_b as its state, allowing for O(n) iteration over it at any
// time, where n is the size of the set (as opposed to O(U) where U is the
// universe). filter_b implicitly contains all physical registers at all times.
class FilteringVRegSet {
  VRegFilter Filter;
  SmallVector<Register, 0> VRegs;

public:
  // Set up the filter_b. \pre Input register set \p RS must have no
  // duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void addToFilter(const RegSetT &RS) {
    Filter.add(RS);
  }
  // Passes \p RS through the filter_b (transfer function) and adds what's left
  // to itself (out_b).
  template <typename RegSetT> bool add(const RegSetT &RS) {
    // Double-duty the Filter: to maintain VRegs a set (and the join operation
    // a set union) just add everything being added here to the Filter as well.
    return Filter.filterAndAdd(RS, VRegs);
  }
  using const_iterator = decltype(VRegs)::const_iterator;
  const_iterator begin() const { return VRegs.begin(); }
  const_iterator end() const { return VRegs.end(); }
  size_t size() const { return VRegs.size(); }
};
} // namespace

// Calculate the largest possible vregsPassed sets. These are the registers that
// can pass through an MBB live, but may not be live every time. It is assumed
// that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
  if (MF->empty())
    // ReversePostOrderTraversal doesn't handle empty functions.
    return;

  for (const MachineBasicBlock *MB :
       ReversePostOrderTraversal<const MachineFunction *>(MF)) {
    FilteringVRegSet VRegs;
    BBInfo &Info = MBBInfoMap[MB];
    assert(Info.reachable);

    VRegs.addToFilter(Info.regsKilled);
    VRegs.addToFilter(Info.regsLiveOut);
    for (const MachineBasicBlock *Pred : MB->predecessors()) {
      const BBInfo &PredInfo = MBBInfoMap[Pred];
      if (!PredInfo.reachable)
        continue;

      VRegs.add(PredInfo.regsLiveOut);
      VRegs.add(PredInfo.vregsPassed);
    }
    Info.vregsPassed.reserve(VRegs.size());
    Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
  }
}
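
// In dataflow terms (an illustrative sketch): for each reachable block B,
//   vregsPassed(B) = (regsLiveOut and vregsPassed of all reachable preds)
//                    \ (regsKilled(B) + regsLiveOut(B))
// computed in reverse post-order so most predecessors are visited before B.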

// Calculate the set of virtual registers that must be passed through each basic
// block in order to satisfy the requirements of successor blocks. This is very
// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  SmallPtrSet<const MachineBasicBlock*, 8> todo;
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI node.
    for (const MachineInstr &MI : MBB.phis()) {
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip those Operands which are undef regs or not regs.
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}

// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  SmallPtrSet<const MachineBasicBlock*, 8> seen;
  for (const MachineInstr &Phi : MBB) {
    if (!Phi.isPHI())
      break;
    seen.clear();

    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!DefReg.isVirtual())
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}

void MachineVerifier::visitMachineFunctionAfter() {
  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available.
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check the live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since this
  // must come from a definition in the predecessor or its live-in list, this
  // will catch a live-through case where the predecessor does not have the
  // register in its live-in list.
This currently only checks 2998 // registers that have no aliases, are not allocatable and are not 2999 // reserved, which could mean a condition code register for instance. 3000 if (MRI->tracksLiveness()) 3001 for (const auto &MBB : *MF) 3002 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) { 3003 MCPhysReg LiveInReg = P.PhysReg; 3004 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid(); 3005 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg)) 3006 continue; 3007 for (const MachineBasicBlock *Pred : MBB.predecessors()) { 3008 BBInfo &PInfo = MBBInfoMap[Pred]; 3009 if (!PInfo.regsLiveOut.count(LiveInReg)) { 3010 report("Live in register not found to be live out from predecessor.", 3011 &MBB); 3012 errs() << TRI->getName(LiveInReg) 3013 << " not found to be live out from " 3014 << printMBBReference(*Pred) << "\n"; 3015 } 3016 } 3017 } 3018 3019 for (auto CSInfo : MF->getCallSitesInfo()) 3020 if (!CSInfo.first->isCall()) 3021 report("Call site info referencing instruction that is not call", MF); 3022 3023 // If there's debug-info, check that we don't have any duplicate value 3024 // tracking numbers. 3025 if (MF->getFunction().getSubprogram()) { 3026 DenseSet<unsigned> SeenNumbers; 3027 for (const auto &MBB : *MF) { 3028 for (const auto &MI : MBB) { 3029 if (auto Num = MI.peekDebugInstrNum()) { 3030 auto Result = SeenNumbers.insert((unsigned)Num); 3031 if (!Result.second) 3032 report("Instruction has a duplicated value tracking number", &MI); 3033 } 3034 } 3035 } 3036 } 3037 } 3038 3039 void MachineVerifier::verifyLiveVariables() { 3040 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars"); 3041 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) { 3042 Register Reg = Register::index2VirtReg(I); 3043 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg); 3044 for (const auto &MBB : *MF) { 3045 BBInfo &MInfo = MBBInfoMap[&MBB]; 3046 3047 // Our vregsRequired should be identical to LiveVariables' AliveBlocks 3048 if (MInfo.vregsRequired.count(Reg)) { 3049 if (!VI.AliveBlocks.test(MBB.getNumber())) { 3050 report("LiveVariables: Block missing from AliveBlocks", &MBB); 3051 errs() << "Virtual register " << printReg(Reg) 3052 << " must be live through the block.\n"; 3053 } 3054 } else { 3055 if (VI.AliveBlocks.test(MBB.getNumber())) { 3056 report("LiveVariables: Block should not be in AliveBlocks", &MBB); 3057 errs() << "Virtual register " << printReg(Reg) 3058 << " is not needed live through the block.\n"; 3059 } 3060 } 3061 } 3062 } 3063 } 3064 3065 void MachineVerifier::verifyLiveIntervals() { 3066 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts"); 3067 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) { 3068 Register Reg = Register::index2VirtReg(I); 3069 3070 // Spilling and splitting may leave unused registers around. Skip them. 3071 if (MRI->reg_nodbg_empty(Reg)) 3072 continue; 3073 3074 if (!LiveInts->hasInterval(Reg)) { 3075 report("Missing live interval for virtual register", MF); 3076 errs() << printReg(Reg, TRI) << " still has defs or uses\n"; 3077 continue; 3078 } 3079 3080 const LiveInterval &LI = LiveInts->getInterval(Reg); 3081 assert(Reg == LI.reg() && "Invalid reg to interval mapping"); 3082 verifyLiveInterval(LI); 3083 } 3084 3085 // Verify all the cached regunit intervals. 
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}

void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        if (!MOI->getReg().isPhysical() || !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
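    // (Illustrative note: a SlotIndex prints its slot kind as a suffix, 'B'
    // for the block boundary, 'e' for early-clobber, 'r' for register, and
    // 'd' for dead; in the liverange examples elsewhere in this file, a value
    // defined at 16e comes from an early-clobber def, one at 16r from a
    // normal def.)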
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}

void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             const LiveRange::const_iterator I,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // Checks for segments that are not live out of EndMBB.
  if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed to have dead PHI-defs.
    if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
        S.end == VNI->def.getDeadSlot())
      return;

    // The live segment ends inside EndMBB.
    const MachineInstr *MI =
        LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
    if (!MI) {
      report("Live segment doesn't end at a valid instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
      return;
    }

    // Block slots refer only to basic block boundaries, so a segment that
    // ends inside a block must not end at one.
    if (S.end.isBlock()) {
      report("Live segment ends at B slot of an instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }

    if (S.end.isDead()) {
      // A segment ending at the dead slot must come from a dead def, so it
      // cannot span more than its defining instruction.
      if (!SlotIndex::isSameInstr(S.start, S.end)) {
        report("Live segment ending at dead slot spans instructions", EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // After tied operands are rewritten, a live segment can only end at an
    // early-clobber slot if it is being redefined by an early-clobber def.
    // TODO: Before tied operands are rewritten, a live segment can only end
    // at an early-clobber slot if the last use is tied to an early-clobber
    // def.
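    // Illustration of the check below: if a segment of some vreg %0 ends at
    // an instruction's early-clobber slot, the next segment in the range
    // must start at exactly that slot, i.e. the instruction itself restarts
    // %0 with an early-clobber def.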
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        S.end.isEarlyClobber()) {
      if (I + 1 == LR.end() || (I + 1)->start != S.end) {
        report("Live segment ending at early clobber slot must be "
               "redefined by an EC def in the same instruction",
               EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // The following checks only apply to virtual registers. Physreg liveness
    // is too weird to check.
    if (Reg.isVirtual()) {
      // A live segment can end with either a redefinition, a kill flag on a
      // use, or a dead flag on a def.
      bool hasRead = false;
      bool hasSubRegDef = false;
      bool hasDeadDef = false;
      for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
        if (!MOI->isReg() || MOI->getReg() != Reg)
          continue;
        unsigned Sub = MOI->getSubReg();
        LaneBitmask SLM =
            Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
        if (MOI->isDef()) {
          if (Sub != 0) {
            hasSubRegDef = true;
            // An operand %0:sub0 reads %0:sub1..n. Invert the lane mask for
            // subregister defs. Read-undef defs will be handled by readsReg
            // below.
            SLM = ~SLM;
          }
          if (MOI->isDead())
            hasDeadDef = true;
        }
        if (LaneMask.any() && (LaneMask & SLM).none())
          continue;
        if (MOI->readsReg())
          hasRead = true;
      }
      if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for
        // subregister live ranges as partially dead values are allowed.
        if (LaneMask.none() && !hasDeadDef) {
          report(
              "Instruction ending live segment on dead slot has no dead flag",
              MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      } else {
        if (!hasRead) {
          // When tracking subregister liveness, the main range must start new
          // values on partial register writes, even if there is no read.
          if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
              !hasSubRegDef) {
            report("Instruction ending live segment doesn't read the register",
                   MI);
            report_context(LR, Reg, LaneMask);
            report_context(S);
          }
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  MachineFunction::const_iterator MFI = MBB->getIterator();
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // A landing-pad successor is live-out of the predecessor at its last
      // call, not at the block end.
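      // (The exceptional edge into a landing pad is taken when a call in the
      // predecessor unwinds, so liveness on that edge is measured at the
      // boundary index of the predecessor's last call.)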
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However, for a phi
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}

void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}

void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Reg.isVirtual());
  verifyLiveRange(LI, Reg);

  if (LI.hasSubRanges()) {
    LaneBitmask Mask;
    LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
    for (const LiveInterval::SubRange &SR : LI.subranges()) {
      if ((Mask & SR.LaneMask).any()) {
        report("Lane masks of sub ranges overlap in live interval", MF);
        report_context(LI);
      }
      if ((SR.LaneMask & ~MaxMask).any()) {
        report("Subrange lanemask is invalid", MF);
        report_context(LI);
      }
      if (SR.empty()) {
        report("Subrange must not be empty", MF);
        report_context(SR, Reg, SR.LaneMask);
      }
      Mask |= SR.LaneMask;
      verifyLiveRange(SR, Reg, SR.LaneMask);
      if (!LI.covers(SR)) {
        report("A Subrange is not covered by the main range", MF);
        report_context(LI);
      }
    }
  }

  // Check that the live interval has only one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}

namespace {

// Both FrameSetup and FrameDestroy can carry a zero adjustment, so with a
// single integer we could not tell whether an adjustment of zero came from a
// FrameSetup or a FrameDestroy. We therefore capture the stack state with a
// bool (is a frame currently set up?) plus an integer (the net adjustment).
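//
// Illustrative state transitions: starting from (0, false), a FrameSetup of
// 16 bytes yields (-16, true); the matching FrameDestroy of 16 bytes
// restores (0, false). A second FrameSetup before the destroy, or a destroy
// whose size does not match, is reported in verifyStackFrame below.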
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup)
      : EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
        ExitIsSetup(ExitSetup) {}

  // Can be negative, which means we are setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};

} // end anonymous namespace

/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>; that stack adjustments are identical on
/// all CFG edges to a merge point; and that the frame is destroyed at the
/// end of a return block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock *> Reachable;

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
           DFI = df_ext_begin(MF, Reachable),
           DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Inherit the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor must already be visited");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
      report("Call frame size on entry does not match value computed from "
             "predecessor",
             MBB);
      errs() << "Call frame size on entry " << MBB->getCallFrameSize()
             << " does not match value computed from predecessor "
             << -BBState.EntryValue << '\n';
    }

    // Update the stack state by scanning the contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj =
            BBState.ExitValue < 0 ? -BBState.ExitValue : BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the
    // entry state.
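    // (Both directions are checked because the DFS may reach this block
    // before or after any given neighbor; whichever of the two blocks is
    // processed later performs the comparison against the state already
    // recorded for the other, so every CFG edge is eventually validated.)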
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup
               << "), while " << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block that ends in a return has zero net stack
    // adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}