//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore are also supported.
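//
// For example (schematically, using the same pseudo-notation as above), a
// store is rewritten the same way:
//
//   test %r10, %r10
//   je throw_npe
//   movl %esi, (%r10)
//
// becomes
//
//   faulting_store_op("movl %esi, (%r10)", throw_npe)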
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096), cl::Hidden);

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::Hidden, cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence.  States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before them?
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Block);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };

  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI.
  AliasResult areMemoryOpsAliased(const MachineInstr &MI,
                                  const MachineInstr *PrevMI) const;

  enum SuitabilityResult {
    SR_Suitable,
    SR_Unsuitable,
    SR_Impossible
  };

  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if there is no
  /// point in continuing the search because no later instruction could be
  /// used either. \p PrevInsts is the set of instructions seen since
  /// the explicit null check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

} // end anonymous namespace

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->mayRaiseFPException() ||
      MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");

  Optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == None) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
      return {false, None};
    }
  }

  return {true, Dep};
}
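
// For instance (schematic x86), "%rdx = ADD64ri8 %rdx, 8" and a load
// "%rax = MOV64rm %rdx, ..." cannot be reordered by \c canReorder below:
// the two instructions reference overlapping registers and at least one of
// the references is a def.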

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.

  for (auto MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    unsigned RegA = MOA.getReg();
    for (auto MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      unsigned RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}
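
// To illustrate the tri-state result below (schematic x86): a store such as
//
//   movl %eax, (%rdx)
//
// that carries no MachineMemOperand info makes every later candidate
// unsuitable (AR_WillAliasEverything), since nothing after it can be proven
// disjoint from it; a store whose memory operands merely may overlap the
// candidate's only disqualifies that one candidate (AR_MayAlias).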
ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
                                        const MachineInstr *PrevMI) const {
  // If PrevMI is not a memory access, skip the check.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // Two loads may alias, but that is harmless.
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // We lost info, conservatively alias. If it was a store, there is no point
  // in continuing because we won't be able to check against it further.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a value because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      llvm::AliasResult AAResult =
          AA->alias(MemoryLocation(MMO1->getValue(), LocationSize::unknown(),
                                   MMO1->getAAInfo()),
                    MemoryLocation(MMO2->getValue(), LocationSize::unknown(),
                                   MMO2->getAAInfo()));
      if (AAResult != NoAlias)
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}
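
// For instance (schematic x86, assuming the default 4096-byte page size set
// by -imp-null-check-page-size), "movl 8(%r10), %esi" is a suitable memory
// operation for null checking %r10: if %r10 is null the access hits address
// 8, which lies inside the unmapped null page and is guaranteed to fault. An
// access at offset 8192 would not be suitable, since it could land on a
// mapped page beyond the protected one.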
ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  int64_t Offset;
  const MachineOperand *BaseOp;

  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) ||
      !BaseOp->isReg() || BaseOp->getReg() != PointerReg)
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
        -PageSize < Offset && Offset < PageSize))
    return SR_Unsuitable;

  // Finally, check whether the current memory access aliases with any
  // previous one.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}
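
// A typical single dependency that canHoistInst must reason about (schematic
// x86): in
//
//   %edx = COPY %esi          ; reads the register the load will clobber
//   %esi = MOV32rm %r10, ...  ; candidate faulting load
//
// the COPY and the load cannot simply be swapped, so the COPY has to be
// hoisted above the null check together with the load -- provided the COPY
// itself has no dependencies of its own and does not clobber any live-ins of
// the null successor block.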
bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      unsigned PointerReg,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads.  Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  // We also do not want to hoist stores because doing so might change the
  // memory state before the FaultingMI gets a chance to fault.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  for (auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live-ins to the sibling block by
    // hoisting Dependency.  For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because in
    // case the pointer loaded from is in the null page, the load will not
    // semantically execute, and so will not affect machine state. That is,
    // if the load was loading into %rax and it faults, the value of %rax
    // should stay the same as it would have been had the load not executed
    // and we'd have branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return false;

    // The Dependency can't be re-defining the base register -- then we won't
    // get the memory operation on the address we want. This is already
    // checked in \c isSuitableMemoryOp.
    assert(!(DependenceMO.isDef() &&
             TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
           "Should have been checked before!");
  }

  // Re-run the dependence computation on the dependency itself, against the
  // instructions that precede it: the pair is only hoistable if the
  // dependency has no dependencies of its own.
  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If we cannot erase the test instruction itself, then making the null check
  // implicit does not buy us much.
  if (!MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by using
  // the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  // To prevent the invalid transformation of the following code:
  //
  //   mov %rax, %rcx
  //   test %rax, %rax
  //   %rax = ...
  //   je throw_npe
  //   mov (%rcx), %r9
  //   mov (%rax), %r10
  //
  // into:
  //
  //   mov %rax, %rcx
  //   %rax = ...
  //   faulting_load_op("movl (%rax), %r10", throw_npe)
  //   mov (%rcx), %r9
  //
  // we must ensure that there are no instructions between the 'test' and
  // conditional jump that modify %rax.
  const unsigned PointerReg = MBP.LHS.getReg();

  assert(MBP.ConditionDef->getParent() == &MBB && "Should be in basic block");

  for (auto I = MBB.rbegin(); MBP.ConditionDef != &*I; ++I)
    if (I->modifiesRegister(PointerReg, TRI))
      return false;

  // Starting with a code fragment like:
  //
  //   test %rax, %rax
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%rax + <offset>)
  //   ...
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%rax + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %rax is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %rax is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and
  //     the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow.  For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case.  In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines the PointerReg then we cannot move further.
    if (llvm::any_of(MI.operands(), [&](MachineOperand &MO) {
          return MO.isReg() && MO.getReg() && MO.isDef() &&
                 TRI->regsOverlap(MO.getReg(), PointerReg);
        }))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults.  The FAULTING instruction is inserted at the end of MBB.
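///
/// Schematically (on x86, where a memory reference is the usual
/// base/scale/index/disp/segment quintuple), folding "movl (%r10), %esi"
/// with handler %bb.null produces something like
///
///   %esi = FAULTING_OP <FaultMaps::FaultingLoad>, %bb.null,
///          <MOV32rm's opcode>, %r10, 1, $noreg, 0, $noreg
///
/// i.e. the fault kind, the handler block, and the wrapped instruction's
/// opcode come first, followed by the wrapped instruction's own non-def
/// operands.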
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->getOperand(0).getReg();
    assert(NumDefs == 1 && "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK =
        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands());

  return MIB;
}
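
// After the rewrite below, each check block ends (schematically) with
//
//   <hoisted dependency, if any>
//   <FAULTING_OP listing NullSucc as its handler>
//   jmp NotNullSucc
//
// and the runtime described in the file header uses the .fault_maps section
// to redirect execution to the null block when the FAULTING_OP actually
// takes a page fault.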
/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally. The check we performed earlier ensures that this bit of
    // code motion is legal. We do not touch the successors list for any
    // basic block since we haven't changed control flow, we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-in to
    // the block containing MemOperation.
    // The original operation may define implicit-defs alongside
    // the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    NC.getCheckOperation()->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}

char ImplicitNullChecks::ID = 0;

char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;

INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)