1 //===- Local.cpp - Functions to perform local transformations -------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This family of functions perform various local transformations to the 10 // program. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Utils/Local.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/DenseMapInfo.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/Hashing.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/Analysis/AssumeBundleQueries.h" 26 #include "llvm/Analysis/ConstantFolding.h" 27 #include "llvm/Analysis/DomTreeUpdater.h" 28 #include "llvm/Analysis/InstructionSimplify.h" 29 #include "llvm/Analysis/MemoryBuiltins.h" 30 #include "llvm/Analysis/MemorySSAUpdater.h" 31 #include "llvm/Analysis/TargetLibraryInfo.h" 32 #include "llvm/Analysis/ValueTracking.h" 33 #include "llvm/Analysis/VectorUtils.h" 34 #include "llvm/BinaryFormat/Dwarf.h" 35 #include "llvm/IR/Argument.h" 36 #include "llvm/IR/Attributes.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/CFG.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/ConstantRange.h" 41 #include "llvm/IR/Constants.h" 42 #include "llvm/IR/DIBuilder.h" 43 #include "llvm/IR/DataLayout.h" 44 #include "llvm/IR/DebugInfo.h" 45 #include "llvm/IR/DebugInfoMetadata.h" 46 #include "llvm/IR/DebugLoc.h" 47 #include "llvm/IR/DerivedTypes.h" 48 #include "llvm/IR/Dominators.h" 49 #include "llvm/IR/EHPersonalities.h" 50 #include "llvm/IR/Function.h" 51 #include "llvm/IR/GetElementPtrTypeIterator.h" 52 #include "llvm/IR/IRBuilder.h" 53 #include "llvm/IR/InstrTypes.h" 54 #include "llvm/IR/Instruction.h" 55 #include "llvm/IR/Instructions.h" 56 #include "llvm/IR/IntrinsicInst.h" 57 #include "llvm/IR/Intrinsics.h" 58 #include "llvm/IR/IntrinsicsWebAssembly.h" 59 #include "llvm/IR/LLVMContext.h" 60 #include "llvm/IR/MDBuilder.h" 61 #include "llvm/IR/MemoryModelRelaxationAnnotations.h" 62 #include "llvm/IR/Metadata.h" 63 #include "llvm/IR/Module.h" 64 #include "llvm/IR/PatternMatch.h" 65 #include "llvm/IR/ProfDataUtils.h" 66 #include "llvm/IR/Type.h" 67 #include "llvm/IR/Use.h" 68 #include "llvm/IR/User.h" 69 #include "llvm/IR/Value.h" 70 #include "llvm/IR/ValueHandle.h" 71 #include "llvm/Support/Casting.h" 72 #include "llvm/Support/CommandLine.h" 73 #include "llvm/Support/Compiler.h" 74 #include "llvm/Support/Debug.h" 75 #include "llvm/Support/ErrorHandling.h" 76 #include "llvm/Support/KnownBits.h" 77 #include "llvm/Support/raw_ostream.h" 78 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 79 #include "llvm/Transforms/Utils/ValueMapper.h" 80 #include <algorithm> 81 #include <cassert> 82 #include <cstdint> 83 #include <iterator> 84 #include <map> 85 #include <optional> 86 #include <utility> 87 88 using namespace llvm; 89 using namespace llvm::PatternMatch; 90 91 #define DEBUG_TYPE "local" 92 93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed"); 94 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd"); 95 96 static cl::opt<bool> PHICSEDebugHash( 97 "phicse-debug-hash", 98 #ifdef EXPENSIVE_CHECKS 99 cl::init(true), 100 #else 101 cl::init(false), 102 #endif 103 cl::Hidden, 104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash " 105 "function is well-behaved w.r.t. its isEqual predicate")); 106 107 static cl::opt<unsigned> PHICSENumPHISmallSize( 108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden, 109 cl::desc( 110 "When the basic block contains not more than this number of PHI nodes, " 111 "perform a (faster!) exhaustive search instead of set-driven one.")); 112 113 static cl::opt<unsigned> MaxPhiEntriesIncreaseAfterRemovingEmptyBlock( 114 "max-phi-entries-increase-after-removing-empty-block", cl::init(1000), 115 cl::Hidden, 116 cl::desc("Stop removing an empty block if removing it will introduce more " 117 "than this number of phi entries in its successor")); 118 119 // Max recursion depth for collectBitParts used when detecting bswap and 120 // bitreverse idioms. 121 static const unsigned BitPartRecursionMaxDepth = 48; 122 123 //===----------------------------------------------------------------------===// 124 // Local constant propagation. 125 // 126 127 /// ConstantFoldTerminator - If a terminator instruction is predicated on a 128 /// constant value, convert it into an unconditional branch to the constant 129 /// destination. This is a nontrivial operation because the successors of this 130 /// basic block must have their PHI nodes updated. 131 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch 132 /// conditions and indirectbr addresses this might make dead if 133 /// DeleteDeadConditions is true. 134 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions, 135 const TargetLibraryInfo *TLI, 136 DomTreeUpdater *DTU) { 137 Instruction *T = BB->getTerminator(); 138 IRBuilder<> Builder(T); 139 140 // Branch - See if we are conditional jumping on constant 141 if (auto *BI = dyn_cast<BranchInst>(T)) { 142 if (BI->isUnconditional()) return false; // Can't optimize uncond branch 143 144 BasicBlock *Dest1 = BI->getSuccessor(0); 145 BasicBlock *Dest2 = BI->getSuccessor(1); 146 147 if (Dest2 == Dest1) { // Conditional branch to same location? 148 // This branch matches something like this: 149 // br bool %cond, label %Dest, label %Dest 150 // and changes it into: br label %Dest 151 152 // Let the basic block know that we are letting go of one copy of it. 153 assert(BI->getParent() && "Terminator not inserted in block!"); 154 Dest1->removePredecessor(BI->getParent()); 155 156 // Replace the conditional branch with an unconditional one. 157 BranchInst *NewBI = Builder.CreateBr(Dest1); 158 159 // Transfer the metadata to the new branch instruction. 160 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg, 161 LLVMContext::MD_annotation}); 162 163 Value *Cond = BI->getCondition(); 164 BI->eraseFromParent(); 165 if (DeleteDeadConditions) 166 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); 167 return true; 168 } 169 170 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) { 171 // Are we branching on constant? 172 // YES. Change to unconditional branch... 173 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2; 174 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1; 175 176 // Let the basic block know that we are letting go of it. Based on this, 177 // it will adjust it's PHI nodes. 178 OldDest->removePredecessor(BB); 179 180 // Replace the conditional branch with an unconditional one. 181 BranchInst *NewBI = Builder.CreateBr(Destination); 182 183 // Transfer the metadata to the new branch instruction. 184 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg, 185 LLVMContext::MD_annotation}); 186 187 BI->eraseFromParent(); 188 if (DTU) 189 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}}); 190 return true; 191 } 192 193 return false; 194 } 195 196 if (auto *SI = dyn_cast<SwitchInst>(T)) { 197 // If we are switching on a constant, we can convert the switch to an 198 // unconditional branch. 199 auto *CI = dyn_cast<ConstantInt>(SI->getCondition()); 200 BasicBlock *DefaultDest = SI->getDefaultDest(); 201 BasicBlock *TheOnlyDest = DefaultDest; 202 203 // If the default is unreachable, ignore it when searching for TheOnlyDest. 204 if (SI->defaultDestUnreachable() && SI->getNumCases() > 0) 205 TheOnlyDest = SI->case_begin()->getCaseSuccessor(); 206 207 bool Changed = false; 208 209 // Figure out which case it goes to. 210 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) { 211 // Found case matching a constant operand? 212 if (It->getCaseValue() == CI) { 213 TheOnlyDest = It->getCaseSuccessor(); 214 break; 215 } 216 217 // Check to see if this branch is going to the same place as the default 218 // dest. If so, eliminate it as an explicit compare. 219 if (It->getCaseSuccessor() == DefaultDest) { 220 MDNode *MD = getValidBranchWeightMDNode(*SI); 221 unsigned NCases = SI->getNumCases(); 222 // Fold the case metadata into the default if there will be any branches 223 // left, unless the metadata doesn't match the switch. 224 if (NCases > 1 && MD) { 225 // Collect branch weights into a vector. 226 SmallVector<uint32_t, 8> Weights; 227 extractBranchWeights(MD, Weights); 228 229 // Merge weight of this case to the default weight. 230 unsigned Idx = It->getCaseIndex(); 231 // TODO: Add overflow check. 232 Weights[0] += Weights[Idx + 1]; 233 // Remove weight for this case. 234 std::swap(Weights[Idx + 1], Weights.back()); 235 Weights.pop_back(); 236 setBranchWeights(*SI, Weights, hasBranchWeightOrigin(MD)); 237 } 238 // Remove this entry. 239 BasicBlock *ParentBB = SI->getParent(); 240 DefaultDest->removePredecessor(ParentBB); 241 It = SI->removeCase(It); 242 End = SI->case_end(); 243 244 // Removing this case may have made the condition constant. In that 245 // case, update CI and restart iteration through the cases. 246 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) { 247 CI = NewCI; 248 It = SI->case_begin(); 249 } 250 251 Changed = true; 252 continue; 253 } 254 255 // Otherwise, check to see if the switch only branches to one destination. 256 // We do this by reseting "TheOnlyDest" to null when we find two non-equal 257 // destinations. 258 if (It->getCaseSuccessor() != TheOnlyDest) 259 TheOnlyDest = nullptr; 260 261 // Increment this iterator as we haven't removed the case. 262 ++It; 263 } 264 265 if (CI && !TheOnlyDest) { 266 // Branching on a constant, but not any of the cases, go to the default 267 // successor. 268 TheOnlyDest = SI->getDefaultDest(); 269 } 270 271 // If we found a single destination that we can fold the switch into, do so 272 // now. 273 if (TheOnlyDest) { 274 // Insert the new branch. 275 Builder.CreateBr(TheOnlyDest); 276 BasicBlock *BB = SI->getParent(); 277 278 SmallSet<BasicBlock *, 8> RemovedSuccessors; 279 280 // Remove entries from PHI nodes which we no longer branch to... 281 BasicBlock *SuccToKeep = TheOnlyDest; 282 for (BasicBlock *Succ : successors(SI)) { 283 if (DTU && Succ != TheOnlyDest) 284 RemovedSuccessors.insert(Succ); 285 // Found case matching a constant operand? 286 if (Succ == SuccToKeep) { 287 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest 288 } else { 289 Succ->removePredecessor(BB); 290 } 291 } 292 293 // Delete the old switch. 294 Value *Cond = SI->getCondition(); 295 SI->eraseFromParent(); 296 if (DeleteDeadConditions) 297 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); 298 if (DTU) { 299 std::vector<DominatorTree::UpdateType> Updates; 300 Updates.reserve(RemovedSuccessors.size()); 301 for (auto *RemovedSuccessor : RemovedSuccessors) 302 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor}); 303 DTU->applyUpdates(Updates); 304 } 305 return true; 306 } 307 308 if (SI->getNumCases() == 1) { 309 // Otherwise, we can fold this switch into a conditional branch 310 // instruction if it has only one non-default destination. 311 auto FirstCase = *SI->case_begin(); 312 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(), 313 FirstCase.getCaseValue(), "cond"); 314 315 // Insert the new branch. 316 BranchInst *NewBr = Builder.CreateCondBr(Cond, 317 FirstCase.getCaseSuccessor(), 318 SI->getDefaultDest()); 319 SmallVector<uint32_t> Weights; 320 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) { 321 uint32_t DefWeight = Weights[0]; 322 uint32_t CaseWeight = Weights[1]; 323 // The TrueWeight should be the weight for the single case of SI. 324 NewBr->setMetadata(LLVMContext::MD_prof, 325 MDBuilder(BB->getContext()) 326 .createBranchWeights(CaseWeight, DefWeight)); 327 } 328 329 // Update make.implicit metadata to the newly-created conditional branch. 330 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit); 331 if (MakeImplicitMD) 332 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD); 333 334 // Delete the old switch. 335 SI->eraseFromParent(); 336 return true; 337 } 338 return Changed; 339 } 340 341 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) { 342 // indirectbr blockaddress(@F, @BB) -> br label @BB 343 if (auto *BA = 344 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) { 345 BasicBlock *TheOnlyDest = BA->getBasicBlock(); 346 SmallSet<BasicBlock *, 8> RemovedSuccessors; 347 348 // Insert the new branch. 349 Builder.CreateBr(TheOnlyDest); 350 351 BasicBlock *SuccToKeep = TheOnlyDest; 352 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) { 353 BasicBlock *DestBB = IBI->getDestination(i); 354 if (DTU && DestBB != TheOnlyDest) 355 RemovedSuccessors.insert(DestBB); 356 if (IBI->getDestination(i) == SuccToKeep) { 357 SuccToKeep = nullptr; 358 } else { 359 DestBB->removePredecessor(BB); 360 } 361 } 362 Value *Address = IBI->getAddress(); 363 IBI->eraseFromParent(); 364 if (DeleteDeadConditions) 365 // Delete pointer cast instructions. 366 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI); 367 368 // Also zap the blockaddress constant if there are no users remaining, 369 // otherwise the destination is still marked as having its address taken. 370 if (BA->use_empty()) 371 BA->destroyConstant(); 372 373 // If we didn't find our destination in the IBI successor list, then we 374 // have undefined behavior. Replace the unconditional branch with an 375 // 'unreachable' instruction. 376 if (SuccToKeep) { 377 BB->getTerminator()->eraseFromParent(); 378 new UnreachableInst(BB->getContext(), BB); 379 } 380 381 if (DTU) { 382 std::vector<DominatorTree::UpdateType> Updates; 383 Updates.reserve(RemovedSuccessors.size()); 384 for (auto *RemovedSuccessor : RemovedSuccessors) 385 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor}); 386 DTU->applyUpdates(Updates); 387 } 388 return true; 389 } 390 } 391 392 return false; 393 } 394 395 //===----------------------------------------------------------------------===// 396 // Local dead code elimination. 397 // 398 399 /// isInstructionTriviallyDead - Return true if the result produced by the 400 /// instruction is not used, and the instruction has no side effects. 401 /// 402 bool llvm::isInstructionTriviallyDead(Instruction *I, 403 const TargetLibraryInfo *TLI) { 404 if (!I->use_empty()) 405 return false; 406 return wouldInstructionBeTriviallyDead(I, TLI); 407 } 408 409 bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths( 410 Instruction *I, const TargetLibraryInfo *TLI) { 411 // Instructions that are "markers" and have implied meaning on code around 412 // them (without explicit uses), are not dead on unused paths. 413 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 414 if (II->getIntrinsicID() == Intrinsic::stacksave || 415 II->getIntrinsicID() == Intrinsic::launder_invariant_group || 416 II->isLifetimeStartOrEnd()) 417 return false; 418 return wouldInstructionBeTriviallyDead(I, TLI); 419 } 420 421 bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I, 422 const TargetLibraryInfo *TLI) { 423 if (I->isTerminator()) 424 return false; 425 426 // We don't want the landingpad-like instructions removed by anything this 427 // general. 428 if (I->isEHPad()) 429 return false; 430 431 // We don't want debug info removed by anything this general. 432 if (isa<DbgVariableIntrinsic>(I)) 433 return false; 434 435 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) { 436 if (DLI->getLabel()) 437 return false; 438 return true; 439 } 440 441 if (auto *CB = dyn_cast<CallBase>(I)) 442 if (isRemovableAlloc(CB, TLI)) 443 return true; 444 445 if (!I->willReturn()) { 446 auto *II = dyn_cast<IntrinsicInst>(I); 447 if (!II) 448 return false; 449 450 switch (II->getIntrinsicID()) { 451 case Intrinsic::experimental_guard: { 452 // Guards on true are operationally no-ops. In the future we can 453 // consider more sophisticated tradeoffs for guards considering potential 454 // for check widening, but for now we keep things simple. 455 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)); 456 return Cond && Cond->isOne(); 457 } 458 // TODO: These intrinsics are not safe to remove, because this may remove 459 // a well-defined trap. 460 case Intrinsic::wasm_trunc_signed: 461 case Intrinsic::wasm_trunc_unsigned: 462 case Intrinsic::ptrauth_auth: 463 case Intrinsic::ptrauth_resign: 464 return true; 465 default: 466 return false; 467 } 468 } 469 470 if (!I->mayHaveSideEffects()) 471 return true; 472 473 // Special case intrinsics that "may have side effects" but can be deleted 474 // when dead. 475 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 476 // Safe to delete llvm.stacksave and launder.invariant.group if dead. 477 if (II->getIntrinsicID() == Intrinsic::stacksave || 478 II->getIntrinsicID() == Intrinsic::launder_invariant_group) 479 return true; 480 481 // Intrinsics declare sideeffects to prevent them from moving, but they are 482 // nops without users. 483 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check || 484 II->getIntrinsicID() == Intrinsic::allow_ubsan_check) 485 return true; 486 487 if (II->isLifetimeStartOrEnd()) { 488 auto *Arg = II->getArgOperand(1); 489 // Lifetime intrinsics are dead when their right-hand is undef. 490 if (isa<UndefValue>(Arg)) 491 return true; 492 // If the right-hand is an alloc, global, or argument and the only uses 493 // are lifetime intrinsics then the intrinsics are dead. 494 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg)) 495 return llvm::all_of(Arg->uses(), [](Use &Use) { 496 return isa<LifetimeIntrinsic>(Use.getUser()); 497 }); 498 return false; 499 } 500 501 // Assumptions are dead if their condition is trivially true. 502 if (II->getIntrinsicID() == Intrinsic::assume && 503 isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) { 504 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0))) 505 return !Cond->isZero(); 506 507 return false; 508 } 509 510 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) { 511 std::optional<fp::ExceptionBehavior> ExBehavior = 512 FPI->getExceptionBehavior(); 513 return *ExBehavior != fp::ebStrict; 514 } 515 } 516 517 if (auto *Call = dyn_cast<CallBase>(I)) { 518 if (Value *FreedOp = getFreedOperand(Call, TLI)) 519 if (Constant *C = dyn_cast<Constant>(FreedOp)) 520 return C->isNullValue() || isa<UndefValue>(C); 521 if (isMathLibCallNoop(Call, TLI)) 522 return true; 523 } 524 525 // Non-volatile atomic loads from constants can be removed. 526 if (auto *LI = dyn_cast<LoadInst>(I)) 527 if (auto *GV = dyn_cast<GlobalVariable>( 528 LI->getPointerOperand()->stripPointerCasts())) 529 if (!LI->isVolatile() && GV->isConstant()) 530 return true; 531 532 return false; 533 } 534 535 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a 536 /// trivially dead instruction, delete it. If that makes any of its operands 537 /// trivially dead, delete them too, recursively. Return true if any 538 /// instructions were deleted. 539 bool llvm::RecursivelyDeleteTriviallyDeadInstructions( 540 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU, 541 std::function<void(Value *)> AboutToDeleteCallback) { 542 Instruction *I = dyn_cast<Instruction>(V); 543 if (!I || !isInstructionTriviallyDead(I, TLI)) 544 return false; 545 546 SmallVector<WeakTrackingVH, 16> DeadInsts; 547 DeadInsts.push_back(I); 548 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU, 549 AboutToDeleteCallback); 550 551 return true; 552 } 553 554 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive( 555 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI, 556 MemorySSAUpdater *MSSAU, 557 std::function<void(Value *)> AboutToDeleteCallback) { 558 unsigned S = 0, E = DeadInsts.size(), Alive = 0; 559 for (; S != E; ++S) { 560 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]); 561 if (!I || !isInstructionTriviallyDead(I)) { 562 DeadInsts[S] = nullptr; 563 ++Alive; 564 } 565 } 566 if (Alive == E) 567 return false; 568 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU, 569 AboutToDeleteCallback); 570 return true; 571 } 572 573 void llvm::RecursivelyDeleteTriviallyDeadInstructions( 574 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI, 575 MemorySSAUpdater *MSSAU, 576 std::function<void(Value *)> AboutToDeleteCallback) { 577 // Process the dead instruction list until empty. 578 while (!DeadInsts.empty()) { 579 Value *V = DeadInsts.pop_back_val(); 580 Instruction *I = cast_or_null<Instruction>(V); 581 if (!I) 582 continue; 583 assert(isInstructionTriviallyDead(I, TLI) && 584 "Live instruction found in dead worklist!"); 585 assert(I->use_empty() && "Instructions with uses are not dead."); 586 587 // Don't lose the debug info while deleting the instructions. 588 salvageDebugInfo(*I); 589 590 if (AboutToDeleteCallback) 591 AboutToDeleteCallback(I); 592 593 // Null out all of the instruction's operands to see if any operand becomes 594 // dead as we go. 595 for (Use &OpU : I->operands()) { 596 Value *OpV = OpU.get(); 597 OpU.set(nullptr); 598 599 if (!OpV->use_empty()) 600 continue; 601 602 // If the operand is an instruction that became dead as we nulled out the 603 // operand, and if it is 'trivially' dead, delete it in a future loop 604 // iteration. 605 if (Instruction *OpI = dyn_cast<Instruction>(OpV)) 606 if (isInstructionTriviallyDead(OpI, TLI)) 607 DeadInsts.push_back(OpI); 608 } 609 if (MSSAU) 610 MSSAU->removeMemoryAccess(I); 611 612 I->eraseFromParent(); 613 } 614 } 615 616 bool llvm::replaceDbgUsesWithUndef(Instruction *I) { 617 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; 618 SmallVector<DbgVariableRecord *, 1> DPUsers; 619 findDbgUsers(DbgUsers, I, &DPUsers); 620 for (auto *DII : DbgUsers) 621 DII->setKillLocation(); 622 for (auto *DVR : DPUsers) 623 DVR->setKillLocation(); 624 return !DbgUsers.empty() || !DPUsers.empty(); 625 } 626 627 /// areAllUsesEqual - Check whether the uses of a value are all the same. 628 /// This is similar to Instruction::hasOneUse() except this will also return 629 /// true when there are no uses or multiple uses that all refer to the same 630 /// value. 631 static bool areAllUsesEqual(Instruction *I) { 632 Value::user_iterator UI = I->user_begin(); 633 Value::user_iterator UE = I->user_end(); 634 if (UI == UE) 635 return true; 636 637 User *TheUse = *UI; 638 for (++UI; UI != UE; ++UI) { 639 if (*UI != TheUse) 640 return false; 641 } 642 return true; 643 } 644 645 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively 646 /// dead PHI node, due to being a def-use chain of single-use nodes that 647 /// either forms a cycle or is terminated by a trivially dead instruction, 648 /// delete it. If that makes any of its operands trivially dead, delete them 649 /// too, recursively. Return true if a change was made. 650 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN, 651 const TargetLibraryInfo *TLI, 652 llvm::MemorySSAUpdater *MSSAU) { 653 SmallPtrSet<Instruction*, 4> Visited; 654 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects(); 655 I = cast<Instruction>(*I->user_begin())) { 656 if (I->use_empty()) 657 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU); 658 659 // If we find an instruction more than once, we're on a cycle that 660 // won't prove fruitful. 661 if (!Visited.insert(I).second) { 662 // Break the cycle and delete the instruction and its operands. 663 I->replaceAllUsesWith(PoisonValue::get(I->getType())); 664 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU); 665 return true; 666 } 667 } 668 return false; 669 } 670 671 static bool 672 simplifyAndDCEInstruction(Instruction *I, 673 SmallSetVector<Instruction *, 16> &WorkList, 674 const DataLayout &DL, 675 const TargetLibraryInfo *TLI) { 676 if (isInstructionTriviallyDead(I, TLI)) { 677 salvageDebugInfo(*I); 678 679 // Null out all of the instruction's operands to see if any operand becomes 680 // dead as we go. 681 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 682 Value *OpV = I->getOperand(i); 683 I->setOperand(i, nullptr); 684 685 if (!OpV->use_empty() || I == OpV) 686 continue; 687 688 // If the operand is an instruction that became dead as we nulled out the 689 // operand, and if it is 'trivially' dead, delete it in a future loop 690 // iteration. 691 if (Instruction *OpI = dyn_cast<Instruction>(OpV)) 692 if (isInstructionTriviallyDead(OpI, TLI)) 693 WorkList.insert(OpI); 694 } 695 696 I->eraseFromParent(); 697 698 return true; 699 } 700 701 if (Value *SimpleV = simplifyInstruction(I, DL)) { 702 // Add the users to the worklist. CAREFUL: an instruction can use itself, 703 // in the case of a phi node. 704 for (User *U : I->users()) { 705 if (U != I) { 706 WorkList.insert(cast<Instruction>(U)); 707 } 708 } 709 710 // Replace the instruction with its simplified value. 711 bool Changed = false; 712 if (!I->use_empty()) { 713 I->replaceAllUsesWith(SimpleV); 714 Changed = true; 715 } 716 if (isInstructionTriviallyDead(I, TLI)) { 717 I->eraseFromParent(); 718 Changed = true; 719 } 720 return Changed; 721 } 722 return false; 723 } 724 725 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to 726 /// simplify any instructions in it and recursively delete dead instructions. 727 /// 728 /// This returns true if it changed the code, note that it can delete 729 /// instructions in other blocks as well in this block. 730 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, 731 const TargetLibraryInfo *TLI) { 732 bool MadeChange = false; 733 const DataLayout &DL = BB->getDataLayout(); 734 735 #ifndef NDEBUG 736 // In debug builds, ensure that the terminator of the block is never replaced 737 // or deleted by these simplifications. The idea of simplification is that it 738 // cannot introduce new instructions, and there is no way to replace the 739 // terminator of a block without introducing a new instruction. 740 AssertingVH<Instruction> TerminatorVH(&BB->back()); 741 #endif 742 743 SmallSetVector<Instruction *, 16> WorkList; 744 // Iterate over the original function, only adding insts to the worklist 745 // if they actually need to be revisited. This avoids having to pre-init 746 // the worklist with the entire function's worth of instructions. 747 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end()); 748 BI != E;) { 749 assert(!BI->isTerminator()); 750 Instruction *I = &*BI; 751 ++BI; 752 753 // We're visiting this instruction now, so make sure it's not in the 754 // worklist from an earlier visit. 755 if (!WorkList.count(I)) 756 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); 757 } 758 759 while (!WorkList.empty()) { 760 Instruction *I = WorkList.pop_back_val(); 761 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); 762 } 763 return MadeChange; 764 } 765 766 //===----------------------------------------------------------------------===// 767 // Control Flow Graph Restructuring. 768 // 769 770 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, 771 DomTreeUpdater *DTU) { 772 773 // If BB has single-entry PHI nodes, fold them. 774 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) { 775 Value *NewVal = PN->getIncomingValue(0); 776 // Replace self referencing PHI with poison, it must be dead. 777 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType()); 778 PN->replaceAllUsesWith(NewVal); 779 PN->eraseFromParent(); 780 } 781 782 BasicBlock *PredBB = DestBB->getSinglePredecessor(); 783 assert(PredBB && "Block doesn't have a single predecessor!"); 784 785 bool ReplaceEntryBB = PredBB->isEntryBlock(); 786 787 // DTU updates: Collect all the edges that enter 788 // PredBB. These dominator edges will be redirected to DestBB. 789 SmallVector<DominatorTree::UpdateType, 32> Updates; 790 791 if (DTU) { 792 // To avoid processing the same predecessor more than once. 793 SmallPtrSet<BasicBlock *, 2> SeenPreds; 794 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1); 795 for (BasicBlock *PredOfPredBB : predecessors(PredBB)) 796 // This predecessor of PredBB may already have DestBB as a successor. 797 if (PredOfPredBB != PredBB) 798 if (SeenPreds.insert(PredOfPredBB).second) 799 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB}); 800 SeenPreds.clear(); 801 for (BasicBlock *PredOfPredBB : predecessors(PredBB)) 802 if (SeenPreds.insert(PredOfPredBB).second) 803 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB}); 804 Updates.push_back({DominatorTree::Delete, PredBB, DestBB}); 805 } 806 807 // Zap anything that took the address of DestBB. Not doing this will give the 808 // address an invalid value. 809 if (DestBB->hasAddressTaken()) { 810 BlockAddress *BA = BlockAddress::get(DestBB); 811 Constant *Replacement = 812 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1); 813 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement, 814 BA->getType())); 815 BA->destroyConstant(); 816 } 817 818 // Anything that branched to PredBB now branches to DestBB. 819 PredBB->replaceAllUsesWith(DestBB); 820 821 // Splice all the instructions from PredBB to DestBB. 822 PredBB->getTerminator()->eraseFromParent(); 823 DestBB->splice(DestBB->begin(), PredBB); 824 new UnreachableInst(PredBB->getContext(), PredBB); 825 826 // If the PredBB is the entry block of the function, move DestBB up to 827 // become the entry block after we erase PredBB. 828 if (ReplaceEntryBB) 829 DestBB->moveAfter(PredBB); 830 831 if (DTU) { 832 assert(PredBB->size() == 1 && 833 isa<UnreachableInst>(PredBB->getTerminator()) && 834 "The successor list of PredBB isn't empty before " 835 "applying corresponding DTU updates."); 836 DTU->applyUpdatesPermissive(Updates); 837 DTU->deleteBB(PredBB); 838 // Recalculation of DomTree is needed when updating a forward DomTree and 839 // the Entry BB is replaced. 840 if (ReplaceEntryBB && DTU->hasDomTree()) { 841 // The entry block was removed and there is no external interface for 842 // the dominator tree to be notified of this change. In this corner-case 843 // we recalculate the entire tree. 844 DTU->recalculate(*(DestBB->getParent())); 845 } 846 } 847 848 else { 849 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr. 850 } 851 } 852 853 /// Return true if we can choose one of these values to use in place of the 854 /// other. Note that we will always choose the non-undef value to keep. 855 static bool CanMergeValues(Value *First, Value *Second) { 856 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second); 857 } 858 859 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional 860 /// branch to Succ, into Succ. 861 /// 862 /// Assumption: Succ is the single successor for BB. 863 static bool 864 CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, 865 const SmallPtrSetImpl<BasicBlock *> &BBPreds) { 866 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!"); 867 868 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into " 869 << Succ->getName() << "\n"); 870 // Shortcut, if there is only a single predecessor it must be BB and merging 871 // is always safe 872 if (Succ->getSinglePredecessor()) 873 return true; 874 875 // Look at all the phi nodes in Succ, to see if they present a conflict when 876 // merging these blocks 877 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) { 878 PHINode *PN = cast<PHINode>(I); 879 880 // If the incoming value from BB is again a PHINode in 881 // BB which has the same incoming value for *PI as PN does, we can 882 // merge the phi nodes and then the blocks can still be merged 883 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB)); 884 if (BBPN && BBPN->getParent() == BB) { 885 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) { 886 BasicBlock *IBB = PN->getIncomingBlock(PI); 887 if (BBPreds.count(IBB) && 888 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB), 889 PN->getIncomingValue(PI))) { 890 LLVM_DEBUG(dbgs() 891 << "Can't fold, phi node " << PN->getName() << " in " 892 << Succ->getName() << " is conflicting with " 893 << BBPN->getName() << " with regard to common predecessor " 894 << IBB->getName() << "\n"); 895 return false; 896 } 897 } 898 } else { 899 Value* Val = PN->getIncomingValueForBlock(BB); 900 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) { 901 // See if the incoming value for the common predecessor is equal to the 902 // one for BB, in which case this phi node will not prevent the merging 903 // of the block. 904 BasicBlock *IBB = PN->getIncomingBlock(PI); 905 if (BBPreds.count(IBB) && 906 !CanMergeValues(Val, PN->getIncomingValue(PI))) { 907 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() 908 << " in " << Succ->getName() 909 << " is conflicting with regard to common " 910 << "predecessor " << IBB->getName() << "\n"); 911 return false; 912 } 913 } 914 } 915 } 916 917 return true; 918 } 919 920 using PredBlockVector = SmallVector<BasicBlock *, 16>; 921 using IncomingValueMap = SmallDenseMap<BasicBlock *, Value *, 16>; 922 923 /// Determines the value to use as the phi node input for a block. 924 /// 925 /// Select between \p OldVal any value that we know flows from \p BB 926 /// to a particular phi on the basis of which one (if either) is not 927 /// undef. Update IncomingValues based on the selected value. 928 /// 929 /// \param OldVal The value we are considering selecting. 930 /// \param BB The block that the value flows in from. 931 /// \param IncomingValues A map from block-to-value for other phi inputs 932 /// that we have examined. 933 /// 934 /// \returns the selected value. 935 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, 936 IncomingValueMap &IncomingValues) { 937 if (!isa<UndefValue>(OldVal)) { 938 assert((!IncomingValues.count(BB) || 939 IncomingValues.find(BB)->second == OldVal) && 940 "Expected OldVal to match incoming value from BB!"); 941 942 IncomingValues.insert(std::make_pair(BB, OldVal)); 943 return OldVal; 944 } 945 946 IncomingValueMap::const_iterator It = IncomingValues.find(BB); 947 if (It != IncomingValues.end()) return It->second; 948 949 return OldVal; 950 } 951 952 /// Create a map from block to value for the operands of a 953 /// given phi. 954 /// 955 /// Create a map from block to value for each non-undef value flowing 956 /// into \p PN. 957 /// 958 /// \param PN The phi we are collecting the map for. 959 /// \param IncomingValues [out] The map from block to value for this phi. 960 static void gatherIncomingValuesToPhi(PHINode *PN, 961 IncomingValueMap &IncomingValues) { 962 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 963 BasicBlock *BB = PN->getIncomingBlock(i); 964 Value *V = PN->getIncomingValue(i); 965 966 if (!isa<UndefValue>(V)) 967 IncomingValues.insert(std::make_pair(BB, V)); 968 } 969 } 970 971 /// Replace the incoming undef values to a phi with the values 972 /// from a block-to-value map. 973 /// 974 /// \param PN The phi we are replacing the undefs in. 975 /// \param IncomingValues A map from block to value. 976 static void replaceUndefValuesInPhi(PHINode *PN, 977 const IncomingValueMap &IncomingValues) { 978 SmallVector<unsigned> TrueUndefOps; 979 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 980 Value *V = PN->getIncomingValue(i); 981 982 if (!isa<UndefValue>(V)) continue; 983 984 BasicBlock *BB = PN->getIncomingBlock(i); 985 IncomingValueMap::const_iterator It = IncomingValues.find(BB); 986 987 // Keep track of undef/poison incoming values. Those must match, so we fix 988 // them up below if needed. 989 // Note: this is conservatively correct, but we could try harder and group 990 // the undef values per incoming basic block. 991 if (It == IncomingValues.end()) { 992 TrueUndefOps.push_back(i); 993 continue; 994 } 995 996 // There is a defined value for this incoming block, so map this undef 997 // incoming value to the defined value. 998 PN->setIncomingValue(i, It->second); 999 } 1000 1001 // If there are both undef and poison values incoming, then convert those 1002 // values to undef. It is invalid to have different values for the same 1003 // incoming block. 1004 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) { 1005 return isa<PoisonValue>(PN->getIncomingValue(i)); 1006 }); 1007 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) { 1008 for (unsigned i : TrueUndefOps) 1009 PN->setIncomingValue(i, UndefValue::get(PN->getType())); 1010 } 1011 } 1012 1013 // Only when they shares a single common predecessor, return true. 1014 // Only handles cases when BB can't be merged while its predecessors can be 1015 // redirected. 1016 static bool 1017 CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, 1018 const SmallPtrSetImpl<BasicBlock *> &BBPreds, 1019 BasicBlock *&CommonPred) { 1020 1021 // There must be phis in BB, otherwise BB will be merged into Succ directly 1022 if (BB->phis().empty() || Succ->phis().empty()) 1023 return false; 1024 1025 // BB must have predecessors not shared that can be redirected to Succ 1026 if (!BB->hasNPredecessorsOrMore(2)) 1027 return false; 1028 1029 if (any_of(BBPreds, [](const BasicBlock *Pred) { 1030 return isa<IndirectBrInst>(Pred->getTerminator()); 1031 })) 1032 return false; 1033 1034 // Get the single common predecessor of both BB and Succ. Return false 1035 // when there are more than one common predecessors. 1036 for (BasicBlock *SuccPred : predecessors(Succ)) { 1037 if (BBPreds.count(SuccPred)) { 1038 if (CommonPred) 1039 return false; 1040 CommonPred = SuccPred; 1041 } 1042 } 1043 1044 return true; 1045 } 1046 1047 /// Check whether removing \p BB will make the phis in its \p Succ have too 1048 /// many incoming entries. This function does not check whether \p BB is 1049 /// foldable or not. 1050 static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ) { 1051 // If BB only has one predecessor, then removing it will not introduce more 1052 // incoming edges for phis. 1053 if (BB->hasNPredecessors(1)) 1054 return false; 1055 unsigned NumPreds = pred_size(BB); 1056 unsigned NumChangedPhi = 0; 1057 for (auto &Phi : Succ->phis()) { 1058 // If the incoming value is a phi and the phi is defined in BB, 1059 // then removing BB will not increase the total phi entries of the ir. 1060 if (auto *IncomingPhi = dyn_cast<PHINode>(Phi.getIncomingValueForBlock(BB))) 1061 if (IncomingPhi->getParent() == BB) 1062 continue; 1063 // Otherwise, we need to add entries to the phi 1064 NumChangedPhi++; 1065 } 1066 // For every phi that needs to be changed, (NumPreds - 1) new entries will be 1067 // added. If the total increase in phi entries exceeds 1068 // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as 1069 // introducing too many new phi entries. 1070 return (NumPreds - 1) * NumChangedPhi > 1071 MaxPhiEntriesIncreaseAfterRemovingEmptyBlock; 1072 } 1073 1074 /// Replace a value flowing from a block to a phi with 1075 /// potentially multiple instances of that value flowing from the 1076 /// block's predecessors to the phi. 1077 /// 1078 /// \param BB The block with the value flowing into the phi. 1079 /// \param BBPreds The predecessors of BB. 1080 /// \param PN The phi that we are updating. 1081 /// \param CommonPred The common predecessor of BB and PN's BasicBlock 1082 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, 1083 const PredBlockVector &BBPreds, 1084 PHINode *PN, 1085 BasicBlock *CommonPred) { 1086 Value *OldVal = PN->removeIncomingValue(BB, false); 1087 assert(OldVal && "No entry in PHI for Pred BB!"); 1088 1089 IncomingValueMap IncomingValues; 1090 1091 // We are merging two blocks - BB, and the block containing PN - and 1092 // as a result we need to redirect edges from the predecessors of BB 1093 // to go to the block containing PN, and update PN 1094 // accordingly. Since we allow merging blocks in the case where the 1095 // predecessor and successor blocks both share some predecessors, 1096 // and where some of those common predecessors might have undef 1097 // values flowing into PN, we want to rewrite those values to be 1098 // consistent with the non-undef values. 1099 1100 gatherIncomingValuesToPhi(PN, IncomingValues); 1101 1102 // If this incoming value is one of the PHI nodes in BB, the new entries 1103 // in the PHI node are the entries from the old PHI. 1104 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) { 1105 PHINode *OldValPN = cast<PHINode>(OldVal); 1106 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) { 1107 // Note that, since we are merging phi nodes and BB and Succ might 1108 // have common predecessors, we could end up with a phi node with 1109 // identical incoming branches. This will be cleaned up later (and 1110 // will trigger asserts if we try to clean it up now, without also 1111 // simplifying the corresponding conditional branch). 1112 BasicBlock *PredBB = OldValPN->getIncomingBlock(i); 1113 1114 if (PredBB == CommonPred) 1115 continue; 1116 1117 Value *PredVal = OldValPN->getIncomingValue(i); 1118 Value *Selected = 1119 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues); 1120 1121 // And add a new incoming value for this predecessor for the 1122 // newly retargeted branch. 1123 PN->addIncoming(Selected, PredBB); 1124 } 1125 if (CommonPred) 1126 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB); 1127 1128 } else { 1129 for (BasicBlock *PredBB : BBPreds) { 1130 // Update existing incoming values in PN for this 1131 // predecessor of BB. 1132 if (PredBB == CommonPred) 1133 continue; 1134 1135 Value *Selected = 1136 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues); 1137 1138 // And add a new incoming value for this predecessor for the 1139 // newly retargeted branch. 1140 PN->addIncoming(Selected, PredBB); 1141 } 1142 if (CommonPred) 1143 PN->addIncoming(OldVal, BB); 1144 } 1145 1146 replaceUndefValuesInPhi(PN, IncomingValues); 1147 } 1148 1149 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, 1150 DomTreeUpdater *DTU) { 1151 assert(BB != &BB->getParent()->getEntryBlock() && 1152 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"); 1153 1154 // We can't simplify infinite loops. 1155 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0); 1156 if (BB == Succ) 1157 return false; 1158 1159 SmallPtrSet<BasicBlock *, 16> BBPreds(llvm::from_range, predecessors(BB)); 1160 1161 // The single common predecessor of BB and Succ when BB cannot be killed 1162 BasicBlock *CommonPred = nullptr; 1163 1164 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds); 1165 1166 // Even if we can not fold BB into Succ, we may be able to redirect the 1167 // predecessors of BB to Succ. 1168 bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc( 1169 BB, Succ, BBPreds, CommonPred); 1170 1171 if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ)) 1172 return false; 1173 1174 // Check to see if merging these blocks/phis would cause conflicts for any of 1175 // the phi nodes in BB or Succ. If not, we can safely merge. 1176 1177 // Check for cases where Succ has multiple predecessors and a PHI node in BB 1178 // has uses which will not disappear when the PHI nodes are merged. It is 1179 // possible to handle such cases, but difficult: it requires checking whether 1180 // BB dominates Succ, which is non-trivial to calculate in the case where 1181 // Succ has multiple predecessors. Also, it requires checking whether 1182 // constructing the necessary self-referential PHI node doesn't introduce any 1183 // conflicts; this isn't too difficult, but the previous code for doing this 1184 // was incorrect. 1185 // 1186 // Note that if this check finds a live use, BB dominates Succ, so BB is 1187 // something like a loop pre-header (or rarely, a part of an irreducible CFG); 1188 // folding the branch isn't profitable in that case anyway. 1189 if (!Succ->getSinglePredecessor()) { 1190 BasicBlock::iterator BBI = BB->begin(); 1191 while (isa<PHINode>(*BBI)) { 1192 for (Use &U : BBI->uses()) { 1193 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) { 1194 if (PN->getIncomingBlock(U) != BB) 1195 return false; 1196 } else { 1197 return false; 1198 } 1199 } 1200 ++BBI; 1201 } 1202 } 1203 1204 if (BBPhisMergeable && CommonPred) 1205 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName() 1206 << " and " << Succ->getName() << " : " 1207 << CommonPred->getName() << "\n"); 1208 1209 // 'BB' and 'BB->Pred' are loop latches, bail out to presrve inner loop 1210 // metadata. 1211 // 1212 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given 1213 // current status (that loop metadata is implemented as metadata attached to 1214 // the branch instruction in the loop latch block). To quote from review 1215 // comments, "the current representation of loop metadata (using a loop latch 1216 // terminator attachment) is known to be fundamentally broken. Loop latches 1217 // are not uniquely associated with loops (both in that a latch can be part of 1218 // multiple loops and a loop may have multiple latches). Loop headers are. The 1219 // solution to this problem is also known: Add support for basic block 1220 // metadata, and attach loop metadata to the loop header." 1221 // 1222 // Why bail out: 1223 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is 1224 // the latch for inner-loop (see reason below), so bail out to prerserve 1225 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata 1226 // to this inner-loop. 1227 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most 1228 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L, 1229 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of 1230 // one self-looping basic block, which is contradictory with the assumption. 1231 // 1232 // To illustrate how inner-loop metadata is dropped: 1233 // 1234 // CFG Before 1235 // 1236 // BB is while.cond.exit, attached with loop metdata md2. 1237 // BB->Pred is for.body, attached with loop metadata md1. 1238 // 1239 // entry 1240 // | 1241 // v 1242 // ---> while.cond -------------> while.end 1243 // | | 1244 // | v 1245 // | while.body 1246 // | | 1247 // | v 1248 // | for.body <---- (md1) 1249 // | | |______| 1250 // | v 1251 // | while.cond.exit (md2) 1252 // | | 1253 // |_______| 1254 // 1255 // CFG After 1256 // 1257 // while.cond1 is the merge of while.cond.exit and while.cond above. 1258 // for.body is attached with md2, and md1 is dropped. 1259 // If LoopSimplify runs later (as a part of loop pass), it could create 1260 // dedicated exits for inner-loop (essentially adding `while.cond.exit` 1261 // back), but won't it won't see 'md1' nor restore it for the inner-loop. 1262 // 1263 // entry 1264 // | 1265 // v 1266 // ---> while.cond1 -------------> while.end 1267 // | | 1268 // | v 1269 // | while.body 1270 // | | 1271 // | v 1272 // | for.body <---- (md2) 1273 // |_______| |______| 1274 if (Instruction *TI = BB->getTerminator()) 1275 if (TI->hasNonDebugLocLoopMetadata()) 1276 for (BasicBlock *Pred : predecessors(BB)) 1277 if (Instruction *PredTI = Pred->getTerminator()) 1278 if (PredTI->hasNonDebugLocLoopMetadata()) 1279 return false; 1280 1281 if (BBKillable) 1282 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB); 1283 else if (BBPhisMergeable) 1284 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB); 1285 1286 SmallVector<DominatorTree::UpdateType, 32> Updates; 1287 1288 if (DTU) { 1289 // To avoid processing the same predecessor more than once. 1290 SmallPtrSet<BasicBlock *, 8> SeenPreds; 1291 // All predecessors of BB (except the common predecessor) will be moved to 1292 // Succ. 1293 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1); 1294 SmallPtrSet<BasicBlock *, 16> SuccPreds(llvm::from_range, 1295 predecessors(Succ)); 1296 for (auto *PredOfBB : predecessors(BB)) { 1297 // Do not modify those common predecessors of BB and Succ 1298 if (!SuccPreds.contains(PredOfBB)) 1299 if (SeenPreds.insert(PredOfBB).second) 1300 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ}); 1301 } 1302 1303 SeenPreds.clear(); 1304 1305 for (auto *PredOfBB : predecessors(BB)) 1306 // When BB cannot be killed, do not remove the edge between BB and 1307 // CommonPred. 1308 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred) 1309 Updates.push_back({DominatorTree::Delete, PredOfBB, BB}); 1310 1311 if (BBKillable) 1312 Updates.push_back({DominatorTree::Delete, BB, Succ}); 1313 } 1314 1315 if (isa<PHINode>(Succ->begin())) { 1316 // If there is more than one pred of succ, and there are PHI nodes in 1317 // the successor, then we need to add incoming edges for the PHI nodes 1318 // 1319 const PredBlockVector BBPreds(predecessors(BB)); 1320 1321 // Loop over all of the PHI nodes in the successor of BB. 1322 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) { 1323 PHINode *PN = cast<PHINode>(I); 1324 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred); 1325 } 1326 } 1327 1328 if (Succ->getSinglePredecessor()) { 1329 // BB is the only predecessor of Succ, so Succ will end up with exactly 1330 // the same predecessors BB had. 1331 // Copy over any phi, debug or lifetime instruction. 1332 BB->getTerminator()->eraseFromParent(); 1333 Succ->splice(Succ->getFirstNonPHIIt(), BB); 1334 } else { 1335 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) { 1336 // We explicitly check for such uses for merging phis. 1337 assert(PN->use_empty() && "There shouldn't be any uses here!"); 1338 PN->eraseFromParent(); 1339 } 1340 } 1341 1342 // If the unconditional branch we replaced contains non-debug llvm.loop 1343 // metadata, we add the metadata to the branch instructions in the 1344 // predecessors. 1345 if (Instruction *TI = BB->getTerminator()) 1346 if (TI->hasNonDebugLocLoopMetadata()) { 1347 MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop); 1348 for (BasicBlock *Pred : predecessors(BB)) 1349 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD); 1350 } 1351 1352 if (BBKillable) { 1353 // Everything that jumped to BB now goes to Succ. 1354 BB->replaceAllUsesWith(Succ); 1355 1356 if (!Succ->hasName()) 1357 Succ->takeName(BB); 1358 1359 // Clear the successor list of BB to match updates applying to DTU later. 1360 if (BB->getTerminator()) 1361 BB->back().eraseFromParent(); 1362 1363 new UnreachableInst(BB->getContext(), BB); 1364 assert(succ_empty(BB) && "The successor list of BB isn't empty before " 1365 "applying corresponding DTU updates."); 1366 } else if (BBPhisMergeable) { 1367 // Everything except CommonPred that jumped to BB now goes to Succ. 1368 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool { 1369 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser())) 1370 return UseInst->getParent() != CommonPred && 1371 BBPreds.contains(UseInst->getParent()); 1372 return false; 1373 }); 1374 } 1375 1376 if (DTU) 1377 DTU->applyUpdates(Updates); 1378 1379 if (BBKillable) 1380 DeleteDeadBlock(BB, DTU); 1381 1382 return true; 1383 } 1384 1385 static bool 1386 EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, 1387 SmallPtrSetImpl<PHINode *> &ToRemove) { 1388 // This implementation doesn't currently consider undef operands 1389 // specially. Theoretically, two phis which are identical except for 1390 // one having an undef where the other doesn't could be collapsed. 1391 1392 bool Changed = false; 1393 1394 // Examine each PHI. 1395 // Note that increment of I must *NOT* be in the iteration_expression, since 1396 // we don't want to immediately advance when we restart from the beginning. 1397 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) { 1398 ++I; 1399 // Is there an identical PHI node in this basic block? 1400 // Note that we only look in the upper square's triangle, 1401 // we already checked that the lower triangle PHI's aren't identical. 1402 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) { 1403 if (ToRemove.contains(DuplicatePN)) 1404 continue; 1405 if (!DuplicatePN->isIdenticalToWhenDefined(PN)) 1406 continue; 1407 // A duplicate. Replace this PHI with the base PHI. 1408 ++NumPHICSEs; 1409 DuplicatePN->replaceAllUsesWith(PN); 1410 ToRemove.insert(DuplicatePN); 1411 Changed = true; 1412 1413 // The RAUW can change PHIs that we already visited. 1414 I = BB->begin(); 1415 break; // Start over from the beginning. 1416 } 1417 } 1418 return Changed; 1419 } 1420 1421 static bool 1422 EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, 1423 SmallPtrSetImpl<PHINode *> &ToRemove) { 1424 // This implementation doesn't currently consider undef operands 1425 // specially. Theoretically, two phis which are identical except for 1426 // one having an undef where the other doesn't could be collapsed. 1427 1428 struct PHIDenseMapInfo { 1429 static PHINode *getEmptyKey() { 1430 return DenseMapInfo<PHINode *>::getEmptyKey(); 1431 } 1432 1433 static PHINode *getTombstoneKey() { 1434 return DenseMapInfo<PHINode *>::getTombstoneKey(); 1435 } 1436 1437 static bool isSentinel(PHINode *PN) { 1438 return PN == getEmptyKey() || PN == getTombstoneKey(); 1439 } 1440 1441 // WARNING: this logic must be kept in sync with 1442 // Instruction::isIdenticalToWhenDefined()! 1443 static unsigned getHashValueImpl(PHINode *PN) { 1444 // Compute a hash value on the operands. Instcombine will likely have 1445 // sorted them, which helps expose duplicates, but we have to check all 1446 // the operands to be safe in case instcombine hasn't run. 1447 return static_cast<unsigned>( 1448 hash_combine(hash_combine_range(PN->operand_values()), 1449 hash_combine_range(PN->blocks()))); 1450 } 1451 1452 static unsigned getHashValue(PHINode *PN) { 1453 #ifndef NDEBUG 1454 // If -phicse-debug-hash was specified, return a constant -- this 1455 // will force all hashing to collide, so we'll exhaustively search 1456 // the table for a match, and the assertion in isEqual will fire if 1457 // there's a bug causing equal keys to hash differently. 1458 if (PHICSEDebugHash) 1459 return 0; 1460 #endif 1461 return getHashValueImpl(PN); 1462 } 1463 1464 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) { 1465 if (isSentinel(LHS) || isSentinel(RHS)) 1466 return LHS == RHS; 1467 return LHS->isIdenticalTo(RHS); 1468 } 1469 1470 static bool isEqual(PHINode *LHS, PHINode *RHS) { 1471 // These comparisons are nontrivial, so assert that equality implies 1472 // hash equality (DenseMap demands this as an invariant). 1473 bool Result = isEqualImpl(LHS, RHS); 1474 assert(!Result || (isSentinel(LHS) && LHS == RHS) || 1475 getHashValueImpl(LHS) == getHashValueImpl(RHS)); 1476 return Result; 1477 } 1478 }; 1479 1480 // Set of unique PHINodes. 1481 DenseSet<PHINode *, PHIDenseMapInfo> PHISet; 1482 PHISet.reserve(4 * PHICSENumPHISmallSize); 1483 1484 // Examine each PHI. 1485 bool Changed = false; 1486 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) { 1487 if (ToRemove.contains(PN)) 1488 continue; 1489 auto Inserted = PHISet.insert(PN); 1490 if (!Inserted.second) { 1491 // A duplicate. Replace this PHI with its duplicate. 1492 ++NumPHICSEs; 1493 PN->replaceAllUsesWith(*Inserted.first); 1494 ToRemove.insert(PN); 1495 Changed = true; 1496 1497 // The RAUW can change PHIs that we already visited. Start over from the 1498 // beginning. 1499 PHISet.clear(); 1500 I = BB->begin(); 1501 } 1502 } 1503 1504 return Changed; 1505 } 1506 1507 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB, 1508 SmallPtrSetImpl<PHINode *> &ToRemove) { 1509 if ( 1510 #ifndef NDEBUG 1511 !PHICSEDebugHash && 1512 #endif 1513 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize)) 1514 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove); 1515 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove); 1516 } 1517 1518 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) { 1519 SmallPtrSet<PHINode *, 8> ToRemove; 1520 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove); 1521 for (PHINode *PN : ToRemove) 1522 PN->eraseFromParent(); 1523 return Changed; 1524 } 1525 1526 Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign, 1527 const DataLayout &DL) { 1528 V = V->stripPointerCasts(); 1529 1530 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 1531 // TODO: Ideally, this function would not be called if PrefAlign is smaller 1532 // than the current alignment, as the known bits calculation should have 1533 // already taken it into account. However, this is not always the case, 1534 // as computeKnownBits() has a depth limit, while stripPointerCasts() 1535 // doesn't. 1536 Align CurrentAlign = AI->getAlign(); 1537 if (PrefAlign <= CurrentAlign) 1538 return CurrentAlign; 1539 1540 // If the preferred alignment is greater than the natural stack alignment 1541 // then don't round up. This avoids dynamic stack realignment. 1542 MaybeAlign StackAlign = DL.getStackAlignment(); 1543 if (StackAlign && PrefAlign > *StackAlign) 1544 return CurrentAlign; 1545 AI->setAlignment(PrefAlign); 1546 return PrefAlign; 1547 } 1548 1549 if (auto *GV = dyn_cast<GlobalVariable>(V)) { 1550 // TODO: as above, this shouldn't be necessary. 1551 Align CurrentAlign = GV->getPointerAlignment(DL); 1552 if (PrefAlign <= CurrentAlign) 1553 return CurrentAlign; 1554 1555 // If there is a large requested alignment and we can, bump up the alignment 1556 // of the global. If the memory we set aside for the global may not be the 1557 // memory used by the final program then it is impossible for us to reliably 1558 // enforce the preferred alignment. 1559 if (!GV->canIncreaseAlignment()) 1560 return CurrentAlign; 1561 1562 if (GV->isThreadLocal()) { 1563 unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT; 1564 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign)) 1565 PrefAlign = Align(MaxTLSAlign); 1566 } 1567 1568 GV->setAlignment(PrefAlign); 1569 return PrefAlign; 1570 } 1571 1572 return Align(1); 1573 } 1574 1575 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, 1576 const DataLayout &DL, 1577 const Instruction *CxtI, 1578 AssumptionCache *AC, 1579 const DominatorTree *DT) { 1580 assert(V->getType()->isPointerTy() && 1581 "getOrEnforceKnownAlignment expects a pointer!"); 1582 1583 KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT); 1584 unsigned TrailZ = Known.countMinTrailingZeros(); 1585 1586 // Avoid trouble with ridiculously large TrailZ values, such as 1587 // those computed from a null pointer. 1588 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent). 1589 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent); 1590 1591 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ)); 1592 1593 if (PrefAlign && *PrefAlign > Alignment) 1594 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL)); 1595 1596 // We don't need to make any adjustment. 1597 return Alignment; 1598 } 1599 1600 ///===---------------------------------------------------------------------===// 1601 /// Dbg Intrinsic utilities 1602 /// 1603 1604 /// See if there is a dbg.value intrinsic for DIVar for the PHI node. 1605 static bool PhiHasDebugValue(DILocalVariable *DIVar, 1606 DIExpression *DIExpr, 1607 PHINode *APN) { 1608 // Since we can't guarantee that the original dbg.declare intrinsic 1609 // is removed by LowerDbgDeclare(), we need to make sure that we are 1610 // not inserting the same dbg.value intrinsic over and over. 1611 SmallVector<DbgValueInst *, 1> DbgValues; 1612 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords; 1613 findDbgValues(DbgValues, APN, &DbgVariableRecords); 1614 for (auto *DVI : DbgValues) { 1615 assert(is_contained(DVI->getValues(), APN)); 1616 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr)) 1617 return true; 1618 } 1619 for (auto *DVR : DbgVariableRecords) { 1620 assert(is_contained(DVR->location_ops(), APN)); 1621 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr)) 1622 return true; 1623 } 1624 return false; 1625 } 1626 1627 /// Check if the alloc size of \p ValTy is large enough to cover the variable 1628 /// (or fragment of the variable) described by \p DII. 1629 /// 1630 /// This is primarily intended as a helper for the different 1631 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted 1632 /// describes an alloca'd variable, so we need to use the alloc size of the 1633 /// value when doing the comparison. E.g. an i1 value will be identified as 1634 /// covering an n-bit fragment, if the store size of i1 is at least n bits. 1635 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) { 1636 const DataLayout &DL = DII->getDataLayout(); 1637 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy); 1638 if (std::optional<uint64_t> FragmentSize = 1639 DII->getExpression()->getActiveBits(DII->getVariable())) 1640 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize)); 1641 1642 // We can't always calculate the size of the DI variable (e.g. if it is a 1643 // VLA). Try to use the size of the alloca that the dbg intrinsic describes 1644 // instead. 1645 if (DII->isAddressOfVariable()) { 1646 // DII should have exactly 1 location when it is an address. 1647 assert(DII->getNumVariableLocationOps() == 1 && 1648 "address of variable must have exactly 1 location operand."); 1649 if (auto *AI = 1650 dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) { 1651 if (std::optional<TypeSize> FragmentSize = 1652 AI->getAllocationSizeInBits(DL)) { 1653 return TypeSize::isKnownGE(ValueSize, *FragmentSize); 1654 } 1655 } 1656 } 1657 // Could not determine size of variable. Conservatively return false. 1658 return false; 1659 } 1660 // RemoveDIs: duplicate implementation of the above, using DbgVariableRecords, 1661 // the replacement for dbg.values. 1662 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) { 1663 const DataLayout &DL = DVR->getModule()->getDataLayout(); 1664 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy); 1665 if (std::optional<uint64_t> FragmentSize = 1666 DVR->getExpression()->getActiveBits(DVR->getVariable())) 1667 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize)); 1668 1669 // We can't always calculate the size of the DI variable (e.g. if it is a 1670 // VLA). Try to use the size of the alloca that the dbg intrinsic describes 1671 // instead. 1672 if (DVR->isAddressOfVariable()) { 1673 // DVR should have exactly 1 location when it is an address. 1674 assert(DVR->getNumVariableLocationOps() == 1 && 1675 "address of variable must have exactly 1 location operand."); 1676 if (auto *AI = 1677 dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) { 1678 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) { 1679 return TypeSize::isKnownGE(ValueSize, *FragmentSize); 1680 } 1681 } 1682 } 1683 // Could not determine size of variable. Conservatively return false. 1684 return false; 1685 } 1686 1687 static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, 1688 DILocalVariable *DIVar, 1689 DIExpression *DIExpr, 1690 const DebugLoc &NewLoc, 1691 BasicBlock::iterator Instr) { 1692 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV); 1693 DbgVariableRecord *DVRec = 1694 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get()); 1695 Instr->getParent()->insertDbgRecordBefore(DVRec, Instr); 1696 } 1697 1698 static void insertDbgValueOrDbgVariableRecordAfter( 1699 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, 1700 const DebugLoc &NewLoc, Instruction *Instr) { 1701 BasicBlock::iterator NextIt = std::next(Instr->getIterator()); 1702 NextIt.setHeadBit(true); 1703 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, NextIt); 1704 } 1705 1706 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value 1707 /// that has an associated llvm.dbg.declare intrinsic. 1708 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, 1709 StoreInst *SI, DIBuilder &Builder) { 1710 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII)); 1711 auto *DIVar = DII->getVariable(); 1712 assert(DIVar && "Missing variable"); 1713 auto *DIExpr = DII->getExpression(); 1714 Value *DV = SI->getValueOperand(); 1715 1716 DebugLoc NewLoc = getDebugValueLoc(DII); 1717 1718 // If the alloca describes the variable itself, i.e. the expression in the 1719 // dbg.declare doesn't start with a dereference, we can perform the 1720 // conversion if the value covers the entire fragment of DII. 1721 // If the alloca describes the *address* of DIVar, i.e. DIExpr is 1722 // *just* a DW_OP_deref, we use DV as is for the dbg.value. 1723 // We conservatively ignore other dereferences, because the following two are 1724 // not equivalent: 1725 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2)) 1726 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2)) 1727 // The former is adding 2 to the address of the variable, whereas the latter 1728 // is adding 2 to the value of the variable. As such, we insist on just a 1729 // deref expression. 1730 bool CanConvert = 1731 DIExpr->isDeref() || (!DIExpr->startsWithDeref() && 1732 valueCoversEntireFragment(DV->getType(), DII)); 1733 if (CanConvert) { 1734 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, 1735 SI->getIterator()); 1736 return; 1737 } 1738 1739 // FIXME: If storing to a part of the variable described by the dbg.declare, 1740 // then we want to insert a dbg.value for the corresponding fragment. 1741 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII 1742 << '\n'); 1743 // For now, when there is a store to parts of the variable (but we do not 1744 // know which part) we insert an dbg.value intrinsic to indicate that we 1745 // know nothing about the variable's content. 1746 DV = PoisonValue::get(DV->getType()); 1747 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, 1748 SI->getIterator()); 1749 } 1750 1751 static DIExpression *dropInitialDeref(const DIExpression *DIExpr) { 1752 int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1; 1753 return DIExpression::get(DIExpr->getContext(), 1754 DIExpr->getElements().drop_front(NumEltDropped)); 1755 } 1756 1757 void llvm::InsertDebugValueAtStoreLoc(DbgVariableIntrinsic *DII, StoreInst *SI, 1758 DIBuilder &Builder) { 1759 auto *DIVar = DII->getVariable(); 1760 assert(DIVar && "Missing variable"); 1761 auto *DIExpr = DII->getExpression(); 1762 DIExpr = dropInitialDeref(DIExpr); 1763 Value *DV = SI->getValueOperand(); 1764 1765 DebugLoc NewLoc = getDebugValueLoc(DII); 1766 1767 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, 1768 SI->getIterator()); 1769 } 1770 1771 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value 1772 /// that has an associated llvm.dbg.declare intrinsic. 1773 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, 1774 LoadInst *LI, DIBuilder &Builder) { 1775 auto *DIVar = DII->getVariable(); 1776 auto *DIExpr = DII->getExpression(); 1777 assert(DIVar && "Missing variable"); 1778 1779 if (!valueCoversEntireFragment(LI->getType(), DII)) { 1780 // FIXME: If only referring to a part of the variable described by the 1781 // dbg.declare, then we want to insert a dbg.value for the corresponding 1782 // fragment. 1783 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " 1784 << *DII << '\n'); 1785 return; 1786 } 1787 1788 DebugLoc NewLoc = getDebugValueLoc(DII); 1789 1790 // We are now tracking the loaded value instead of the address. In the 1791 // future if multi-location support is added to the IR, it might be 1792 // preferable to keep tracking both the loaded value and the original 1793 // address in case the alloca can not be elided. 1794 insertDbgValueOrDbgVariableRecordAfter(Builder, LI, DIVar, DIExpr, NewLoc, 1795 LI); 1796 } 1797 1798 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, 1799 StoreInst *SI, DIBuilder &Builder) { 1800 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign()); 1801 auto *DIVar = DVR->getVariable(); 1802 assert(DIVar && "Missing variable"); 1803 auto *DIExpr = DVR->getExpression(); 1804 Value *DV = SI->getValueOperand(); 1805 1806 DebugLoc NewLoc = getDebugValueLoc(DVR); 1807 1808 // If the alloca describes the variable itself, i.e. the expression in the 1809 // dbg.declare doesn't start with a dereference, we can perform the 1810 // conversion if the value covers the entire fragment of DII. 1811 // If the alloca describes the *address* of DIVar, i.e. DIExpr is 1812 // *just* a DW_OP_deref, we use DV as is for the dbg.value. 1813 // We conservatively ignore other dereferences, because the following two are 1814 // not equivalent: 1815 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2)) 1816 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2)) 1817 // The former is adding 2 to the address of the variable, whereas the latter 1818 // is adding 2 to the value of the variable. As such, we insist on just a 1819 // deref expression. 1820 bool CanConvert = 1821 DIExpr->isDeref() || (!DIExpr->startsWithDeref() && 1822 valueCoversEntireFragment(DV->getType(), DVR)); 1823 if (CanConvert) { 1824 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, 1825 SI->getIterator()); 1826 return; 1827 } 1828 1829 // FIXME: If storing to a part of the variable described by the dbg.declare, 1830 // then we want to insert a dbg.value for the corresponding fragment. 1831 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR 1832 << '\n'); 1833 1834 // For now, when there is a store to parts of the variable (but we do not 1835 // know which part) we insert an dbg.value intrinsic to indicate that we 1836 // know nothing about the variable's content. 1837 DV = PoisonValue::get(DV->getType()); 1838 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV); 1839 DbgVariableRecord *NewDVR = 1840 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get()); 1841 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator()); 1842 } 1843 1844 void llvm::InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI, 1845 DIBuilder &Builder) { 1846 auto *DIVar = DVR->getVariable(); 1847 assert(DIVar && "Missing variable"); 1848 auto *DIExpr = DVR->getExpression(); 1849 DIExpr = dropInitialDeref(DIExpr); 1850 Value *DV = SI->getValueOperand(); 1851 1852 DebugLoc NewLoc = getDebugValueLoc(DVR); 1853 1854 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc, 1855 SI->getIterator()); 1856 } 1857 1858 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated 1859 /// llvm.dbg.declare intrinsic. 1860 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, 1861 PHINode *APN, DIBuilder &Builder) { 1862 auto *DIVar = DII->getVariable(); 1863 auto *DIExpr = DII->getExpression(); 1864 assert(DIVar && "Missing variable"); 1865 1866 if (PhiHasDebugValue(DIVar, DIExpr, APN)) 1867 return; 1868 1869 if (!valueCoversEntireFragment(APN->getType(), DII)) { 1870 // FIXME: If only referring to a part of the variable described by the 1871 // dbg.declare, then we want to insert a dbg.value for the corresponding 1872 // fragment. 1873 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " 1874 << *DII << '\n'); 1875 return; 1876 } 1877 1878 BasicBlock *BB = APN->getParent(); 1879 auto InsertionPt = BB->getFirstInsertionPt(); 1880 1881 DebugLoc NewLoc = getDebugValueLoc(DII); 1882 1883 // The block may be a catchswitch block, which does not have a valid 1884 // insertion point. 1885 // FIXME: Insert dbg.value markers in the successors when appropriate. 1886 if (InsertionPt != BB->end()) { 1887 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc, 1888 InsertionPt); 1889 } 1890 } 1891 1892 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI, 1893 DIBuilder &Builder) { 1894 auto *DIVar = DVR->getVariable(); 1895 auto *DIExpr = DVR->getExpression(); 1896 assert(DIVar && "Missing variable"); 1897 1898 if (!valueCoversEntireFragment(LI->getType(), DVR)) { 1899 // FIXME: If only referring to a part of the variable described by the 1900 // dbg.declare, then we want to insert a DbgVariableRecord for the 1901 // corresponding fragment. 1902 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: " 1903 << *DVR << '\n'); 1904 return; 1905 } 1906 1907 DebugLoc NewLoc = getDebugValueLoc(DVR); 1908 1909 // We are now tracking the loaded value instead of the address. In the 1910 // future if multi-location support is added to the IR, it might be 1911 // preferable to keep tracking both the loaded value and the original 1912 // address in case the alloca can not be elided. 1913 1914 // Create a DbgVariableRecord directly and insert. 1915 ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI); 1916 DbgVariableRecord *DV = 1917 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get()); 1918 LI->getParent()->insertDbgRecordAfter(DV, LI); 1919 } 1920 1921 /// Determine whether this alloca is either a VLA or an array. 1922 static bool isArray(AllocaInst *AI) { 1923 return AI->isArrayAllocation() || 1924 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy()); 1925 } 1926 1927 /// Determine whether this alloca is a structure. 1928 static bool isStructure(AllocaInst *AI) { 1929 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy(); 1930 } 1931 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN, 1932 DIBuilder &Builder) { 1933 auto *DIVar = DVR->getVariable(); 1934 auto *DIExpr = DVR->getExpression(); 1935 assert(DIVar && "Missing variable"); 1936 1937 if (PhiHasDebugValue(DIVar, DIExpr, APN)) 1938 return; 1939 1940 if (!valueCoversEntireFragment(APN->getType(), DVR)) { 1941 // FIXME: If only referring to a part of the variable described by the 1942 // dbg.declare, then we want to insert a DbgVariableRecord for the 1943 // corresponding fragment. 1944 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: " 1945 << *DVR << '\n'); 1946 return; 1947 } 1948 1949 BasicBlock *BB = APN->getParent(); 1950 auto InsertionPt = BB->getFirstInsertionPt(); 1951 1952 DebugLoc NewLoc = getDebugValueLoc(DVR); 1953 1954 // The block may be a catchswitch block, which does not have a valid 1955 // insertion point. 1956 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate. 1957 if (InsertionPt != BB->end()) { 1958 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc, 1959 InsertionPt); 1960 } 1961 } 1962 1963 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set 1964 /// of llvm.dbg.value intrinsics. 1965 bool llvm::LowerDbgDeclare(Function &F) { 1966 bool Changed = false; 1967 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); 1968 SmallVector<DbgDeclareInst *, 4> Dbgs; 1969 SmallVector<DbgVariableRecord *> DVRs; 1970 for (auto &FI : F) { 1971 for (Instruction &BI : FI) { 1972 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI)) 1973 Dbgs.push_back(DDI); 1974 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) { 1975 if (DVR.getType() == DbgVariableRecord::LocationType::Declare) 1976 DVRs.push_back(&DVR); 1977 } 1978 } 1979 } 1980 1981 if (Dbgs.empty() && DVRs.empty()) 1982 return Changed; 1983 1984 auto LowerOne = [&](auto *DDI) { 1985 AllocaInst *AI = 1986 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0)); 1987 // If this is an alloca for a scalar variable, insert a dbg.value 1988 // at each load and store to the alloca and erase the dbg.declare. 1989 // The dbg.values allow tracking a variable even if it is not 1990 // stored on the stack, while the dbg.declare can only describe 1991 // the stack slot (and at a lexical-scope granularity). Later 1992 // passes will attempt to elide the stack slot. 1993 if (!AI || isArray(AI) || isStructure(AI)) 1994 return; 1995 1996 // A volatile load/store means that the alloca can't be elided anyway. 1997 if (llvm::any_of(AI->users(), [](User *U) -> bool { 1998 if (LoadInst *LI = dyn_cast<LoadInst>(U)) 1999 return LI->isVolatile(); 2000 if (StoreInst *SI = dyn_cast<StoreInst>(U)) 2001 return SI->isVolatile(); 2002 return false; 2003 })) 2004 return; 2005 2006 SmallVector<const Value *, 8> WorkList; 2007 WorkList.push_back(AI); 2008 while (!WorkList.empty()) { 2009 const Value *V = WorkList.pop_back_val(); 2010 for (const auto &AIUse : V->uses()) { 2011 User *U = AIUse.getUser(); 2012 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 2013 if (AIUse.getOperandNo() == 1) 2014 ConvertDebugDeclareToDebugValue(DDI, SI, DIB); 2015 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 2016 ConvertDebugDeclareToDebugValue(DDI, LI, DIB); 2017 } else if (CallInst *CI = dyn_cast<CallInst>(U)) { 2018 // This is a call by-value or some other instruction that takes a 2019 // pointer to the variable. Insert a *value* intrinsic that describes 2020 // the variable by dereferencing the alloca. 2021 if (!CI->isLifetimeStartOrEnd()) { 2022 DebugLoc NewLoc = getDebugValueLoc(DDI); 2023 auto *DerefExpr = 2024 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref); 2025 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(), 2026 DerefExpr, NewLoc, 2027 CI->getIterator()); 2028 } 2029 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) { 2030 if (BI->getType()->isPointerTy()) 2031 WorkList.push_back(BI); 2032 } 2033 } 2034 } 2035 DDI->eraseFromParent(); 2036 Changed = true; 2037 }; 2038 2039 for_each(Dbgs, LowerOne); 2040 for_each(DVRs, LowerOne); 2041 2042 if (Changed) 2043 for (BasicBlock &BB : F) 2044 RemoveRedundantDbgInstrs(&BB); 2045 2046 return Changed; 2047 } 2048 2049 // RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the 2050 // debug-info out of the block's DbgVariableRecords rather than dbg.value 2051 // intrinsics. 2052 static void 2053 insertDbgVariableRecordsForPHIs(BasicBlock *BB, 2054 SmallVectorImpl<PHINode *> &InsertedPHIs) { 2055 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from."); 2056 if (InsertedPHIs.size() == 0) 2057 return; 2058 2059 // Map existing PHI nodes to their DbgVariableRecords. 2060 DenseMap<Value *, DbgVariableRecord *> DbgValueMap; 2061 for (auto &I : *BB) { 2062 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) { 2063 for (Value *V : DVR.location_ops()) 2064 if (auto *Loc = dyn_cast_or_null<PHINode>(V)) 2065 DbgValueMap.insert({Loc, &DVR}); 2066 } 2067 } 2068 if (DbgValueMap.size() == 0) 2069 return; 2070 2071 // Map a pair of the destination BB and old DbgVariableRecord to the new 2072 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use 2073 // more than one of the inserted PHIs in the same destination BB, we can 2074 // update the same DbgVariableRecord with all the new PHIs instead of creating 2075 // one copy for each. 2076 MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *> 2077 NewDbgValueMap; 2078 // Then iterate through the new PHIs and look to see if they use one of the 2079 // previously mapped PHIs. If so, create a new DbgVariableRecord that will 2080 // propagate the info through the new PHI. If we use more than one new PHI in 2081 // a single destination BB with the same old dbg.value, merge the updates so 2082 // that we get a single new DbgVariableRecord with all the new PHIs. 2083 for (auto PHI : InsertedPHIs) { 2084 BasicBlock *Parent = PHI->getParent(); 2085 // Avoid inserting a debug-info record into an EH block. 2086 if (Parent->getFirstNonPHIIt()->isEHPad()) 2087 continue; 2088 for (auto VI : PHI->operand_values()) { 2089 auto V = DbgValueMap.find(VI); 2090 if (V != DbgValueMap.end()) { 2091 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second); 2092 auto NewDI = NewDbgValueMap.find({Parent, DbgII}); 2093 if (NewDI == NewDbgValueMap.end()) { 2094 DbgVariableRecord *NewDbgII = DbgII->clone(); 2095 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first; 2096 } 2097 DbgVariableRecord *NewDbgII = NewDI->second; 2098 // If PHI contains VI as an operand more than once, we may 2099 // replaced it in NewDbgII; confirm that it is present. 2100 if (is_contained(NewDbgII->location_ops(), VI)) 2101 NewDbgII->replaceVariableLocationOp(VI, PHI); 2102 } 2103 } 2104 } 2105 // Insert the new DbgVariableRecords into their destination blocks. 2106 for (auto DI : NewDbgValueMap) { 2107 BasicBlock *Parent = DI.first.first; 2108 DbgVariableRecord *NewDbgII = DI.second; 2109 auto InsertionPt = Parent->getFirstInsertionPt(); 2110 assert(InsertionPt != Parent->end() && "Ill-formed basic block"); 2111 2112 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt); 2113 } 2114 } 2115 2116 /// Propagate dbg.value intrinsics through the newly inserted PHIs. 2117 void llvm::insertDebugValuesForPHIs(BasicBlock *BB, 2118 SmallVectorImpl<PHINode *> &InsertedPHIs) { 2119 assert(BB && "No BasicBlock to clone dbg.value(s) from."); 2120 if (InsertedPHIs.size() == 0) 2121 return; 2122 2123 insertDbgVariableRecordsForPHIs(BB, InsertedPHIs); 2124 2125 // Map existing PHI nodes to their dbg.values. 2126 ValueToValueMapTy DbgValueMap; 2127 for (auto &I : *BB) { 2128 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) { 2129 for (Value *V : DbgII->location_ops()) 2130 if (auto *Loc = dyn_cast_or_null<PHINode>(V)) 2131 DbgValueMap.insert({Loc, DbgII}); 2132 } 2133 } 2134 if (DbgValueMap.size() == 0) 2135 return; 2136 2137 // Map a pair of the destination BB and old dbg.value to the new dbg.value, 2138 // so that if a dbg.value is being rewritten to use more than one of the 2139 // inserted PHIs in the same destination BB, we can update the same dbg.value 2140 // with all the new PHIs instead of creating one copy for each. 2141 MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>, 2142 DbgVariableIntrinsic *> 2143 NewDbgValueMap; 2144 // Then iterate through the new PHIs and look to see if they use one of the 2145 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will 2146 // propagate the info through the new PHI. If we use more than one new PHI in 2147 // a single destination BB with the same old dbg.value, merge the updates so 2148 // that we get a single new dbg.value with all the new PHIs. 2149 for (auto *PHI : InsertedPHIs) { 2150 BasicBlock *Parent = PHI->getParent(); 2151 // Avoid inserting an intrinsic into an EH block. 2152 if (Parent->getFirstNonPHIIt()->isEHPad()) 2153 continue; 2154 for (auto *VI : PHI->operand_values()) { 2155 auto V = DbgValueMap.find(VI); 2156 if (V != DbgValueMap.end()) { 2157 auto *DbgII = cast<DbgVariableIntrinsic>(V->second); 2158 auto [NewDI, Inserted] = NewDbgValueMap.try_emplace({Parent, DbgII}); 2159 if (Inserted) 2160 NewDI->second = cast<DbgVariableIntrinsic>(DbgII->clone()); 2161 DbgVariableIntrinsic *NewDbgII = NewDI->second; 2162 // If PHI contains VI as an operand more than once, we may 2163 // replaced it in NewDbgII; confirm that it is present. 2164 if (is_contained(NewDbgII->location_ops(), VI)) 2165 NewDbgII->replaceVariableLocationOp(VI, PHI); 2166 } 2167 } 2168 } 2169 // Insert thew new dbg.values into their destination blocks. 2170 for (auto DI : NewDbgValueMap) { 2171 BasicBlock *Parent = DI.first.first; 2172 auto *NewDbgII = DI.second; 2173 auto InsertionPt = Parent->getFirstInsertionPt(); 2174 assert(InsertionPt != Parent->end() && "Ill-formed basic block"); 2175 NewDbgII->insertBefore(InsertionPt); 2176 } 2177 } 2178 2179 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress, 2180 DIBuilder &Builder, uint8_t DIExprFlags, 2181 int Offset) { 2182 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address); 2183 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(Address); 2184 2185 auto ReplaceOne = [&](auto *DII) { 2186 assert(DII->getVariable() && "Missing variable"); 2187 auto *DIExpr = DII->getExpression(); 2188 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset); 2189 DII->setExpression(DIExpr); 2190 DII->replaceVariableLocationOp(Address, NewAddress); 2191 }; 2192 2193 for_each(DbgDeclares, ReplaceOne); 2194 for_each(DVRDeclares, ReplaceOne); 2195 2196 return !DbgDeclares.empty() || !DVRDeclares.empty(); 2197 } 2198 2199 static void updateOneDbgValueForAlloca(const DebugLoc &Loc, 2200 DILocalVariable *DIVar, 2201 DIExpression *DIExpr, Value *NewAddress, 2202 DbgValueInst *DVI, 2203 DbgVariableRecord *DVR, 2204 DIBuilder &Builder, int Offset) { 2205 assert(DIVar && "Missing variable"); 2206 2207 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it 2208 // should do with the alloca pointer is dereference it. Otherwise we don't 2209 // know how to handle it and give up. 2210 if (!DIExpr || DIExpr->getNumElements() < 1 || 2211 DIExpr->getElement(0) != dwarf::DW_OP_deref) 2212 return; 2213 2214 // Insert the offset before the first deref. 2215 if (Offset) 2216 DIExpr = DIExpression::prepend(DIExpr, 0, Offset); 2217 2218 if (DVI) { 2219 DVI->setExpression(DIExpr); 2220 DVI->replaceVariableLocationOp(0u, NewAddress); 2221 } else { 2222 assert(DVR); 2223 DVR->setExpression(DIExpr); 2224 DVR->replaceVariableLocationOp(0u, NewAddress); 2225 } 2226 } 2227 2228 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, 2229 DIBuilder &Builder, int Offset) { 2230 SmallVector<DbgValueInst *, 1> DbgUsers; 2231 SmallVector<DbgVariableRecord *, 1> DPUsers; 2232 findDbgValues(DbgUsers, AI, &DPUsers); 2233 2234 // Attempt to replace dbg.values that use this alloca. 2235 for (auto *DVI : DbgUsers) 2236 updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(), 2237 DVI->getExpression(), NewAllocaAddress, DVI, 2238 nullptr, Builder, Offset); 2239 2240 // Replace any DbgVariableRecords that use this alloca. 2241 for (DbgVariableRecord *DVR : DPUsers) 2242 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(), 2243 DVR->getExpression(), NewAllocaAddress, nullptr, 2244 DVR, Builder, Offset); 2245 } 2246 2247 /// Where possible to salvage debug information for \p I do so. 2248 /// If not possible mark undef. 2249 void llvm::salvageDebugInfo(Instruction &I) { 2250 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; 2251 SmallVector<DbgVariableRecord *, 1> DPUsers; 2252 findDbgUsers(DbgUsers, &I, &DPUsers); 2253 salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers); 2254 } 2255 2256 template <typename T> static void salvageDbgAssignAddress(T *Assign) { 2257 Instruction *I = dyn_cast<Instruction>(Assign->getAddress()); 2258 // Only instructions can be salvaged at the moment. 2259 if (!I) 2260 return; 2261 2262 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() && 2263 "address-expression shouldn't have fragment info"); 2264 2265 // The address component of a dbg.assign cannot be variadic. 2266 uint64_t CurrentLocOps = 0; 2267 SmallVector<Value *, 4> AdditionalValues; 2268 SmallVector<uint64_t, 16> Ops; 2269 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues); 2270 2271 // Check if the salvage failed. 2272 if (!NewV) 2273 return; 2274 2275 DIExpression *SalvagedExpr = DIExpression::appendOpsToArg( 2276 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false); 2277 assert(!SalvagedExpr->getFragmentInfo().has_value() && 2278 "address-expression shouldn't have fragment info"); 2279 2280 SalvagedExpr = SalvagedExpr->foldConstantMath(); 2281 2282 // Salvage succeeds if no additional values are required. 2283 if (AdditionalValues.empty()) { 2284 Assign->setAddress(NewV); 2285 Assign->setAddressExpression(SalvagedExpr); 2286 } else { 2287 Assign->setKillAddress(); 2288 } 2289 } 2290 2291 void llvm::salvageDebugInfoForDbgValues( 2292 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers, 2293 ArrayRef<DbgVariableRecord *> DPUsers) { 2294 // These are arbitrary chosen limits on the maximum number of values and the 2295 // maximum size of a debug expression we can salvage up to, used for 2296 // performance reasons. 2297 const unsigned MaxDebugArgs = 16; 2298 const unsigned MaxExpressionSize = 128; 2299 bool Salvaged = false; 2300 2301 for (auto *DII : DbgUsers) { 2302 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) { 2303 if (DAI->getAddress() == &I) { 2304 salvageDbgAssignAddress(DAI); 2305 Salvaged = true; 2306 } 2307 if (DAI->getValue() != &I) 2308 continue; 2309 } 2310 2311 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly 2312 // pointing out the value as a DWARF memory location description. 2313 bool StackValue = isa<DbgValueInst>(DII); 2314 auto DIILocation = DII->location_ops(); 2315 assert( 2316 is_contained(DIILocation, &I) && 2317 "DbgVariableIntrinsic must use salvaged instruction as its location"); 2318 SmallVector<Value *, 4> AdditionalValues; 2319 // `I` may appear more than once in DII's location ops, and each use of `I` 2320 // must be updated in the DIExpression and potentially have additional 2321 // values added; thus we call salvageDebugInfoImpl for each `I` instance in 2322 // DIILocation. 2323 Value *Op0 = nullptr; 2324 DIExpression *SalvagedExpr = DII->getExpression(); 2325 auto LocItr = find(DIILocation, &I); 2326 while (SalvagedExpr && LocItr != DIILocation.end()) { 2327 SmallVector<uint64_t, 16> Ops; 2328 unsigned LocNo = std::distance(DIILocation.begin(), LocItr); 2329 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands(); 2330 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues); 2331 if (!Op0) 2332 break; 2333 SalvagedExpr = 2334 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue); 2335 LocItr = std::find(++LocItr, DIILocation.end(), &I); 2336 } 2337 // salvageDebugInfoImpl should fail on examining the first element of 2338 // DbgUsers, or none of them. 2339 if (!Op0) 2340 break; 2341 2342 SalvagedExpr = SalvagedExpr->foldConstantMath(); 2343 DII->replaceVariableLocationOp(&I, Op0); 2344 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize; 2345 if (AdditionalValues.empty() && IsValidSalvageExpr) { 2346 DII->setExpression(SalvagedExpr); 2347 } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr && 2348 DII->getNumVariableLocationOps() + AdditionalValues.size() <= 2349 MaxDebugArgs) { 2350 DII->addVariableLocationOps(AdditionalValues, SalvagedExpr); 2351 } else { 2352 // Do not salvage using DIArgList for dbg.declare, as it is not currently 2353 // supported in those instructions. Also do not salvage if the resulting 2354 // DIArgList would contain an unreasonably large number of values. 2355 DII->setKillLocation(); 2356 } 2357 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); 2358 Salvaged = true; 2359 } 2360 // Duplicate of above block for DbgVariableRecords. 2361 for (auto *DVR : DPUsers) { 2362 if (DVR->isDbgAssign()) { 2363 if (DVR->getAddress() == &I) { 2364 salvageDbgAssignAddress(DVR); 2365 Salvaged = true; 2366 } 2367 if (DVR->getValue() != &I) 2368 continue; 2369 } 2370 2371 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they 2372 // are implicitly pointing out the value as a DWARF memory location 2373 // description. 2374 bool StackValue = 2375 DVR->getType() != DbgVariableRecord::LocationType::Declare; 2376 auto DVRLocation = DVR->location_ops(); 2377 assert( 2378 is_contained(DVRLocation, &I) && 2379 "DbgVariableIntrinsic must use salvaged instruction as its location"); 2380 SmallVector<Value *, 4> AdditionalValues; 2381 // 'I' may appear more than once in DVR's location ops, and each use of 'I' 2382 // must be updated in the DIExpression and potentially have additional 2383 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in 2384 // DVRLocation. 2385 Value *Op0 = nullptr; 2386 DIExpression *SalvagedExpr = DVR->getExpression(); 2387 auto LocItr = find(DVRLocation, &I); 2388 while (SalvagedExpr && LocItr != DVRLocation.end()) { 2389 SmallVector<uint64_t, 16> Ops; 2390 unsigned LocNo = std::distance(DVRLocation.begin(), LocItr); 2391 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands(); 2392 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues); 2393 if (!Op0) 2394 break; 2395 SalvagedExpr = 2396 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue); 2397 LocItr = std::find(++LocItr, DVRLocation.end(), &I); 2398 } 2399 // salvageDebugInfoImpl should fail on examining the first element of 2400 // DbgUsers, or none of them. 2401 if (!Op0) 2402 break; 2403 2404 SalvagedExpr = SalvagedExpr->foldConstantMath(); 2405 DVR->replaceVariableLocationOp(&I, Op0); 2406 bool IsValidSalvageExpr = 2407 SalvagedExpr->getNumElements() <= MaxExpressionSize; 2408 if (AdditionalValues.empty() && IsValidSalvageExpr) { 2409 DVR->setExpression(SalvagedExpr); 2410 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare && 2411 IsValidSalvageExpr && 2412 DVR->getNumVariableLocationOps() + AdditionalValues.size() <= 2413 MaxDebugArgs) { 2414 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr); 2415 } else { 2416 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is 2417 // currently only valid for stack value expressions. 2418 // Also do not salvage if the resulting DIArgList would contain an 2419 // unreasonably large number of values. 2420 DVR->setKillLocation(); 2421 } 2422 LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n'); 2423 Salvaged = true; 2424 } 2425 2426 if (Salvaged) 2427 return; 2428 2429 for (auto *DII : DbgUsers) 2430 DII->setKillLocation(); 2431 2432 for (auto *DVR : DPUsers) 2433 DVR->setKillLocation(); 2434 } 2435 2436 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, 2437 uint64_t CurrentLocOps, 2438 SmallVectorImpl<uint64_t> &Opcodes, 2439 SmallVectorImpl<Value *> &AdditionalValues) { 2440 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace()); 2441 // Rewrite a GEP into a DIExpression. 2442 SmallMapVector<Value *, APInt, 4> VariableOffsets; 2443 APInt ConstantOffset(BitWidth, 0); 2444 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) 2445 return nullptr; 2446 if (!VariableOffsets.empty() && !CurrentLocOps) { 2447 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0}); 2448 CurrentLocOps = 1; 2449 } 2450 for (const auto &Offset : VariableOffsets) { 2451 AdditionalValues.push_back(Offset.first); 2452 assert(Offset.second.isStrictlyPositive() && 2453 "Expected strictly positive multiplier for offset."); 2454 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu, 2455 Offset.second.getZExtValue(), dwarf::DW_OP_mul, 2456 dwarf::DW_OP_plus}); 2457 } 2458 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue()); 2459 return GEP->getOperand(0); 2460 } 2461 2462 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) { 2463 switch (Opcode) { 2464 case Instruction::Add: 2465 return dwarf::DW_OP_plus; 2466 case Instruction::Sub: 2467 return dwarf::DW_OP_minus; 2468 case Instruction::Mul: 2469 return dwarf::DW_OP_mul; 2470 case Instruction::SDiv: 2471 return dwarf::DW_OP_div; 2472 case Instruction::SRem: 2473 return dwarf::DW_OP_mod; 2474 case Instruction::Or: 2475 return dwarf::DW_OP_or; 2476 case Instruction::And: 2477 return dwarf::DW_OP_and; 2478 case Instruction::Xor: 2479 return dwarf::DW_OP_xor; 2480 case Instruction::Shl: 2481 return dwarf::DW_OP_shl; 2482 case Instruction::LShr: 2483 return dwarf::DW_OP_shr; 2484 case Instruction::AShr: 2485 return dwarf::DW_OP_shra; 2486 default: 2487 // TODO: Salvage from each kind of binop we know about. 2488 return 0; 2489 } 2490 } 2491 2492 static void handleSSAValueOperands(uint64_t CurrentLocOps, 2493 SmallVectorImpl<uint64_t> &Opcodes, 2494 SmallVectorImpl<Value *> &AdditionalValues, 2495 Instruction *I) { 2496 if (!CurrentLocOps) { 2497 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0}); 2498 CurrentLocOps = 1; 2499 } 2500 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps}); 2501 AdditionalValues.push_back(I->getOperand(1)); 2502 } 2503 2504 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, 2505 SmallVectorImpl<uint64_t> &Opcodes, 2506 SmallVectorImpl<Value *> &AdditionalValues) { 2507 // Handle binary operations with constant integer operands as a special case. 2508 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1)); 2509 // Values wider than 64 bits cannot be represented within a DIExpression. 2510 if (ConstInt && ConstInt->getBitWidth() > 64) 2511 return nullptr; 2512 2513 Instruction::BinaryOps BinOpcode = BI->getOpcode(); 2514 // Push any Constant Int operand onto the expression stack. 2515 if (ConstInt) { 2516 uint64_t Val = ConstInt->getSExtValue(); 2517 // Add or Sub Instructions with a constant operand can potentially be 2518 // simplified. 2519 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) { 2520 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val); 2521 DIExpression::appendOffset(Opcodes, Offset); 2522 return BI->getOperand(0); 2523 } 2524 Opcodes.append({dwarf::DW_OP_constu, Val}); 2525 } else { 2526 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI); 2527 } 2528 2529 // Add salvaged binary operator to expression stack, if it has a valid 2530 // representation in a DIExpression. 2531 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode); 2532 if (!DwarfBinOp) 2533 return nullptr; 2534 Opcodes.push_back(DwarfBinOp); 2535 return BI->getOperand(0); 2536 } 2537 2538 uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) { 2539 // The signedness of the operation is implicit in the typed stack, signed and 2540 // unsigned instructions map to the same DWARF opcode. 2541 switch (Pred) { 2542 case CmpInst::ICMP_EQ: 2543 return dwarf::DW_OP_eq; 2544 case CmpInst::ICMP_NE: 2545 return dwarf::DW_OP_ne; 2546 case CmpInst::ICMP_UGT: 2547 case CmpInst::ICMP_SGT: 2548 return dwarf::DW_OP_gt; 2549 case CmpInst::ICMP_UGE: 2550 case CmpInst::ICMP_SGE: 2551 return dwarf::DW_OP_ge; 2552 case CmpInst::ICMP_ULT: 2553 case CmpInst::ICMP_SLT: 2554 return dwarf::DW_OP_lt; 2555 case CmpInst::ICMP_ULE: 2556 case CmpInst::ICMP_SLE: 2557 return dwarf::DW_OP_le; 2558 default: 2559 return 0; 2560 } 2561 } 2562 2563 Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, 2564 SmallVectorImpl<uint64_t> &Opcodes, 2565 SmallVectorImpl<Value *> &AdditionalValues) { 2566 // Handle icmp operations with constant integer operands as a special case. 2567 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1)); 2568 // Values wider than 64 bits cannot be represented within a DIExpression. 2569 if (ConstInt && ConstInt->getBitWidth() > 64) 2570 return nullptr; 2571 // Push any Constant Int operand onto the expression stack. 2572 if (ConstInt) { 2573 if (Icmp->isSigned()) 2574 Opcodes.push_back(dwarf::DW_OP_consts); 2575 else 2576 Opcodes.push_back(dwarf::DW_OP_constu); 2577 uint64_t Val = ConstInt->getSExtValue(); 2578 Opcodes.push_back(Val); 2579 } else { 2580 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp); 2581 } 2582 2583 // Add salvaged binary operator to expression stack, if it has a valid 2584 // representation in a DIExpression. 2585 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate()); 2586 if (!DwarfIcmpOp) 2587 return nullptr; 2588 Opcodes.push_back(DwarfIcmpOp); 2589 return Icmp->getOperand(0); 2590 } 2591 2592 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, 2593 SmallVectorImpl<uint64_t> &Ops, 2594 SmallVectorImpl<Value *> &AdditionalValues) { 2595 auto &M = *I.getModule(); 2596 auto &DL = M.getDataLayout(); 2597 2598 if (auto *CI = dyn_cast<CastInst>(&I)) { 2599 Value *FromValue = CI->getOperand(0); 2600 // No-op casts are irrelevant for debug info. 2601 if (CI->isNoopCast(DL)) { 2602 return FromValue; 2603 } 2604 2605 Type *Type = CI->getType(); 2606 if (Type->isPointerTy()) 2607 Type = DL.getIntPtrType(Type); 2608 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged. 2609 if (Type->isVectorTy() || 2610 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) || 2611 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I))) 2612 return nullptr; 2613 2614 llvm::Type *FromType = FromValue->getType(); 2615 if (FromType->isPointerTy()) 2616 FromType = DL.getIntPtrType(FromType); 2617 2618 unsigned FromTypeBitSize = FromType->getScalarSizeInBits(); 2619 unsigned ToTypeBitSize = Type->getScalarSizeInBits(); 2620 2621 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize, 2622 isa<SExtInst>(&I)); 2623 Ops.append(ExtOps.begin(), ExtOps.end()); 2624 return FromValue; 2625 } 2626 2627 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) 2628 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues); 2629 if (auto *BI = dyn_cast<BinaryOperator>(&I)) 2630 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues); 2631 if (auto *IC = dyn_cast<ICmpInst>(&I)) 2632 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues); 2633 2634 // *Not* to do: we should not attempt to salvage load instructions, 2635 // because the validity and lifetime of a dbg.value containing 2636 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples. 2637 return nullptr; 2638 } 2639 2640 /// A replacement for a dbg.value expression. 2641 using DbgValReplacement = std::optional<DIExpression *>; 2642 2643 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr, 2644 /// possibly moving/undefing users to prevent use-before-def. Returns true if 2645 /// changes are made. 2646 static bool rewriteDebugUsers( 2647 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, 2648 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr, 2649 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) { 2650 // Find debug users of From. 2651 SmallVector<DbgVariableIntrinsic *, 1> Users; 2652 SmallVector<DbgVariableRecord *, 1> DPUsers; 2653 findDbgUsers(Users, &From, &DPUsers); 2654 if (Users.empty() && DPUsers.empty()) 2655 return false; 2656 2657 // Prevent use-before-def of To. 2658 bool Changed = false; 2659 2660 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage; 2661 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR; 2662 if (isa<Instruction>(&To)) { 2663 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint; 2664 2665 for (auto *DII : Users) { 2666 // It's common to see a debug user between From and DomPoint. Move it 2667 // after DomPoint to preserve the variable update without any reordering. 2668 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) { 2669 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n'); 2670 DII->moveAfter(&DomPoint); 2671 Changed = true; 2672 2673 // Users which otherwise aren't dominated by the replacement value must 2674 // be salvaged or deleted. 2675 } else if (!DT.dominates(&DomPoint, DII)) { 2676 UndefOrSalvage.insert(DII); 2677 } 2678 } 2679 2680 // DbgVariableRecord implementation of the above. 2681 for (auto *DVR : DPUsers) { 2682 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr; 2683 Instruction *NextNonDebug = MarkedInstr; 2684 // The next instruction might still be a dbg.declare, skip over it. 2685 if (isa<DbgVariableIntrinsic>(NextNonDebug)) 2686 NextNonDebug = NextNonDebug->getNextNonDebugInstruction(); 2687 2688 if (DomPointAfterFrom && NextNonDebug == &DomPoint) { 2689 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n'); 2690 DVR->removeFromParent(); 2691 // Ensure there's a marker. 2692 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint); 2693 Changed = true; 2694 } else if (!DT.dominates(&DomPoint, MarkedInstr)) { 2695 UndefOrSalvageDVR.insert(DVR); 2696 } 2697 } 2698 } 2699 2700 // Update debug users without use-before-def risk. 2701 for (auto *DII : Users) { 2702 if (UndefOrSalvage.count(DII)) 2703 continue; 2704 2705 DbgValReplacement DVRepl = RewriteExpr(*DII); 2706 if (!DVRepl) 2707 continue; 2708 2709 DII->replaceVariableLocationOp(&From, &To); 2710 DII->setExpression(*DVRepl); 2711 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n'); 2712 Changed = true; 2713 } 2714 for (auto *DVR : DPUsers) { 2715 if (UndefOrSalvageDVR.count(DVR)) 2716 continue; 2717 2718 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR); 2719 if (!DVRepl) 2720 continue; 2721 2722 DVR->replaceVariableLocationOp(&From, &To); 2723 DVR->setExpression(*DVRepl); 2724 LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n'); 2725 Changed = true; 2726 } 2727 2728 if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) { 2729 // Try to salvage the remaining debug users. 2730 salvageDebugInfo(From); 2731 Changed = true; 2732 } 2733 2734 return Changed; 2735 } 2736 2737 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would 2738 /// losslessly preserve the bits and semantics of the value. This predicate is 2739 /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result. 2740 /// 2741 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it 2742 /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>, 2743 /// and also does not allow lossless pointer <-> integer conversions. 2744 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, 2745 Type *ToTy) { 2746 // Trivially compatible types. 2747 if (FromTy == ToTy) 2748 return true; 2749 2750 // Handle compatible pointer <-> integer conversions. 2751 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) { 2752 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy); 2753 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) && 2754 !DL.isNonIntegralPointerType(ToTy); 2755 return SameSize && LosslessConversion; 2756 } 2757 2758 // TODO: This is not exhaustive. 2759 return false; 2760 } 2761 2762 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To, 2763 Instruction &DomPoint, DominatorTree &DT) { 2764 // Exit early if From has no debug users. 2765 if (!From.isUsedByMetadata()) 2766 return false; 2767 2768 assert(&From != &To && "Can't replace something with itself"); 2769 2770 Type *FromTy = From.getType(); 2771 Type *ToTy = To.getType(); 2772 2773 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement { 2774 return DII.getExpression(); 2775 }; 2776 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement { 2777 return DVR.getExpression(); 2778 }; 2779 2780 // Handle no-op conversions. 2781 Module &M = *From.getModule(); 2782 const DataLayout &DL = M.getDataLayout(); 2783 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy)) 2784 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR); 2785 2786 // Handle integer-to-integer widening and narrowing. 2787 // FIXME: Use DW_OP_convert when it's available everywhere. 2788 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) { 2789 uint64_t FromBits = FromTy->getPrimitiveSizeInBits(); 2790 uint64_t ToBits = ToTy->getPrimitiveSizeInBits(); 2791 assert(FromBits != ToBits && "Unexpected no-op conversion"); 2792 2793 // When the width of the result grows, assume that a debugger will only 2794 // access the low `FromBits` bits when inspecting the source variable. 2795 if (FromBits < ToBits) 2796 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR); 2797 2798 // The width of the result has shrunk. Use sign/zero extension to describe 2799 // the source variable's high bits. 2800 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement { 2801 DILocalVariable *Var = DII.getVariable(); 2802 2803 // Without knowing signedness, sign/zero extension isn't possible. 2804 auto Signedness = Var->getSignedness(); 2805 if (!Signedness) 2806 return std::nullopt; 2807 2808 bool Signed = *Signedness == DIBasicType::Signedness::Signed; 2809 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits, 2810 Signed); 2811 }; 2812 // RemoveDIs: duplicate implementation working on DbgVariableRecords rather 2813 // than on dbg.value intrinsics. 2814 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement { 2815 DILocalVariable *Var = DVR.getVariable(); 2816 2817 // Without knowing signedness, sign/zero extension isn't possible. 2818 auto Signedness = Var->getSignedness(); 2819 if (!Signedness) 2820 return std::nullopt; 2821 2822 bool Signed = *Signedness == DIBasicType::Signedness::Signed; 2823 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits, 2824 Signed); 2825 }; 2826 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt, 2827 SignOrZeroExtDVR); 2828 } 2829 2830 // TODO: Floating-point conversions, vectors. 2831 return false; 2832 } 2833 2834 bool llvm::handleUnreachableTerminator( 2835 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) { 2836 bool Changed = false; 2837 // RemoveDIs: erase debug-info on this instruction manually. 2838 I->dropDbgRecords(); 2839 for (Use &U : I->operands()) { 2840 Value *Op = U.get(); 2841 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) { 2842 U.set(PoisonValue::get(Op->getType())); 2843 PoisonedValues.push_back(Op); 2844 Changed = true; 2845 } 2846 } 2847 2848 return Changed; 2849 } 2850 2851 unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { 2852 unsigned NumDeadInst = 0; 2853 // Delete the instructions backwards, as it has a reduced likelihood of 2854 // having to update as many def-use and use-def chains. 2855 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. 2856 SmallVector<Value *> Uses; 2857 handleUnreachableTerminator(EndInst, Uses); 2858 2859 while (EndInst != &BB->front()) { 2860 // Delete the next to last instruction. 2861 Instruction *Inst = &*--EndInst->getIterator(); 2862 if (!Inst->use_empty() && !Inst->getType()->isTokenTy()) 2863 Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType())); 2864 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) { 2865 // EHPads can't have DbgVariableRecords attached to them, but it might be 2866 // possible for things with token type. 2867 Inst->dropDbgRecords(); 2868 EndInst = Inst; 2869 continue; 2870 } 2871 ++NumDeadInst; 2872 // RemoveDIs: erasing debug-info must be done manually. 2873 Inst->dropDbgRecords(); 2874 Inst->eraseFromParent(); 2875 } 2876 return NumDeadInst; 2877 } 2878 2879 unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA, 2880 DomTreeUpdater *DTU, 2881 MemorySSAUpdater *MSSAU) { 2882 BasicBlock *BB = I->getParent(); 2883 2884 if (MSSAU) 2885 MSSAU->changeToUnreachable(I); 2886 2887 SmallSet<BasicBlock *, 8> UniqueSuccessors; 2888 2889 // Loop over all of the successors, removing BB's entry from any PHI 2890 // nodes. 2891 for (BasicBlock *Successor : successors(BB)) { 2892 Successor->removePredecessor(BB, PreserveLCSSA); 2893 if (DTU) 2894 UniqueSuccessors.insert(Successor); 2895 } 2896 auto *UI = new UnreachableInst(I->getContext(), I->getIterator()); 2897 UI->setDebugLoc(I->getDebugLoc()); 2898 2899 // All instructions after this are dead. 2900 unsigned NumInstrsRemoved = 0; 2901 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end(); 2902 while (BBI != BBE) { 2903 if (!BBI->use_empty()) 2904 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType())); 2905 BBI++->eraseFromParent(); 2906 ++NumInstrsRemoved; 2907 } 2908 if (DTU) { 2909 SmallVector<DominatorTree::UpdateType, 8> Updates; 2910 Updates.reserve(UniqueSuccessors.size()); 2911 for (BasicBlock *UniqueSuccessor : UniqueSuccessors) 2912 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor}); 2913 DTU->applyUpdates(Updates); 2914 } 2915 BB->flushTerminatorDbgRecords(); 2916 return NumInstrsRemoved; 2917 } 2918 2919 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) { 2920 SmallVector<Value *, 8> Args(II->args()); 2921 SmallVector<OperandBundleDef, 1> OpBundles; 2922 II->getOperandBundlesAsDefs(OpBundles); 2923 CallInst *NewCall = CallInst::Create(II->getFunctionType(), 2924 II->getCalledOperand(), Args, OpBundles); 2925 NewCall->setCallingConv(II->getCallingConv()); 2926 NewCall->setAttributes(II->getAttributes()); 2927 NewCall->setDebugLoc(II->getDebugLoc()); 2928 NewCall->copyMetadata(*II); 2929 2930 // If the invoke had profile metadata, try converting them for CallInst. 2931 uint64_t TotalWeight; 2932 if (NewCall->extractProfTotalWeight(TotalWeight)) { 2933 // Set the total weight if it fits into i32, otherwise reset. 2934 MDBuilder MDB(NewCall->getContext()); 2935 auto NewWeights = uint32_t(TotalWeight) != TotalWeight 2936 ? nullptr 2937 : MDB.createBranchWeights({uint32_t(TotalWeight)}); 2938 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights); 2939 } 2940 2941 return NewCall; 2942 } 2943 2944 // changeToCall - Convert the specified invoke into a normal call. 2945 CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) { 2946 CallInst *NewCall = createCallMatchingInvoke(II); 2947 NewCall->takeName(II); 2948 NewCall->insertBefore(II->getIterator()); 2949 II->replaceAllUsesWith(NewCall); 2950 2951 // Follow the call by a branch to the normal destination. 2952 BasicBlock *NormalDestBB = II->getNormalDest(); 2953 auto *BI = BranchInst::Create(NormalDestBB, II->getIterator()); 2954 // Although it takes place after the call itself, the new branch is still 2955 // performing part of the control-flow functionality of the invoke, so we use 2956 // II's DebugLoc. 2957 BI->setDebugLoc(II->getDebugLoc()); 2958 2959 // Update PHI nodes in the unwind destination 2960 BasicBlock *BB = II->getParent(); 2961 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2962 UnwindDestBB->removePredecessor(BB); 2963 II->eraseFromParent(); 2964 if (DTU) 2965 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}}); 2966 return NewCall; 2967 } 2968 2969 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI, 2970 BasicBlock *UnwindEdge, 2971 DomTreeUpdater *DTU) { 2972 BasicBlock *BB = CI->getParent(); 2973 2974 // Convert this function call into an invoke instruction. First, split the 2975 // basic block. 2976 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr, 2977 CI->getName() + ".noexc"); 2978 2979 // Delete the unconditional branch inserted by SplitBlock 2980 BB->back().eraseFromParent(); 2981 2982 // Create the new invoke instruction. 2983 SmallVector<Value *, 8> InvokeArgs(CI->args()); 2984 SmallVector<OperandBundleDef, 1> OpBundles; 2985 2986 CI->getOperandBundlesAsDefs(OpBundles); 2987 2988 // Note: we're round tripping operand bundles through memory here, and that 2989 // can potentially be avoided with a cleverer API design that we do not have 2990 // as of this time. 2991 2992 InvokeInst *II = 2993 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split, 2994 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB); 2995 II->setDebugLoc(CI->getDebugLoc()); 2996 II->setCallingConv(CI->getCallingConv()); 2997 II->setAttributes(CI->getAttributes()); 2998 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof)); 2999 3000 if (DTU) 3001 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}}); 3002 3003 // Make sure that anything using the call now uses the invoke! This also 3004 // updates the CallGraph if present, because it uses a WeakTrackingVH. 3005 CI->replaceAllUsesWith(II); 3006 3007 // Delete the original call 3008 Split->front().eraseFromParent(); 3009 return Split; 3010 } 3011 3012 static bool markAliveBlocks(Function &F, 3013 SmallPtrSetImpl<BasicBlock *> &Reachable, 3014 DomTreeUpdater *DTU = nullptr) { 3015 SmallVector<BasicBlock*, 128> Worklist; 3016 BasicBlock *BB = &F.front(); 3017 Worklist.push_back(BB); 3018 Reachable.insert(BB); 3019 bool Changed = false; 3020 do { 3021 BB = Worklist.pop_back_val(); 3022 3023 // Do a quick scan of the basic block, turning any obviously unreachable 3024 // instructions into LLVM unreachable insts. The instruction combining pass 3025 // canonicalizes unreachable insts into stores to null or undef. 3026 for (Instruction &I : *BB) { 3027 if (auto *CI = dyn_cast<CallInst>(&I)) { 3028 Value *Callee = CI->getCalledOperand(); 3029 // Handle intrinsic calls. 3030 if (Function *F = dyn_cast<Function>(Callee)) { 3031 auto IntrinsicID = F->getIntrinsicID(); 3032 // Assumptions that are known to be false are equivalent to 3033 // unreachable. Also, if the condition is undefined, then we make the 3034 // choice most beneficial to the optimizer, and choose that to also be 3035 // unreachable. 3036 if (IntrinsicID == Intrinsic::assume) { 3037 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) { 3038 // Don't insert a call to llvm.trap right before the unreachable. 3039 changeToUnreachable(CI, false, DTU); 3040 Changed = true; 3041 break; 3042 } 3043 } else if (IntrinsicID == Intrinsic::experimental_guard) { 3044 // A call to the guard intrinsic bails out of the current 3045 // compilation unit if the predicate passed to it is false. If the 3046 // predicate is a constant false, then we know the guard will bail 3047 // out of the current compile unconditionally, so all code following 3048 // it is dead. 3049 // 3050 // Note: unlike in llvm.assume, it is not "obviously profitable" for 3051 // guards to treat `undef` as `false` since a guard on `undef` can 3052 // still be useful for widening. 3053 if (match(CI->getArgOperand(0), m_Zero())) 3054 if (!isa<UnreachableInst>(CI->getNextNode())) { 3055 changeToUnreachable(CI->getNextNode(), false, DTU); 3056 Changed = true; 3057 break; 3058 } 3059 } 3060 } else if ((isa<ConstantPointerNull>(Callee) && 3061 !NullPointerIsDefined(CI->getFunction(), 3062 cast<PointerType>(Callee->getType()) 3063 ->getAddressSpace())) || 3064 isa<UndefValue>(Callee)) { 3065 changeToUnreachable(CI, false, DTU); 3066 Changed = true; 3067 break; 3068 } 3069 if (CI->doesNotReturn() && !CI->isMustTailCall()) { 3070 // If we found a call to a no-return function, insert an unreachable 3071 // instruction after it. Make sure there isn't *already* one there 3072 // though. 3073 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) { 3074 // Don't insert a call to llvm.trap right before the unreachable. 3075 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU); 3076 Changed = true; 3077 } 3078 break; 3079 } 3080 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 3081 // Store to undef and store to null are undefined and used to signal 3082 // that they should be changed to unreachable by passes that can't 3083 // modify the CFG. 3084 3085 // Don't touch volatile stores. 3086 if (SI->isVolatile()) continue; 3087 3088 Value *Ptr = SI->getOperand(1); 3089 3090 if (isa<UndefValue>(Ptr) || 3091 (isa<ConstantPointerNull>(Ptr) && 3092 !NullPointerIsDefined(SI->getFunction(), 3093 SI->getPointerAddressSpace()))) { 3094 changeToUnreachable(SI, false, DTU); 3095 Changed = true; 3096 break; 3097 } 3098 } 3099 } 3100 3101 Instruction *Terminator = BB->getTerminator(); 3102 if (auto *II = dyn_cast<InvokeInst>(Terminator)) { 3103 // Turn invokes that call 'nounwind' functions into ordinary calls. 3104 Value *Callee = II->getCalledOperand(); 3105 if ((isa<ConstantPointerNull>(Callee) && 3106 !NullPointerIsDefined(BB->getParent())) || 3107 isa<UndefValue>(Callee)) { 3108 changeToUnreachable(II, false, DTU); 3109 Changed = true; 3110 } else { 3111 if (II->doesNotReturn() && 3112 !isa<UnreachableInst>(II->getNormalDest()->front())) { 3113 // If we found an invoke of a no-return function, 3114 // create a new empty basic block with an `unreachable` terminator, 3115 // and set it as the normal destination for the invoke, 3116 // unless that is already the case. 3117 // Note that the original normal destination could have other uses. 3118 BasicBlock *OrigNormalDest = II->getNormalDest(); 3119 OrigNormalDest->removePredecessor(II->getParent()); 3120 LLVMContext &Ctx = II->getContext(); 3121 BasicBlock *UnreachableNormalDest = BasicBlock::Create( 3122 Ctx, OrigNormalDest->getName() + ".unreachable", 3123 II->getFunction(), OrigNormalDest); 3124 auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest); 3125 UI->setDebugLoc(DebugLoc::getTemporary()); 3126 II->setNormalDest(UnreachableNormalDest); 3127 if (DTU) 3128 DTU->applyUpdates( 3129 {{DominatorTree::Delete, BB, OrigNormalDest}, 3130 {DominatorTree::Insert, BB, UnreachableNormalDest}}); 3131 Changed = true; 3132 } 3133 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) { 3134 if (II->use_empty() && !II->mayHaveSideEffects()) { 3135 // jump to the normal destination branch. 3136 BasicBlock *NormalDestBB = II->getNormalDest(); 3137 BasicBlock *UnwindDestBB = II->getUnwindDest(); 3138 BranchInst::Create(NormalDestBB, II->getIterator()); 3139 UnwindDestBB->removePredecessor(II->getParent()); 3140 II->eraseFromParent(); 3141 if (DTU) 3142 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}}); 3143 } else 3144 changeToCall(II, DTU); 3145 Changed = true; 3146 } 3147 } 3148 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) { 3149 // Remove catchpads which cannot be reached. 3150 struct CatchPadDenseMapInfo { 3151 static CatchPadInst *getEmptyKey() { 3152 return DenseMapInfo<CatchPadInst *>::getEmptyKey(); 3153 } 3154 3155 static CatchPadInst *getTombstoneKey() { 3156 return DenseMapInfo<CatchPadInst *>::getTombstoneKey(); 3157 } 3158 3159 static unsigned getHashValue(CatchPadInst *CatchPad) { 3160 return static_cast<unsigned>(hash_combine_range( 3161 CatchPad->value_op_begin(), CatchPad->value_op_end())); 3162 } 3163 3164 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) { 3165 if (LHS == getEmptyKey() || LHS == getTombstoneKey() || 3166 RHS == getEmptyKey() || RHS == getTombstoneKey()) 3167 return LHS == RHS; 3168 return LHS->isIdenticalTo(RHS); 3169 } 3170 }; 3171 3172 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases; 3173 // Set of unique CatchPads. 3174 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4, 3175 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>> 3176 HandlerSet; 3177 detail::DenseSetEmpty Empty; 3178 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(), 3179 E = CatchSwitch->handler_end(); 3180 I != E; ++I) { 3181 BasicBlock *HandlerBB = *I; 3182 if (DTU) 3183 ++NumPerSuccessorCases[HandlerBB]; 3184 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHIIt()); 3185 if (!HandlerSet.insert({CatchPad, Empty}).second) { 3186 if (DTU) 3187 --NumPerSuccessorCases[HandlerBB]; 3188 CatchSwitch->removeHandler(I); 3189 --I; 3190 --E; 3191 Changed = true; 3192 } 3193 } 3194 if (DTU) { 3195 std::vector<DominatorTree::UpdateType> Updates; 3196 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases) 3197 if (I.second == 0) 3198 Updates.push_back({DominatorTree::Delete, BB, I.first}); 3199 DTU->applyUpdates(Updates); 3200 } 3201 } 3202 3203 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU); 3204 for (BasicBlock *Successor : successors(BB)) 3205 if (Reachable.insert(Successor).second) 3206 Worklist.push_back(Successor); 3207 } while (!Worklist.empty()); 3208 return Changed; 3209 } 3210 3211 Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) { 3212 Instruction *TI = BB->getTerminator(); 3213 3214 if (auto *II = dyn_cast<InvokeInst>(TI)) 3215 return changeToCall(II, DTU); 3216 3217 Instruction *NewTI; 3218 BasicBlock *UnwindDest; 3219 3220 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) { 3221 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator()); 3222 UnwindDest = CRI->getUnwindDest(); 3223 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) { 3224 auto *NewCatchSwitch = CatchSwitchInst::Create( 3225 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(), 3226 CatchSwitch->getName(), CatchSwitch->getIterator()); 3227 for (BasicBlock *PadBB : CatchSwitch->handlers()) 3228 NewCatchSwitch->addHandler(PadBB); 3229 3230 NewTI = NewCatchSwitch; 3231 UnwindDest = CatchSwitch->getUnwindDest(); 3232 } else { 3233 llvm_unreachable("Could not find unwind successor"); 3234 } 3235 3236 NewTI->takeName(TI); 3237 NewTI->setDebugLoc(TI->getDebugLoc()); 3238 UnwindDest->removePredecessor(BB); 3239 TI->replaceAllUsesWith(NewTI); 3240 TI->eraseFromParent(); 3241 if (DTU) 3242 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}}); 3243 return NewTI; 3244 } 3245 3246 /// removeUnreachableBlocks - Remove blocks that are not reachable, even 3247 /// if they are in a dead cycle. Return true if a change was made, false 3248 /// otherwise. 3249 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU, 3250 MemorySSAUpdater *MSSAU) { 3251 SmallPtrSet<BasicBlock *, 16> Reachable; 3252 bool Changed = markAliveBlocks(F, Reachable, DTU); 3253 3254 // If there are unreachable blocks in the CFG... 3255 if (Reachable.size() == F.size()) 3256 return Changed; 3257 3258 assert(Reachable.size() < F.size()); 3259 3260 // Are there any blocks left to actually delete? 3261 SmallSetVector<BasicBlock *, 8> BlocksToRemove; 3262 for (BasicBlock &BB : F) { 3263 // Skip reachable basic blocks 3264 if (Reachable.count(&BB)) 3265 continue; 3266 // Skip already-deleted blocks 3267 if (DTU && DTU->isBBPendingDeletion(&BB)) 3268 continue; 3269 BlocksToRemove.insert(&BB); 3270 } 3271 3272 if (BlocksToRemove.empty()) 3273 return Changed; 3274 3275 Changed = true; 3276 NumRemoved += BlocksToRemove.size(); 3277 3278 if (MSSAU) 3279 MSSAU->removeBlocks(BlocksToRemove); 3280 3281 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU); 3282 3283 return Changed; 3284 } 3285 3286 /// If AAOnly is set, only intersect alias analysis metadata and preserve other 3287 /// known metadata. Unknown metadata is always dropped. 3288 static void combineMetadata(Instruction *K, const Instruction *J, 3289 bool DoesKMove, bool AAOnly = false) { 3290 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 3291 K->getAllMetadataOtherThanDebugLoc(Metadata); 3292 for (const auto &MD : Metadata) { 3293 unsigned Kind = MD.first; 3294 MDNode *JMD = J->getMetadata(Kind); 3295 MDNode *KMD = MD.second; 3296 3297 // TODO: Assert that this switch is exhaustive for fixed MD kinds. 3298 switch (Kind) { 3299 default: 3300 K->setMetadata(Kind, nullptr); // Remove unknown metadata 3301 break; 3302 case LLVMContext::MD_dbg: 3303 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg"); 3304 case LLVMContext::MD_DIAssignID: 3305 if (!AAOnly) 3306 K->mergeDIAssignID(J); 3307 break; 3308 case LLVMContext::MD_tbaa: 3309 if (DoesKMove) 3310 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD)); 3311 break; 3312 case LLVMContext::MD_alias_scope: 3313 if (DoesKMove) 3314 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD)); 3315 break; 3316 case LLVMContext::MD_noalias: 3317 case LLVMContext::MD_mem_parallel_loop_access: 3318 if (DoesKMove) 3319 K->setMetadata(Kind, MDNode::intersect(JMD, KMD)); 3320 break; 3321 case LLVMContext::MD_access_group: 3322 if (DoesKMove) 3323 K->setMetadata(LLVMContext::MD_access_group, 3324 intersectAccessGroups(K, J)); 3325 break; 3326 case LLVMContext::MD_range: 3327 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))) 3328 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD)); 3329 break; 3330 case LLVMContext::MD_fpmath: 3331 if (!AAOnly) 3332 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD)); 3333 break; 3334 case LLVMContext::MD_invariant_load: 3335 // If K moves, only set the !invariant.load if it is present in both 3336 // instructions. 3337 if (DoesKMove) 3338 K->setMetadata(Kind, JMD); 3339 break; 3340 case LLVMContext::MD_nonnull: 3341 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))) 3342 K->setMetadata(Kind, JMD); 3343 break; 3344 case LLVMContext::MD_invariant_group: 3345 // Preserve !invariant.group in K. 3346 break; 3347 // Keep empty cases for prof, mmra, memprof, and callsite to prevent them 3348 // from being removed as unknown metadata. The actual merging is handled 3349 // separately below. 3350 case LLVMContext::MD_prof: 3351 case LLVMContext::MD_mmra: 3352 case LLVMContext::MD_memprof: 3353 case LLVMContext::MD_callsite: 3354 break; 3355 case LLVMContext::MD_align: 3356 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))) 3357 K->setMetadata( 3358 Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD)); 3359 break; 3360 case LLVMContext::MD_dereferenceable: 3361 case LLVMContext::MD_dereferenceable_or_null: 3362 if (!AAOnly && DoesKMove) 3363 K->setMetadata(Kind, 3364 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD)); 3365 break; 3366 case LLVMContext::MD_preserve_access_index: 3367 // Preserve !preserve.access.index in K. 3368 break; 3369 case LLVMContext::MD_noundef: 3370 // If K does move, keep noundef if it is present in both instructions. 3371 if (!AAOnly && DoesKMove) 3372 K->setMetadata(Kind, JMD); 3373 break; 3374 case LLVMContext::MD_nontemporal: 3375 // Preserve !nontemporal if it is present on both instructions. 3376 if (!AAOnly) 3377 K->setMetadata(Kind, JMD); 3378 break; 3379 case LLVMContext::MD_noalias_addrspace: 3380 if (DoesKMove) 3381 K->setMetadata(Kind, 3382 MDNode::getMostGenericNoaliasAddrspace(JMD, KMD)); 3383 break; 3384 case LLVMContext::MD_nosanitize: 3385 // Preserve !nosanitize if both K and J have it. 3386 K->setMetadata(Kind, JMD); 3387 break; 3388 } 3389 } 3390 // Set !invariant.group from J if J has it. If both instructions have it 3391 // then we will just pick it from J - even when they are different. 3392 // Also make sure that K is load or store - f.e. combining bitcast with load 3393 // could produce bitcast with invariant.group metadata, which is invalid. 3394 // FIXME: we should try to preserve both invariant.group md if they are 3395 // different, but right now instruction can only have one invariant.group. 3396 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group)) 3397 if (isa<LoadInst>(K) || isa<StoreInst>(K)) 3398 K->setMetadata(LLVMContext::MD_invariant_group, JMD); 3399 3400 // Merge MMRAs. 3401 // This is handled separately because we also want to handle cases where K 3402 // doesn't have tags but J does. 3403 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra); 3404 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra); 3405 if (JMMRA || KMMRA) { 3406 K->setMetadata(LLVMContext::MD_mmra, 3407 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA)); 3408 } 3409 3410 // Merge memprof metadata. 3411 // Handle separately to support cases where only one instruction has the 3412 // metadata. 3413 auto *JMemProf = J->getMetadata(LLVMContext::MD_memprof); 3414 auto *KMemProf = K->getMetadata(LLVMContext::MD_memprof); 3415 if (!AAOnly && (JMemProf || KMemProf)) { 3416 K->setMetadata(LLVMContext::MD_memprof, 3417 MDNode::getMergedMemProfMetadata(KMemProf, JMemProf)); 3418 } 3419 3420 // Merge callsite metadata. 3421 // Handle separately to support cases where only one instruction has the 3422 // metadata. 3423 auto *JCallSite = J->getMetadata(LLVMContext::MD_callsite); 3424 auto *KCallSite = K->getMetadata(LLVMContext::MD_callsite); 3425 if (!AAOnly && (JCallSite || KCallSite)) { 3426 K->setMetadata(LLVMContext::MD_callsite, 3427 MDNode::getMergedCallsiteMetadata(KCallSite, JCallSite)); 3428 } 3429 3430 // Merge prof metadata. 3431 // Handle separately to support cases where only one instruction has the 3432 // metadata. 3433 auto *JProf = J->getMetadata(LLVMContext::MD_prof); 3434 auto *KProf = K->getMetadata(LLVMContext::MD_prof); 3435 if (!AAOnly && (JProf || KProf)) { 3436 K->setMetadata(LLVMContext::MD_prof, 3437 MDNode::getMergedProfMetadata(KProf, JProf, K, J)); 3438 } 3439 } 3440 3441 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J, 3442 bool DoesKMove) { 3443 combineMetadata(K, J, DoesKMove); 3444 } 3445 3446 void llvm::combineAAMetadata(Instruction *K, const Instruction *J) { 3447 combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true); 3448 } 3449 3450 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) { 3451 SmallVector<std::pair<unsigned, MDNode *>, 8> MD; 3452 Source.getAllMetadata(MD); 3453 MDBuilder MDB(Dest.getContext()); 3454 Type *NewType = Dest.getType(); 3455 const DataLayout &DL = Source.getDataLayout(); 3456 for (const auto &MDPair : MD) { 3457 unsigned ID = MDPair.first; 3458 MDNode *N = MDPair.second; 3459 // Note, essentially every kind of metadata should be preserved here! This 3460 // routine is supposed to clone a load instruction changing *only its type*. 3461 // The only metadata it makes sense to drop is metadata which is invalidated 3462 // when the pointer type changes. This should essentially never be the case 3463 // in LLVM, but we explicitly switch over only known metadata to be 3464 // conservatively correct. If you are adding metadata to LLVM which pertains 3465 // to loads, you almost certainly want to add it here. 3466 switch (ID) { 3467 case LLVMContext::MD_dbg: 3468 case LLVMContext::MD_tbaa: 3469 case LLVMContext::MD_prof: 3470 case LLVMContext::MD_fpmath: 3471 case LLVMContext::MD_tbaa_struct: 3472 case LLVMContext::MD_invariant_load: 3473 case LLVMContext::MD_alias_scope: 3474 case LLVMContext::MD_noalias: 3475 case LLVMContext::MD_nontemporal: 3476 case LLVMContext::MD_mem_parallel_loop_access: 3477 case LLVMContext::MD_access_group: 3478 case LLVMContext::MD_noundef: 3479 case LLVMContext::MD_noalias_addrspace: 3480 // All of these directly apply. 3481 Dest.setMetadata(ID, N); 3482 break; 3483 3484 case LLVMContext::MD_nonnull: 3485 copyNonnullMetadata(Source, N, Dest); 3486 break; 3487 3488 case LLVMContext::MD_align: 3489 case LLVMContext::MD_dereferenceable: 3490 case LLVMContext::MD_dereferenceable_or_null: 3491 // These only directly apply if the new type is also a pointer. 3492 if (NewType->isPointerTy()) 3493 Dest.setMetadata(ID, N); 3494 break; 3495 3496 case LLVMContext::MD_range: 3497 copyRangeMetadata(DL, Source, N, Dest); 3498 break; 3499 } 3500 } 3501 } 3502 3503 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) { 3504 auto *ReplInst = dyn_cast<Instruction>(Repl); 3505 if (!ReplInst) 3506 return; 3507 3508 // Patch the replacement so that it is not more restrictive than the value 3509 // being replaced. 3510 WithOverflowInst *UnusedWO; 3511 // When replacing the result of a llvm.*.with.overflow intrinsic with a 3512 // overflowing binary operator, nuw/nsw flags may no longer hold. 3513 if (isa<OverflowingBinaryOperator>(ReplInst) && 3514 match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO)))) 3515 ReplInst->dropPoisonGeneratingFlags(); 3516 // Note that if 'I' is a load being replaced by some operation, 3517 // for example, by an arithmetic operation, then andIRFlags() 3518 // would just erase all math flags from the original arithmetic 3519 // operation, which is clearly not wanted and not needed. 3520 else if (!isa<LoadInst>(I)) 3521 ReplInst->andIRFlags(I); 3522 3523 // Handle attributes. 3524 if (auto *CB1 = dyn_cast<CallBase>(ReplInst)) { 3525 if (auto *CB2 = dyn_cast<CallBase>(I)) { 3526 bool Success = CB1->tryIntersectAttributes(CB2); 3527 assert(Success && "We should not be trying to sink callbases " 3528 "with non-intersectable attributes"); 3529 // For NDEBUG Compile. 3530 (void)Success; 3531 } 3532 } 3533 3534 // FIXME: If both the original and replacement value are part of the 3535 // same control-flow region (meaning that the execution of one 3536 // guarantees the execution of the other), then we can combine the 3537 // noalias scopes here and do better than the general conservative 3538 // answer used in combineMetadata(). 3539 3540 // In general, GVN unifies expressions over different control-flow 3541 // regions, and so we need a conservative combination of the noalias 3542 // scopes. 3543 combineMetadataForCSE(ReplInst, I, false); 3544 } 3545 3546 template <typename RootType, typename ShouldReplaceFn> 3547 static unsigned replaceDominatedUsesWith(Value *From, Value *To, 3548 const RootType &Root, 3549 const ShouldReplaceFn &ShouldReplace) { 3550 assert(From->getType() == To->getType()); 3551 3552 unsigned Count = 0; 3553 for (Use &U : llvm::make_early_inc_range(From->uses())) { 3554 auto *II = dyn_cast<IntrinsicInst>(U.getUser()); 3555 if (II && II->getIntrinsicID() == Intrinsic::fake_use) 3556 continue; 3557 if (!ShouldReplace(Root, U)) 3558 continue; 3559 LLVM_DEBUG(dbgs() << "Replace dominated use of '"; 3560 From->printAsOperand(dbgs()); 3561 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n"); 3562 U.set(To); 3563 ++Count; 3564 } 3565 return Count; 3566 } 3567 3568 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) { 3569 assert(From->getType() == To->getType()); 3570 auto *BB = From->getParent(); 3571 unsigned Count = 0; 3572 3573 for (Use &U : llvm::make_early_inc_range(From->uses())) { 3574 auto *I = cast<Instruction>(U.getUser()); 3575 if (I->getParent() == BB) 3576 continue; 3577 U.set(To); 3578 ++Count; 3579 } 3580 return Count; 3581 } 3582 3583 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 3584 DominatorTree &DT, 3585 const BasicBlockEdge &Root) { 3586 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { 3587 return DT.dominates(Root, U); 3588 }; 3589 return ::replaceDominatedUsesWith(From, To, Root, Dominates); 3590 } 3591 3592 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 3593 DominatorTree &DT, 3594 const BasicBlock *BB) { 3595 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) { 3596 return DT.dominates(BB, U); 3597 }; 3598 return ::replaceDominatedUsesWith(From, To, BB, Dominates); 3599 } 3600 3601 unsigned llvm::replaceDominatedUsesWithIf( 3602 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root, 3603 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) { 3604 auto DominatesAndShouldReplace = 3605 [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) { 3606 return DT.dominates(Root, U) && ShouldReplace(U, To); 3607 }; 3608 return ::replaceDominatedUsesWith(From, To, Root, DominatesAndShouldReplace); 3609 } 3610 3611 unsigned llvm::replaceDominatedUsesWithIf( 3612 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB, 3613 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) { 3614 auto DominatesAndShouldReplace = [&DT, &ShouldReplace, 3615 To](const BasicBlock *BB, const Use &U) { 3616 return DT.dominates(BB, U) && ShouldReplace(U, To); 3617 }; 3618 return ::replaceDominatedUsesWith(From, To, BB, DominatesAndShouldReplace); 3619 } 3620 3621 bool llvm::callsGCLeafFunction(const CallBase *Call, 3622 const TargetLibraryInfo &TLI) { 3623 // Check if the function is specifically marked as a gc leaf function. 3624 if (Call->hasFnAttr("gc-leaf-function")) 3625 return true; 3626 if (const Function *F = Call->getCalledFunction()) { 3627 if (F->hasFnAttribute("gc-leaf-function")) 3628 return true; 3629 3630 if (auto IID = F->getIntrinsicID()) { 3631 // Most LLVM intrinsics do not take safepoints. 3632 return IID != Intrinsic::experimental_gc_statepoint && 3633 IID != Intrinsic::experimental_deoptimize && 3634 IID != Intrinsic::memcpy_element_unordered_atomic && 3635 IID != Intrinsic::memmove_element_unordered_atomic; 3636 } 3637 } 3638 3639 // Lib calls can be materialized by some passes, and won't be 3640 // marked as 'gc-leaf-function.' All available Libcalls are 3641 // GC-leaf. 3642 LibFunc LF; 3643 if (TLI.getLibFunc(*Call, LF)) { 3644 return TLI.has(LF); 3645 } 3646 3647 return false; 3648 } 3649 3650 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, 3651 LoadInst &NewLI) { 3652 auto *NewTy = NewLI.getType(); 3653 3654 // This only directly applies if the new type is also a pointer. 3655 if (NewTy->isPointerTy()) { 3656 NewLI.setMetadata(LLVMContext::MD_nonnull, N); 3657 return; 3658 } 3659 3660 // The only other translation we can do is to integral loads with !range 3661 // metadata. 3662 if (!NewTy->isIntegerTy()) 3663 return; 3664 3665 MDBuilder MDB(NewLI.getContext()); 3666 const Value *Ptr = OldLI.getPointerOperand(); 3667 auto *ITy = cast<IntegerType>(NewTy); 3668 auto *NullInt = ConstantExpr::getPtrToInt( 3669 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy); 3670 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1)); 3671 NewLI.setMetadata(LLVMContext::MD_range, 3672 MDB.createRange(NonNullInt, NullInt)); 3673 } 3674 3675 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, 3676 MDNode *N, LoadInst &NewLI) { 3677 auto *NewTy = NewLI.getType(); 3678 // Simply copy the metadata if the type did not change. 3679 if (NewTy == OldLI.getType()) { 3680 NewLI.setMetadata(LLVMContext::MD_range, N); 3681 return; 3682 } 3683 3684 // Give up unless it is converted to a pointer where there is a single very 3685 // valuable mapping we can do reliably. 3686 // FIXME: It would be nice to propagate this in more ways, but the type 3687 // conversions make it hard. 3688 if (!NewTy->isPointerTy()) 3689 return; 3690 3691 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy); 3692 if (BitWidth == OldLI.getType()->getScalarSizeInBits() && 3693 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) { 3694 MDNode *NN = MDNode::get(OldLI.getContext(), {}); 3695 NewLI.setMetadata(LLVMContext::MD_nonnull, NN); 3696 } 3697 } 3698 3699 void llvm::dropDebugUsers(Instruction &I) { 3700 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; 3701 SmallVector<DbgVariableRecord *, 1> DPUsers; 3702 findDbgUsers(DbgUsers, &I, &DPUsers); 3703 for (auto *DII : DbgUsers) 3704 DII->eraseFromParent(); 3705 for (auto *DVR : DPUsers) 3706 DVR->eraseFromParent(); 3707 } 3708 3709 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, 3710 BasicBlock *BB) { 3711 // Since we are moving the instructions out of its basic block, we do not 3712 // retain their original debug locations (DILocations) and debug intrinsic 3713 // instructions. 3714 // 3715 // Doing so would degrade the debugging experience and adversely affect the 3716 // accuracy of profiling information. 3717 // 3718 // Currently, when hoisting the instructions, we take the following actions: 3719 // - Remove their debug intrinsic instructions. 3720 // - Set their debug locations to the values from the insertion point. 3721 // 3722 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values 3723 // need to be deleted, is because there will not be any instructions with a 3724 // DILocation in either branch left after performing the transformation. We 3725 // can only insert a dbg.value after the two branches are joined again. 3726 // 3727 // See PR38762, PR39243 for more details. 3728 // 3729 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to 3730 // encode predicated DIExpressions that yield different results on different 3731 // code paths. 3732 3733 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) { 3734 Instruction *I = &*II; 3735 I->dropUBImplyingAttrsAndMetadata(); 3736 if (I->isUsedByMetadata()) 3737 dropDebugUsers(*I); 3738 // RemoveDIs: drop debug-info too as the following code does. 3739 I->dropDbgRecords(); 3740 if (I->isDebugOrPseudoInst()) { 3741 // Remove DbgInfo and pseudo probe Intrinsics. 3742 II = I->eraseFromParent(); 3743 continue; 3744 } 3745 I->setDebugLoc(InsertPt->getDebugLoc()); 3746 ++II; 3747 } 3748 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(), 3749 BB->getTerminator()->getIterator()); 3750 } 3751 3752 DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C, 3753 Type &Ty) { 3754 // Create integer constant expression. 3755 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * { 3756 const APInt &API = cast<ConstantInt>(&CV)->getValue(); 3757 std::optional<int64_t> InitIntOpt = API.trySExtValue(); 3758 return InitIntOpt ? DIB.createConstantValueExpression( 3759 static_cast<uint64_t>(*InitIntOpt)) 3760 : nullptr; 3761 }; 3762 3763 if (isa<ConstantInt>(C)) 3764 return createIntegerExpression(C); 3765 3766 auto *FP = dyn_cast<ConstantFP>(&C); 3767 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) { 3768 const APFloat &APF = FP->getValueAPF(); 3769 APInt const &API = APF.bitcastToAPInt(); 3770 if (auto Temp = API.getZExtValue()) 3771 return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp)); 3772 return DIB.createConstantValueExpression(*API.getRawData()); 3773 } 3774 3775 if (!Ty.isPointerTy()) 3776 return nullptr; 3777 3778 if (isa<ConstantPointerNull>(C)) 3779 return DIB.createConstantValueExpression(0); 3780 3781 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C)) 3782 if (CE->getOpcode() == Instruction::IntToPtr) { 3783 const Value *V = CE->getOperand(0); 3784 if (auto CI = dyn_cast_or_null<ConstantInt>(V)) 3785 return createIntegerExpression(*CI); 3786 } 3787 return nullptr; 3788 } 3789 3790 void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) { 3791 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) { 3792 for (auto *Op : Set) { 3793 auto I = Mapping.find(Op); 3794 if (I != Mapping.end()) 3795 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true); 3796 } 3797 }; 3798 auto RemapAssignAddress = [&Mapping](auto *DA) { 3799 auto I = Mapping.find(DA->getAddress()); 3800 if (I != Mapping.end()) 3801 DA->setAddress(I->second); 3802 }; 3803 if (auto DVI = dyn_cast<DbgVariableIntrinsic>(Inst)) 3804 RemapDebugOperands(DVI, DVI->location_ops()); 3805 if (auto DAI = dyn_cast<DbgAssignIntrinsic>(Inst)) 3806 RemapAssignAddress(DAI); 3807 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) { 3808 RemapDebugOperands(&DVR, DVR.location_ops()); 3809 if (DVR.isDbgAssign()) 3810 RemapAssignAddress(&DVR); 3811 } 3812 } 3813 3814 namespace { 3815 3816 /// A potential constituent of a bitreverse or bswap expression. See 3817 /// collectBitParts for a fuller explanation. 3818 struct BitPart { 3819 BitPart(Value *P, unsigned BW) : Provider(P) { 3820 Provenance.resize(BW); 3821 } 3822 3823 /// The Value that this is a bitreverse/bswap of. 3824 Value *Provider; 3825 3826 /// The "provenance" of each bit. Provenance[A] = B means that bit A 3827 /// in Provider becomes bit B in the result of this expression. 3828 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128. 3829 3830 enum { Unset = -1 }; 3831 }; 3832 3833 } // end anonymous namespace 3834 3835 /// Analyze the specified subexpression and see if it is capable of providing 3836 /// pieces of a bswap or bitreverse. The subexpression provides a potential 3837 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in 3838 /// the output of the expression came from a corresponding bit in some other 3839 /// value. This function is recursive, and the end result is a mapping of 3840 /// bitnumber to bitnumber. It is the caller's responsibility to validate that 3841 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse. 3842 /// 3843 /// For example, if the current subexpression if "(shl i32 %X, 24)" then we know 3844 /// that the expression deposits the low byte of %X into the high byte of the 3845 /// result and that all other bits are zero. This expression is accepted and a 3846 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to 3847 /// [0-7]. 3848 /// 3849 /// For vector types, all analysis is performed at the per-element level. No 3850 /// cross-element analysis is supported (shuffle/insertion/reduction), and all 3851 /// constant masks must be splatted across all elements. 3852 /// 3853 /// To avoid revisiting values, the BitPart results are memoized into the 3854 /// provided map. To avoid unnecessary copying of BitParts, BitParts are 3855 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to 3856 /// store BitParts objects, not pointers. As we need the concept of a nullptr 3857 /// BitParts (Value has been analyzed and the analysis failed), we an Optional 3858 /// type instead to provide the same functionality. 3859 /// 3860 /// Because we pass around references into \c BPS, we must use a container that 3861 /// does not invalidate internal references (std::map instead of DenseMap). 3862 static const std::optional<BitPart> & 3863 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, 3864 std::map<Value *, std::optional<BitPart>> &BPS, int Depth, 3865 bool &FoundRoot) { 3866 auto [I, Inserted] = BPS.try_emplace(V); 3867 if (!Inserted) 3868 return I->second; 3869 3870 auto &Result = I->second; 3871 auto BitWidth = V->getType()->getScalarSizeInBits(); 3872 3873 // Can't do integer/elements > 128 bits. 3874 if (BitWidth > 128) 3875 return Result; 3876 3877 // Prevent stack overflow by limiting the recursion depth 3878 if (Depth == BitPartRecursionMaxDepth) { 3879 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n"); 3880 return Result; 3881 } 3882 3883 if (auto *I = dyn_cast<Instruction>(V)) { 3884 Value *X, *Y; 3885 const APInt *C; 3886 3887 // If this is an or instruction, it may be an inner node of the bswap. 3888 if (match(V, m_Or(m_Value(X), m_Value(Y)))) { 3889 // Check we have both sources and they are from the same provider. 3890 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3891 Depth + 1, FoundRoot); 3892 if (!A || !A->Provider) 3893 return Result; 3894 3895 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, 3896 Depth + 1, FoundRoot); 3897 if (!B || A->Provider != B->Provider) 3898 return Result; 3899 3900 // Try and merge the two together. 3901 Result = BitPart(A->Provider, BitWidth); 3902 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) { 3903 if (A->Provenance[BitIdx] != BitPart::Unset && 3904 B->Provenance[BitIdx] != BitPart::Unset && 3905 A->Provenance[BitIdx] != B->Provenance[BitIdx]) 3906 return Result = std::nullopt; 3907 3908 if (A->Provenance[BitIdx] == BitPart::Unset) 3909 Result->Provenance[BitIdx] = B->Provenance[BitIdx]; 3910 else 3911 Result->Provenance[BitIdx] = A->Provenance[BitIdx]; 3912 } 3913 3914 return Result; 3915 } 3916 3917 // If this is a logical shift by a constant, recurse then shift the result. 3918 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) { 3919 const APInt &BitShift = *C; 3920 3921 // Ensure the shift amount is defined. 3922 if (BitShift.uge(BitWidth)) 3923 return Result; 3924 3925 // For bswap-only, limit shift amounts to whole bytes, for an early exit. 3926 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0) 3927 return Result; 3928 3929 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3930 Depth + 1, FoundRoot); 3931 if (!Res) 3932 return Result; 3933 Result = Res; 3934 3935 // Perform the "shift" on BitProvenance. 3936 auto &P = Result->Provenance; 3937 if (I->getOpcode() == Instruction::Shl) { 3938 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end()); 3939 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset); 3940 } else { 3941 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue())); 3942 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset); 3943 } 3944 3945 return Result; 3946 } 3947 3948 // If this is a logical 'and' with a mask that clears bits, recurse then 3949 // unset the appropriate bits. 3950 if (match(V, m_And(m_Value(X), m_APInt(C)))) { 3951 const APInt &AndMask = *C; 3952 3953 // Check that the mask allows a multiple of 8 bits for a bswap, for an 3954 // early exit. 3955 unsigned NumMaskedBits = AndMask.popcount(); 3956 if (!MatchBitReversals && (NumMaskedBits % 8) != 0) 3957 return Result; 3958 3959 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3960 Depth + 1, FoundRoot); 3961 if (!Res) 3962 return Result; 3963 Result = Res; 3964 3965 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 3966 // If the AndMask is zero for this bit, clear the bit. 3967 if (AndMask[BitIdx] == 0) 3968 Result->Provenance[BitIdx] = BitPart::Unset; 3969 return Result; 3970 } 3971 3972 // If this is a zext instruction zero extend the result. 3973 if (match(V, m_ZExt(m_Value(X)))) { 3974 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3975 Depth + 1, FoundRoot); 3976 if (!Res) 3977 return Result; 3978 3979 Result = BitPart(Res->Provider, BitWidth); 3980 auto NarrowBitWidth = X->getType()->getScalarSizeInBits(); 3981 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx) 3982 Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; 3983 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx) 3984 Result->Provenance[BitIdx] = BitPart::Unset; 3985 return Result; 3986 } 3987 3988 // If this is a truncate instruction, extract the lower bits. 3989 if (match(V, m_Trunc(m_Value(X)))) { 3990 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3991 Depth + 1, FoundRoot); 3992 if (!Res) 3993 return Result; 3994 3995 Result = BitPart(Res->Provider, BitWidth); 3996 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 3997 Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; 3998 return Result; 3999 } 4000 4001 // BITREVERSE - most likely due to us previous matching a partial 4002 // bitreverse. 4003 if (match(V, m_BitReverse(m_Value(X)))) { 4004 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 4005 Depth + 1, FoundRoot); 4006 if (!Res) 4007 return Result; 4008 4009 Result = BitPart(Res->Provider, BitWidth); 4010 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 4011 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx]; 4012 return Result; 4013 } 4014 4015 // BSWAP - most likely due to us previous matching a partial bswap. 4016 if (match(V, m_BSwap(m_Value(X)))) { 4017 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 4018 Depth + 1, FoundRoot); 4019 if (!Res) 4020 return Result; 4021 4022 unsigned ByteWidth = BitWidth / 8; 4023 Result = BitPart(Res->Provider, BitWidth); 4024 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) { 4025 unsigned ByteBitOfs = ByteIdx * 8; 4026 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx) 4027 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] = 4028 Res->Provenance[ByteBitOfs + BitIdx]; 4029 } 4030 return Result; 4031 } 4032 4033 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift 4034 // amount (modulo). 4035 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 4036 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 4037 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) || 4038 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) { 4039 // We can treat fshr as a fshl by flipping the modulo amount. 4040 unsigned ModAmt = C->urem(BitWidth); 4041 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr) 4042 ModAmt = BitWidth - ModAmt; 4043 4044 // For bswap-only, limit shift amounts to whole bytes, for an early exit. 4045 if (!MatchBitReversals && (ModAmt % 8) != 0) 4046 return Result; 4047 4048 // Check we have both sources and they are from the same provider. 4049 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 4050 Depth + 1, FoundRoot); 4051 if (!LHS || !LHS->Provider) 4052 return Result; 4053 4054 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, 4055 Depth + 1, FoundRoot); 4056 if (!RHS || LHS->Provider != RHS->Provider) 4057 return Result; 4058 4059 unsigned StartBitRHS = BitWidth - ModAmt; 4060 Result = BitPart(LHS->Provider, BitWidth); 4061 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx) 4062 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx]; 4063 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx) 4064 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS]; 4065 return Result; 4066 } 4067 } 4068 4069 // If we've already found a root input value then we're never going to merge 4070 // these back together. 4071 if (FoundRoot) 4072 return Result; 4073 4074 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must 4075 // be the root input value to the bswap/bitreverse. 4076 FoundRoot = true; 4077 Result = BitPart(V, BitWidth); 4078 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 4079 Result->Provenance[BitIdx] = BitIdx; 4080 return Result; 4081 } 4082 4083 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, 4084 unsigned BitWidth) { 4085 if (From % 8 != To % 8) 4086 return false; 4087 // Convert from bit indices to byte indices and check for a byte reversal. 4088 From >>= 3; 4089 To >>= 3; 4090 BitWidth >>= 3; 4091 return From == BitWidth - To - 1; 4092 } 4093 4094 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, 4095 unsigned BitWidth) { 4096 return From == BitWidth - To - 1; 4097 } 4098 4099 bool llvm::recognizeBSwapOrBitReverseIdiom( 4100 Instruction *I, bool MatchBSwaps, bool MatchBitReversals, 4101 SmallVectorImpl<Instruction *> &InsertedInsts) { 4102 if (!match(I, m_Or(m_Value(), m_Value())) && 4103 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) && 4104 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) && 4105 !match(I, m_BSwap(m_Value()))) 4106 return false; 4107 if (!MatchBSwaps && !MatchBitReversals) 4108 return false; 4109 Type *ITy = I->getType(); 4110 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 || 4111 ITy->getScalarSizeInBits() > 128) 4112 return false; // Can't do integer/elements > 128 bits. 4113 4114 // Try to find all the pieces corresponding to the bswap. 4115 bool FoundRoot = false; 4116 std::map<Value *, std::optional<BitPart>> BPS; 4117 const auto &Res = 4118 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot); 4119 if (!Res) 4120 return false; 4121 ArrayRef<int8_t> BitProvenance = Res->Provenance; 4122 assert(all_of(BitProvenance, 4123 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) && 4124 "Illegal bit provenance index"); 4125 4126 // If the upper bits are zero, then attempt to perform as a truncated op. 4127 Type *DemandedTy = ITy; 4128 if (BitProvenance.back() == BitPart::Unset) { 4129 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset) 4130 BitProvenance = BitProvenance.drop_back(); 4131 if (BitProvenance.empty()) 4132 return false; // TODO - handle null value? 4133 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size()); 4134 if (auto *IVecTy = dyn_cast<VectorType>(ITy)) 4135 DemandedTy = VectorType::get(DemandedTy, IVecTy); 4136 } 4137 4138 // Check BitProvenance hasn't found a source larger than the result type. 4139 unsigned DemandedBW = DemandedTy->getScalarSizeInBits(); 4140 if (DemandedBW > ITy->getScalarSizeInBits()) 4141 return false; 4142 4143 // Now, is the bit permutation correct for a bswap or a bitreverse? We can 4144 // only byteswap values with an even number of bytes. 4145 APInt DemandedMask = APInt::getAllOnes(DemandedBW); 4146 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0; 4147 bool OKForBitReverse = MatchBitReversals; 4148 for (unsigned BitIdx = 0; 4149 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) { 4150 if (BitProvenance[BitIdx] == BitPart::Unset) { 4151 DemandedMask.clearBit(BitIdx); 4152 continue; 4153 } 4154 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx, 4155 DemandedBW); 4156 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx], 4157 BitIdx, DemandedBW); 4158 } 4159 4160 Intrinsic::ID Intrin; 4161 if (OKForBSwap) 4162 Intrin = Intrinsic::bswap; 4163 else if (OKForBitReverse) 4164 Intrin = Intrinsic::bitreverse; 4165 else 4166 return false; 4167 4168 Function *F = 4169 Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy); 4170 Value *Provider = Res->Provider; 4171 4172 // We may need to truncate the provider. 4173 if (DemandedTy != Provider->getType()) { 4174 auto *Trunc = 4175 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator()); 4176 InsertedInsts.push_back(Trunc); 4177 Provider = Trunc; 4178 } 4179 4180 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator()); 4181 InsertedInsts.push_back(Result); 4182 4183 if (!DemandedMask.isAllOnes()) { 4184 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask); 4185 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator()); 4186 InsertedInsts.push_back(Result); 4187 } 4188 4189 // We may need to zeroextend back to the result type. 4190 if (ITy != Result->getType()) { 4191 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator()); 4192 InsertedInsts.push_back(ExtInst); 4193 } 4194 4195 return true; 4196 } 4197 4198 // CodeGen has special handling for some string functions that may replace 4199 // them with target-specific intrinsics. Since that'd skip our interceptors 4200 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses, 4201 // we mark affected calls as NoBuiltin, which will disable optimization 4202 // in CodeGen. 4203 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin( 4204 CallInst *CI, const TargetLibraryInfo *TLI) { 4205 Function *F = CI->getCalledFunction(); 4206 LibFunc Func; 4207 if (F && !F->hasLocalLinkage() && F->hasName() && 4208 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) && 4209 !F->doesNotAccessMemory()) 4210 CI->addFnAttr(Attribute::NoBuiltin); 4211 } 4212 4213 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) { 4214 const auto *Op = I->getOperand(OpIdx); 4215 // We can't have a PHI with a metadata type. 4216 if (Op->getType()->isMetadataTy()) 4217 return false; 4218 4219 // swifterror pointers can only be used by a load, store, or as a swifterror 4220 // argument; swifterror pointers are not allowed to be used in select or phi 4221 // instructions. 4222 if (Op->isSwiftError()) 4223 return false; 4224 4225 // Early exit. 4226 if (!isa<Constant, InlineAsm>(Op)) 4227 return true; 4228 4229 switch (I->getOpcode()) { 4230 default: 4231 return true; 4232 case Instruction::Call: 4233 case Instruction::Invoke: { 4234 const auto &CB = cast<CallBase>(*I); 4235 4236 // Can't handle inline asm. Skip it. 4237 if (CB.isInlineAsm()) 4238 return false; 4239 4240 // Constant bundle operands may need to retain their constant-ness for 4241 // correctness. 4242 if (CB.isBundleOperand(OpIdx)) 4243 return false; 4244 4245 if (OpIdx < CB.arg_size()) { 4246 // Some variadic intrinsics require constants in the variadic arguments, 4247 // which currently aren't markable as immarg. 4248 if (isa<IntrinsicInst>(CB) && 4249 OpIdx >= CB.getFunctionType()->getNumParams()) { 4250 // This is known to be OK for stackmap. 4251 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap; 4252 } 4253 4254 // gcroot is a special case, since it requires a constant argument which 4255 // isn't also required to be a simple ConstantInt. 4256 if (CB.getIntrinsicID() == Intrinsic::gcroot) 4257 return false; 4258 4259 // Some intrinsic operands are required to be immediates. 4260 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg); 4261 } 4262 4263 // It is never allowed to replace the call argument to an intrinsic, but it 4264 // may be possible for a call. 4265 return !isa<IntrinsicInst>(CB); 4266 } 4267 case Instruction::ShuffleVector: 4268 // Shufflevector masks are constant. 4269 return OpIdx != 2; 4270 case Instruction::Switch: 4271 case Instruction::ExtractValue: 4272 // All operands apart from the first are constant. 4273 return OpIdx == 0; 4274 case Instruction::InsertValue: 4275 // All operands apart from the first and the second are constant. 4276 return OpIdx < 2; 4277 case Instruction::Alloca: 4278 // Static allocas (constant size in the entry block) are handled by 4279 // prologue/epilogue insertion so they're free anyway. We definitely don't 4280 // want to make them non-constant. 4281 return !cast<AllocaInst>(I)->isStaticAlloca(); 4282 case Instruction::GetElementPtr: 4283 if (OpIdx == 0) 4284 return true; 4285 gep_type_iterator It = gep_type_begin(I); 4286 for (auto E = std::next(It, OpIdx); It != E; ++It) 4287 if (It.isStruct()) 4288 return false; 4289 return true; 4290 } 4291 } 4292 4293 Value *llvm::invertCondition(Value *Condition) { 4294 // First: Check if it's a constant 4295 if (Constant *C = dyn_cast<Constant>(Condition)) 4296 return ConstantExpr::getNot(C); 4297 4298 // Second: If the condition is already inverted, return the original value 4299 Value *NotCondition; 4300 if (match(Condition, m_Not(m_Value(NotCondition)))) 4301 return NotCondition; 4302 4303 BasicBlock *Parent = nullptr; 4304 Instruction *Inst = dyn_cast<Instruction>(Condition); 4305 if (Inst) 4306 Parent = Inst->getParent(); 4307 else if (Argument *Arg = dyn_cast<Argument>(Condition)) 4308 Parent = &Arg->getParent()->getEntryBlock(); 4309 assert(Parent && "Unsupported condition to invert"); 4310 4311 // Third: Check all the users for an invert 4312 for (User *U : Condition->users()) 4313 if (Instruction *I = dyn_cast<Instruction>(U)) 4314 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition)))) 4315 return I; 4316 4317 // Last option: Create a new instruction 4318 auto *Inverted = 4319 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv"); 4320 if (Inst && !isa<PHINode>(Inst)) 4321 Inverted->insertAfter(Inst->getIterator()); 4322 else 4323 Inverted->insertBefore(Parent->getFirstInsertionPt()); 4324 return Inverted; 4325 } 4326 4327 bool llvm::inferAttributesFromOthers(Function &F) { 4328 // Note: We explicitly check for attributes rather than using cover functions 4329 // because some of the cover functions include the logic being implemented. 4330 4331 bool Changed = false; 4332 // readnone + not convergent implies nosync 4333 if (!F.hasFnAttribute(Attribute::NoSync) && 4334 F.doesNotAccessMemory() && !F.isConvergent()) { 4335 F.setNoSync(); 4336 Changed = true; 4337 } 4338 4339 // readonly implies nofree 4340 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) { 4341 F.setDoesNotFreeMemory(); 4342 Changed = true; 4343 } 4344 4345 // willreturn implies mustprogress 4346 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) { 4347 F.setMustProgress(); 4348 Changed = true; 4349 } 4350 4351 // TODO: There are a bunch of cases of restrictive memory effects we 4352 // can infer by inspecting arguments of argmemonly-ish functions. 4353 4354 return Changed; 4355 } 4356 4357 void OverflowTracking::mergeFlags(Instruction &I) { 4358 #ifndef NDEBUG 4359 if (Opcode) 4360 assert(Opcode == I.getOpcode() && 4361 "can only use mergeFlags on instructions with matching opcodes"); 4362 else 4363 Opcode = I.getOpcode(); 4364 #endif 4365 if (isa<OverflowingBinaryOperator>(&I)) { 4366 HasNUW &= I.hasNoUnsignedWrap(); 4367 HasNSW &= I.hasNoSignedWrap(); 4368 } 4369 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I)) 4370 IsDisjoint &= DisjointOp->isDisjoint(); 4371 } 4372 4373 void OverflowTracking::applyFlags(Instruction &I) { 4374 I.clearSubclassOptionalData(); 4375 if (I.getOpcode() == Instruction::Add || 4376 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) { 4377 if (HasNUW) 4378 I.setHasNoUnsignedWrap(); 4379 if (HasNSW && (AllKnownNonNegative || HasNUW)) 4380 I.setHasNoSignedWrap(); 4381 } 4382 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I)) 4383 DisjointOp->setIsDisjoint(IsDisjoint); 4384 } 4385