1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions performs various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/DomTreeUpdater.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/MemorySSAUpdater.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/BinaryFormat/Dwarf.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/ConstantRange.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DIBuilder.h"
43 #include "llvm/IR/DataLayout.h"
44 #include "llvm/IR/DebugInfo.h"
45 #include "llvm/IR/DebugInfoMetadata.h"
46 #include "llvm/IR/DebugLoc.h"
47 #include "llvm/IR/DerivedTypes.h"
48 #include "llvm/IR/Dominators.h"
49 #include "llvm/IR/EHPersonalities.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/GetElementPtrTypeIterator.h"
52 #include "llvm/IR/GlobalObject.h"
53 #include "llvm/IR/IRBuilder.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/IntrinsicsWebAssembly.h"
60 #include "llvm/IR/LLVMContext.h"
61 #include "llvm/IR/MDBuilder.h"
62 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/PatternMatch.h"
66 #include "llvm/IR/ProfDataUtils.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/IR/Use.h"
69 #include "llvm/IR/User.h"
70 #include "llvm/IR/Value.h"
71 #include "llvm/IR/ValueHandle.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/CommandLine.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <cstdint>
83 #include <iterator>
84 #include <map>
85 #include <optional>
86 #include <utility>
87
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90
91 extern cl::opt<bool> UseNewDbgInfoFormat;
92
93 #define DEBUG_TYPE "local"
94
95 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
96 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
97
98 static cl::opt<bool> PHICSEDebugHash(
99 "phicse-debug-hash",
100 #ifdef EXPENSIVE_CHECKS
101 cl::init(true),
102 #else
103 cl::init(false),
104 #endif
105 cl::Hidden,
106 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
107 "function is well-behaved w.r.t. its isEqual predicate"));
108
109 static cl::opt<unsigned> PHICSENumPHISmallSize(
110 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
111 cl::desc(
112 "When the basic block contains not more than this number of PHI nodes, "
113 "perform a (faster!) exhaustive search instead of set-driven one."));
114
115 // Max recursion depth for collectBitParts used when detecting bswap and
116 // bitreverse idioms.
117 static const unsigned BitPartRecursionMaxDepth = 48;
118
119 //===----------------------------------------------------------------------===//
120 // Local constant propagation.
121 //
122
123 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
124 /// constant value, convert it into an unconditional branch to the constant
125 /// destination. This is a nontrivial operation because the successors of this
126 /// basic block must have their PHI nodes updated.
127 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
128 /// conditions and indirectbr addresses this might make dead if
129 /// DeleteDeadConditions is true.
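/// For example (illustrative IR): a branch on a constant condition such as
///   br i1 true, label %then, label %else
/// is rewritten to
///   br label %then
/// and %else has this block removed from its PHI nodes' predecessor lists.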
130 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
131 const TargetLibraryInfo *TLI,
132 DomTreeUpdater *DTU) {
133 Instruction *T = BB->getTerminator();
134 IRBuilder<> Builder(T);
135
136 // Branch - See if we are conditionally jumping on a constant.
137 if (auto *BI = dyn_cast<BranchInst>(T)) {
138 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
139
140 BasicBlock *Dest1 = BI->getSuccessor(0);
141 BasicBlock *Dest2 = BI->getSuccessor(1);
142
143 if (Dest2 == Dest1) { // Conditional branch to same location?
144 // This branch matches something like this:
145 // br bool %cond, label %Dest, label %Dest
146 // and changes it into: br label %Dest
147
148 // Let the basic block know that we are letting go of one copy of it.
149 assert(BI->getParent() && "Terminator not inserted in block!");
150 Dest1->removePredecessor(BI->getParent());
151
152 // Replace the conditional branch with an unconditional one.
153 BranchInst *NewBI = Builder.CreateBr(Dest1);
154
155 // Transfer the metadata to the new branch instruction.
156 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
157 LLVMContext::MD_annotation});
158
159 Value *Cond = BI->getCondition();
160 BI->eraseFromParent();
161 if (DeleteDeadConditions)
162 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
163 return true;
164 }
165
166 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
167 // Are we branching on constant?
168 // YES. Change to unconditional branch...
169 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
170 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
171
172 // Let the basic block know that we are letting go of it. Based on this,
173 // it will adjust its PHI nodes.
174 OldDest->removePredecessor(BB);
175
176 // Replace the conditional branch with an unconditional one.
177 BranchInst *NewBI = Builder.CreateBr(Destination);
178
179 // Transfer the metadata to the new branch instruction.
180 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
181 LLVMContext::MD_annotation});
182
183 BI->eraseFromParent();
184 if (DTU)
185 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
186 return true;
187 }
188
189 return false;
190 }
191
192 if (auto *SI = dyn_cast<SwitchInst>(T)) {
193 // If we are switching on a constant, we can convert the switch to an
194 // unconditional branch.
195 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
196 BasicBlock *DefaultDest = SI->getDefaultDest();
197 BasicBlock *TheOnlyDest = DefaultDest;
198
199 // If the default is unreachable, ignore it when searching for TheOnlyDest.
200 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
201 SI->getNumCases() > 0) {
202 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
203 }
204
205 bool Changed = false;
206
207 // Figure out which case it goes to.
208 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
209 // Found case matching a constant operand?
210 if (It->getCaseValue() == CI) {
211 TheOnlyDest = It->getCaseSuccessor();
212 break;
213 }
214
215 // Check to see if this branch is going to the same place as the default
216 // dest. If so, eliminate it as an explicit compare.
217 if (It->getCaseSuccessor() == DefaultDest) {
218 MDNode *MD = getValidBranchWeightMDNode(*SI);
219 unsigned NCases = SI->getNumCases();
220 // Fold the case metadata into the default if there will be any branches
221 // left, unless the metadata doesn't match the switch.
222 if (NCases > 1 && MD) {
223 // Collect branch weights into a vector.
224 SmallVector<uint32_t, 8> Weights;
225 extractBranchWeights(MD, Weights);
226
227 // Merge weight of this case to the default weight.
228 unsigned Idx = It->getCaseIndex();
229 // TODO: Add overflow check.
230 Weights[0] += Weights[Idx + 1];
231 // Remove weight for this case.
232 std::swap(Weights[Idx + 1], Weights.back());
233 Weights.pop_back();
234 setBranchWeights(*SI, Weights, hasBranchWeightOrigin(MD));
235 }
236 // Remove this entry.
237 BasicBlock *ParentBB = SI->getParent();
238 DefaultDest->removePredecessor(ParentBB);
239 It = SI->removeCase(It);
240 End = SI->case_end();
241
242 // Removing this case may have made the condition constant. In that
243 // case, update CI and restart iteration through the cases.
244 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
245 CI = NewCI;
246 It = SI->case_begin();
247 }
248
249 Changed = true;
250 continue;
251 }
252
253 // Otherwise, check to see if the switch only branches to one destination.
254 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
255 // destinations.
256 if (It->getCaseSuccessor() != TheOnlyDest)
257 TheOnlyDest = nullptr;
258
259 // Increment this iterator as we haven't removed the case.
260 ++It;
261 }
262
263 if (CI && !TheOnlyDest) {
264 // We are branching on a constant that matches none of the cases; go to
265 // the default successor.
266 TheOnlyDest = SI->getDefaultDest();
267 }
268
269 // If we found a single destination that we can fold the switch into, do so
270 // now.
271 if (TheOnlyDest) {
272 // Insert the new branch.
273 Builder.CreateBr(TheOnlyDest);
274 BasicBlock *BB = SI->getParent();
275
276 SmallSet<BasicBlock *, 8> RemovedSuccessors;
277
278 // Remove entries from PHI nodes which we no longer branch to...
279 BasicBlock *SuccToKeep = TheOnlyDest;
280 for (BasicBlock *Succ : successors(SI)) {
281 if (DTU && Succ != TheOnlyDest)
282 RemovedSuccessors.insert(Succ);
283 // Is this the successor we are keeping?
284 if (Succ == SuccToKeep) {
285 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
286 } else {
287 Succ->removePredecessor(BB);
288 }
289 }
290
291 // Delete the old switch.
292 Value *Cond = SI->getCondition();
293 SI->eraseFromParent();
294 if (DeleteDeadConditions)
295 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
296 if (DTU) {
297 std::vector<DominatorTree::UpdateType> Updates;
298 Updates.reserve(RemovedSuccessors.size());
299 for (auto *RemovedSuccessor : RemovedSuccessors)
300 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
301 DTU->applyUpdates(Updates);
302 }
303 return true;
304 }
305
306 if (SI->getNumCases() == 1) {
307 // Otherwise, we can fold this switch into a conditional branch
308 // instruction if it has only one non-default destination.
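// Illustrative example: a switch such as
//   switch i32 %x, label %default [ i32 10, label %case10 ]
// becomes
//   %cond = icmp eq i32 %x, 10
//   br i1 %cond, label %case10, label %default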
309 auto FirstCase = *SI->case_begin();
310 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
311 FirstCase.getCaseValue(), "cond");
312
313 // Insert the new branch.
314 BranchInst *NewBr = Builder.CreateCondBr(Cond,
315 FirstCase.getCaseSuccessor(),
316 SI->getDefaultDest());
317 SmallVector<uint32_t> Weights;
318 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
319 uint32_t DefWeight = Weights[0];
320 uint32_t CaseWeight = Weights[1];
321 // The TrueWeight should be the weight for the single case of SI.
322 NewBr->setMetadata(LLVMContext::MD_prof,
323 MDBuilder(BB->getContext())
324 .createBranchWeights(CaseWeight, DefWeight));
325 }
326
327 // Update make.implicit metadata to the newly-created conditional branch.
328 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
329 if (MakeImplicitMD)
330 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
331
332 // Delete the old switch.
333 SI->eraseFromParent();
334 return true;
335 }
336 return Changed;
337 }
338
339 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
340 // indirectbr blockaddress(@F, @BB) -> br label @BB
341 if (auto *BA =
342 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
343 BasicBlock *TheOnlyDest = BA->getBasicBlock();
344 SmallSet<BasicBlock *, 8> RemovedSuccessors;
345
346 // Insert the new branch.
347 Builder.CreateBr(TheOnlyDest);
348
349 BasicBlock *SuccToKeep = TheOnlyDest;
350 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
351 BasicBlock *DestBB = IBI->getDestination(i);
352 if (DTU && DestBB != TheOnlyDest)
353 RemovedSuccessors.insert(DestBB);
354 if (IBI->getDestination(i) == SuccToKeep) {
355 SuccToKeep = nullptr;
356 } else {
357 DestBB->removePredecessor(BB);
358 }
359 }
360 Value *Address = IBI->getAddress();
361 IBI->eraseFromParent();
362 if (DeleteDeadConditions)
363 // Delete pointer cast instructions.
364 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
365
366 // Also zap the blockaddress constant if there are no users remaining,
367 // otherwise the destination is still marked as having its address taken.
368 if (BA->use_empty())
369 BA->destroyConstant();
370
371 // If we didn't find our destination in the IBI successor list, then we
372 // have undefined behavior. Replace the unconditional branch with an
373 // 'unreachable' instruction.
374 if (SuccToKeep) {
375 BB->getTerminator()->eraseFromParent();
376 new UnreachableInst(BB->getContext(), BB);
377 }
378
379 if (DTU) {
380 std::vector<DominatorTree::UpdateType> Updates;
381 Updates.reserve(RemovedSuccessors.size());
382 for (auto *RemovedSuccessor : RemovedSuccessors)
383 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
384 DTU->applyUpdates(Updates);
385 }
386 return true;
387 }
388 }
389
390 return false;
391 }
392
393 //===----------------------------------------------------------------------===//
394 // Local dead code elimination.
395 //
396
397 /// isInstructionTriviallyDead - Return true if the result produced by the
398 /// instruction is not used, and the instruction has no side effects.
399 ///
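/// For example (illustrative), an arithmetic instruction with no uses, e.g.
///   %sum = add i32 %a, %b
/// is trivially dead, whereas a store or another side-effecting instruction
/// is not.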
400 bool llvm::isInstructionTriviallyDead(Instruction *I,
401 const TargetLibraryInfo *TLI) {
402 if (!I->use_empty())
403 return false;
404 return wouldInstructionBeTriviallyDead(I, TLI);
405 }
406
407 bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
408 Instruction *I, const TargetLibraryInfo *TLI) {
409 // Instructions that are "markers" and have implied meaning for the code
410 // around them (without explicit uses) are not dead on unused paths.
411 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
412 if (II->getIntrinsicID() == Intrinsic::stacksave ||
413 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
414 II->isLifetimeStartOrEnd())
415 return false;
416 return wouldInstructionBeTriviallyDead(I, TLI);
417 }
418
419 bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
420 const TargetLibraryInfo *TLI) {
421 if (I->isTerminator())
422 return false;
423
424 // We don't want the landingpad-like instructions removed by anything this
425 // general.
426 if (I->isEHPad())
427 return false;
428
429 // We don't want debug info removed by anything this general.
430 if (isa<DbgVariableIntrinsic>(I))
431 return false;
432
433 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
434 if (DLI->getLabel())
435 return false;
436 return true;
437 }
438
439 if (auto *CB = dyn_cast<CallBase>(I))
440 if (isRemovableAlloc(CB, TLI))
441 return true;
442
443 if (!I->willReturn()) {
444 auto *II = dyn_cast<IntrinsicInst>(I);
445 if (!II)
446 return false;
447
448 switch (II->getIntrinsicID()) {
449 case Intrinsic::experimental_guard: {
450 // Guards on true are operationally no-ops. In the future we can
451 // consider more sophisticated tradeoffs for guards considering potential
452 // for check widening, but for now we keep things simple.
453 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
454 return Cond && Cond->isOne();
455 }
456 // TODO: These intrinsics are not safe to remove, because this may remove
457 // a well-defined trap.
458 case Intrinsic::wasm_trunc_signed:
459 case Intrinsic::wasm_trunc_unsigned:
460 case Intrinsic::ptrauth_auth:
461 case Intrinsic::ptrauth_resign:
462 return true;
463 default:
464 return false;
465 }
466 }
467
468 if (!I->mayHaveSideEffects())
469 return true;
470
471 // Special case intrinsics that "may have side effects" but can be deleted
472 // when dead.
473 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
474 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
475 if (II->getIntrinsicID() == Intrinsic::stacksave ||
476 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
477 return true;
478
479 // Intrinsics declare side effects to prevent them from being moved, but
480 // they are no-ops without users.
481 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
482 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
483 return true;
484
485 if (II->isLifetimeStartOrEnd()) {
486 auto *Arg = II->getArgOperand(1);
487 // Lifetime intrinsics are dead when their pointer operand is undef.
488 if (isa<UndefValue>(Arg))
489 return true;
490 // If the pointer operand is an alloca, global, or argument and its only
491 // uses are lifetime intrinsics, then the intrinsics are dead.
492 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
493 return llvm::all_of(Arg->uses(), [](Use &Use) {
494 if (IntrinsicInst *IntrinsicUse =
495 dyn_cast<IntrinsicInst>(Use.getUser()))
496 return IntrinsicUse->isLifetimeStartOrEnd();
497 return false;
498 });
499 return false;
500 }
501
502 // Assumptions are dead if their condition is trivially true.
503 if (II->getIntrinsicID() == Intrinsic::assume &&
504 isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
505 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
506 return !Cond->isZero();
507
508 return false;
509 }
510
511 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
512 std::optional<fp::ExceptionBehavior> ExBehavior =
513 FPI->getExceptionBehavior();
514 return *ExBehavior != fp::ebStrict;
515 }
516 }
517
518 if (auto *Call = dyn_cast<CallBase>(I)) {
519 if (Value *FreedOp = getFreedOperand(Call, TLI))
520 if (Constant *C = dyn_cast<Constant>(FreedOp))
521 return C->isNullValue() || isa<UndefValue>(C);
522 if (isMathLibCallNoop(Call, TLI))
523 return true;
524 }
525
526 // Non-volatile atomic loads from constants can be removed.
527 if (auto *LI = dyn_cast<LoadInst>(I))
528 if (auto *GV = dyn_cast<GlobalVariable>(
529 LI->getPointerOperand()->stripPointerCasts()))
530 if (!LI->isVolatile() && GV->isConstant())
531 return true;
532
533 return false;
534 }
535
536 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
537 /// trivially dead instruction, delete it. If that makes any of its operands
538 /// trivially dead, delete them too, recursively. Return true if any
539 /// instructions were deleted.
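/// Illustrative example: if the unused load below is the only use of %gep,
///   %gep = getelementptr i32, ptr %p, i64 1
///   %val = load i32, ptr %gep        ; no uses
/// then deleting %val makes %gep trivially dead, so it is deleted as well.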
540 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
541 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
542 std::function<void(Value *)> AboutToDeleteCallback) {
543 Instruction *I = dyn_cast<Instruction>(V);
544 if (!I || !isInstructionTriviallyDead(I, TLI))
545 return false;
546
547 SmallVector<WeakTrackingVH, 16> DeadInsts;
548 DeadInsts.push_back(I);
549 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
550 AboutToDeleteCallback);
551
552 return true;
553 }
554
555 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
556 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
557 MemorySSAUpdater *MSSAU,
558 std::function<void(Value *)> AboutToDeleteCallback) {
559 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
560 for (; S != E; ++S) {
561 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
562 if (!I || !isInstructionTriviallyDead(I)) {
563 DeadInsts[S] = nullptr;
564 ++Alive;
565 }
566 }
567 if (Alive == E)
568 return false;
569 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
570 AboutToDeleteCallback);
571 return true;
572 }
573
574 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
575 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
576 MemorySSAUpdater *MSSAU,
577 std::function<void(Value *)> AboutToDeleteCallback) {
578 // Process the dead instruction list until empty.
579 while (!DeadInsts.empty()) {
580 Value *V = DeadInsts.pop_back_val();
581 Instruction *I = cast_or_null<Instruction>(V);
582 if (!I)
583 continue;
584 assert(isInstructionTriviallyDead(I, TLI) &&
585 "Live instruction found in dead worklist!");
586 assert(I->use_empty() && "Instructions with uses are not dead.");
587
588 // Don't lose the debug info while deleting the instructions.
589 salvageDebugInfo(*I);
590
591 if (AboutToDeleteCallback)
592 AboutToDeleteCallback(I);
593
594 // Null out all of the instruction's operands to see if any operand becomes
595 // dead as we go.
596 for (Use &OpU : I->operands()) {
597 Value *OpV = OpU.get();
598 OpU.set(nullptr);
599
600 if (!OpV->use_empty())
601 continue;
602
603 // If the operand is an instruction that became dead as we nulled out the
604 // operand, and if it is 'trivially' dead, delete it in a future loop
605 // iteration.
606 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
607 if (isInstructionTriviallyDead(OpI, TLI))
608 DeadInsts.push_back(OpI);
609 }
610 if (MSSAU)
611 MSSAU->removeMemoryAccess(I);
612
613 I->eraseFromParent();
614 }
615 }
616
617 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
618 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
619 SmallVector<DbgVariableRecord *, 1> DPUsers;
620 findDbgUsers(DbgUsers, I, &DPUsers);
621 for (auto *DII : DbgUsers)
622 DII->setKillLocation();
623 for (auto *DVR : DPUsers)
624 DVR->setKillLocation();
625 return !DbgUsers.empty() || !DPUsers.empty();
626 }
627
628 /// areAllUsesEqual - Check whether the uses of a value are all the same.
629 /// This is similar to Instruction::hasOneUse() except this will also return
630 /// true when there are no uses or multiple uses that all refer to the same
631 /// value.
632 static bool areAllUsesEqual(Instruction *I) {
633 Value::user_iterator UI = I->user_begin();
634 Value::user_iterator UE = I->user_end();
635 if (UI == UE)
636 return true;
637
638 User *TheUse = *UI;
639 for (++UI; UI != UE; ++UI) {
640 if (*UI != TheUse)
641 return false;
642 }
643 return true;
644 }
645
646 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
647 /// dead PHI node, due to being a def-use chain of single-use nodes that
648 /// either forms a cycle or is terminated by a trivially dead instruction,
649 /// delete it. If that makes any of its operands trivially dead, delete them
650 /// too, recursively. Return true if a change was made.
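/// Illustrative example: two phis that only feed each other,
///   %a = phi i32 [ 0, %entry ], [ %b, %latch ]
///   %b = phi i32 [ %a, %body ]
/// form a side-effect-free cycle with no outside users and can be deleted.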
651 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
652 const TargetLibraryInfo *TLI,
653 llvm::MemorySSAUpdater *MSSAU) {
654 SmallPtrSet<Instruction*, 4> Visited;
655 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
656 I = cast<Instruction>(*I->user_begin())) {
657 if (I->use_empty())
658 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
659
660 // If we find an instruction more than once, we're on a cycle that
661 // won't prove fruitful.
662 if (!Visited.insert(I).second) {
663 // Break the cycle and delete the instruction and its operands.
664 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
665 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
666 return true;
667 }
668 }
669 return false;
670 }
671
672 static bool
673 simplifyAndDCEInstruction(Instruction *I,
674 SmallSetVector<Instruction *, 16> &WorkList,
675 const DataLayout &DL,
676 const TargetLibraryInfo *TLI) {
677 if (isInstructionTriviallyDead(I, TLI)) {
678 salvageDebugInfo(*I);
679
680 // Null out all of the instruction's operands to see if any operand becomes
681 // dead as we go.
682 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
683 Value *OpV = I->getOperand(i);
684 I->setOperand(i, nullptr);
685
686 if (!OpV->use_empty() || I == OpV)
687 continue;
688
689 // If the operand is an instruction that became dead as we nulled out the
690 // operand, and if it is 'trivially' dead, delete it in a future loop
691 // iteration.
692 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
693 if (isInstructionTriviallyDead(OpI, TLI))
694 WorkList.insert(OpI);
695 }
696
697 I->eraseFromParent();
698
699 return true;
700 }
701
702 if (Value *SimpleV = simplifyInstruction(I, DL)) {
703 // Add the users to the worklist. CAREFUL: an instruction can use itself,
704 // in the case of a phi node.
705 for (User *U : I->users()) {
706 if (U != I) {
707 WorkList.insert(cast<Instruction>(U));
708 }
709 }
710
711 // Replace the instruction with its simplified value.
712 bool Changed = false;
713 if (!I->use_empty()) {
714 I->replaceAllUsesWith(SimpleV);
715 Changed = true;
716 }
717 if (isInstructionTriviallyDead(I, TLI)) {
718 I->eraseFromParent();
719 Changed = true;
720 }
721 return Changed;
722 }
723 return false;
724 }
725
726 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
727 /// simplify any instructions in it and recursively delete dead instructions.
728 ///
729 /// This returns true if it changed the code. Note that it can delete
730 /// instructions in other blocks as well as in this block.
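/// Illustrative example: an instruction like
///   %c = add i32 2, 3
/// is simplified to the constant 5, its users are rewritten, and the now
/// trivially dead instruction is deleted.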
731 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
732 const TargetLibraryInfo *TLI) {
733 bool MadeChange = false;
734 const DataLayout &DL = BB->getDataLayout();
735
736 #ifndef NDEBUG
737 // In debug builds, ensure that the terminator of the block is never replaced
738 // or deleted by these simplifications. The idea of simplification is that it
739 // cannot introduce new instructions, and there is no way to replace the
740 // terminator of a block without introducing a new instruction.
741 AssertingVH<Instruction> TerminatorVH(&BB->back());
742 #endif
743
744 SmallSetVector<Instruction *, 16> WorkList;
745 // Iterate over the original function, only adding insts to the worklist
746 // if they actually need to be revisited. This avoids having to pre-init
747 // the worklist with the entire function's worth of instructions.
748 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
749 BI != E;) {
750 assert(!BI->isTerminator());
751 Instruction *I = &*BI;
752 ++BI;
753
754 // We're visiting this instruction now, so make sure it's not in the
755 // worklist from an earlier visit.
756 if (!WorkList.count(I))
757 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
758 }
759
760 while (!WorkList.empty()) {
761 Instruction *I = WorkList.pop_back_val();
762 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
763 }
764 return MadeChange;
765 }
766
767 //===----------------------------------------------------------------------===//
768 // Control Flow Graph Restructuring.
769 //
770
771 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
772 DomTreeUpdater *DTU) {
773
774 // If BB has single-entry PHI nodes, fold them.
775 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
776 Value *NewVal = PN->getIncomingValue(0);
777 // Replace self referencing PHI with poison, it must be dead.
778 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
779 PN->replaceAllUsesWith(NewVal);
780 PN->eraseFromParent();
781 }
782
783 BasicBlock *PredBB = DestBB->getSinglePredecessor();
784 assert(PredBB && "Block doesn't have a single predecessor!");
785
786 bool ReplaceEntryBB = PredBB->isEntryBlock();
787
788 // DTU updates: Collect all the edges that enter
789 // PredBB. These dominator edges will be redirected to DestBB.
790 SmallVector<DominatorTree::UpdateType, 32> Updates;
791
792 if (DTU) {
793 // To avoid processing the same predecessor more than once.
794 SmallPtrSet<BasicBlock *, 2> SeenPreds;
795 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
796 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
797 // This predecessor of PredBB may already have DestBB as a successor.
798 if (PredOfPredBB != PredBB)
799 if (SeenPreds.insert(PredOfPredBB).second)
800 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
801 SeenPreds.clear();
802 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
803 if (SeenPreds.insert(PredOfPredBB).second)
804 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
805 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
806 }
807
808 // Zap anything that took the address of DestBB. Not doing this will give the
809 // address an invalid value.
810 if (DestBB->hasAddressTaken()) {
811 BlockAddress *BA = BlockAddress::get(DestBB);
812 Constant *Replacement =
813 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
814 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
815 BA->getType()));
816 BA->destroyConstant();
817 }
818
819 // Anything that branched to PredBB now branches to DestBB.
820 PredBB->replaceAllUsesWith(DestBB);
821
822 // Splice all the instructions from PredBB to DestBB.
823 PredBB->getTerminator()->eraseFromParent();
824 DestBB->splice(DestBB->begin(), PredBB);
825 new UnreachableInst(PredBB->getContext(), PredBB);
826
827 // If the PredBB is the entry block of the function, move DestBB up to
828 // become the entry block after we erase PredBB.
829 if (ReplaceEntryBB)
830 DestBB->moveAfter(PredBB);
831
832 if (DTU) {
833 assert(PredBB->size() == 1 &&
834 isa<UnreachableInst>(PredBB->getTerminator()) &&
835 "The successor list of PredBB isn't empty before "
836 "applying corresponding DTU updates.");
837 DTU->applyUpdatesPermissive(Updates);
838 DTU->deleteBB(PredBB);
839 // Recalculation of DomTree is needed when updating a forward DomTree and
840 // the Entry BB is replaced.
841 if (ReplaceEntryBB && DTU->hasDomTree()) {
842 // The entry block was removed and there is no external interface for
843 // the dominator tree to be notified of this change. In this corner-case
844 // we recalculate the entire tree.
845 DTU->recalculate(*(DestBB->getParent()));
846 }
847 }
848
849 else {
850 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
851 }
852 }
853
854 /// Return true if we can choose one of these values to use in place of the
855 /// other. Note that we will always choose the non-undef value to keep.
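/// For example, CanMergeValues(%x, %x) and CanMergeValues(undef, %x) return
/// true, while two distinct non-undef values cannot be merged.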
856 static bool CanMergeValues(Value *First, Value *Second) {
857 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
858 }
859
860 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
861 /// branch to Succ, into Succ.
862 ///
863 /// Assumption: Succ is the single successor for BB.
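/// Illustrative conflict: if some block P is a predecessor of both BB and
/// Succ, and a phi in Succ would receive one defined value along P->BB->Succ
/// but a different defined value along P->Succ, the blocks cannot be folded.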
864 static bool
865 CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
866 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
867 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
868
869 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
870 << Succ->getName() << "\n");
871 // Shortcut: if there is only a single predecessor, it must be BB, and
872 // merging is always safe.
873 if (Succ->getSinglePredecessor())
874 return true;
875
876 // Look at all the phi nodes in Succ, to see if they present a conflict when
877 // merging these blocks
878 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
879 PHINode *PN = cast<PHINode>(I);
880
881 // If the incoming value from BB is again a PHINode in
882 // BB which has the same incoming value for *PI as PN does, we can
883 // merge the phi nodes and then the blocks can still be merged
884 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
885 if (BBPN && BBPN->getParent() == BB) {
886 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
887 BasicBlock *IBB = PN->getIncomingBlock(PI);
888 if (BBPreds.count(IBB) &&
889 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
890 PN->getIncomingValue(PI))) {
891 LLVM_DEBUG(dbgs()
892 << "Can't fold, phi node " << PN->getName() << " in "
893 << Succ->getName() << " is conflicting with "
894 << BBPN->getName() << " with regard to common predecessor "
895 << IBB->getName() << "\n");
896 return false;
897 }
898 }
899 } else {
900 Value* Val = PN->getIncomingValueForBlock(BB);
901 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
902 // See if the incoming value for the common predecessor is equal to the
903 // one for BB, in which case this phi node will not prevent the merging
904 // of the block.
905 BasicBlock *IBB = PN->getIncomingBlock(PI);
906 if (BBPreds.count(IBB) &&
907 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
908 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
909 << " in " << Succ->getName()
910 << " is conflicting with regard to common "
911 << "predecessor " << IBB->getName() << "\n");
912 return false;
913 }
914 }
915 }
916 }
917
918 return true;
919 }
920
921 using PredBlockVector = SmallVector<BasicBlock *, 16>;
922 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
923
924 /// Determines the value to use as the phi node input for a block.
925 ///
926 /// Select between \p OldVal and any value that we know flows from \p BB
927 /// to a particular phi on the basis of which one (if either) is not
928 /// undef. Update IncomingValues based on the selected value.
929 ///
930 /// \param OldVal The value we are considering selecting.
931 /// \param BB The block that the value flows in from.
932 /// \param IncomingValues A map from block-to-value for other phi inputs
933 /// that we have examined.
934 ///
935 /// \returns the selected value.
936 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
937 IncomingValueMap &IncomingValues) {
938 if (!isa<UndefValue>(OldVal)) {
939 assert((!IncomingValues.count(BB) ||
940 IncomingValues.find(BB)->second == OldVal) &&
941 "Expected OldVal to match incoming value from BB!");
942
943 IncomingValues.insert(std::make_pair(BB, OldVal));
944 return OldVal;
945 }
946
947 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
948 if (It != IncomingValues.end()) return It->second;
949
950 return OldVal;
951 }
952
953 /// Create a map from block to value for the operands of a
954 /// given phi.
955 ///
956 /// Create a map from block to value for each non-undef value flowing
957 /// into \p PN.
958 ///
959 /// \param PN The phi we are collecting the map for.
960 /// \param IncomingValues [out] The map from block to value for this phi.
961 static void gatherIncomingValuesToPhi(PHINode *PN,
962 IncomingValueMap &IncomingValues) {
963 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
964 BasicBlock *BB = PN->getIncomingBlock(i);
965 Value *V = PN->getIncomingValue(i);
966
967 if (!isa<UndefValue>(V))
968 IncomingValues.insert(std::make_pair(BB, V));
969 }
970 }
971
972 /// Replace the incoming undef values to a phi with the values
973 /// from a block-to-value map.
974 ///
975 /// \param PN The phi we are replacing the undefs in.
976 /// \param IncomingValues A map from block to value.
977 static void replaceUndefValuesInPhi(PHINode *PN,
978 const IncomingValueMap &IncomingValues) {
979 SmallVector<unsigned> TrueUndefOps;
980 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
981 Value *V = PN->getIncomingValue(i);
982
983 if (!isa<UndefValue>(V)) continue;
984
985 BasicBlock *BB = PN->getIncomingBlock(i);
986 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
987
988 // Keep track of undef/poison incoming values. Those must match, so we fix
989 // them up below if needed.
990 // Note: this is conservatively correct, but we could try harder and group
991 // the undef values per incoming basic block.
992 if (It == IncomingValues.end()) {
993 TrueUndefOps.push_back(i);
994 continue;
995 }
996
997 // There is a defined value for this incoming block, so map this undef
998 // incoming value to the defined value.
999 PN->setIncomingValue(i, It->second);
1000 }
1001
1002 // If there are both undef and poison values incoming, then convert those
1003 // values to undef. It is invalid to have different values for the same
1004 // incoming block.
1005 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
1006 return isa<PoisonValue>(PN->getIncomingValue(i));
1007 });
1008 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1009 for (unsigned i : TrueUndefOps)
1010 PN->setIncomingValue(i, UndefValue::get(PN->getType()));
1011 }
1012 }
1013
1014 // Return true only when BB and Succ share a single common predecessor.
1015 // Only handles cases where BB can't be merged while its predecessors can be
1016 // redirected.
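// Illustrative example: if BB's predecessors are {P1, P2} and Succ's are
// {P2, BB}, then P2 is the single common predecessor (CommonPred), and P1
// can be redirected to branch to Succ directly.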
1017 static bool
1018 CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
1019 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1020 const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
1021 BasicBlock *&CommonPred) {
1022
1023 // There must be phis in BB, otherwise BB will be merged into Succ directly
1024 if (BB->phis().empty() || Succ->phis().empty())
1025 return false;
1026
1027 // BB must have non-shared predecessors that can be redirected to Succ.
1028 if (!BB->hasNPredecessorsOrMore(2))
1029 return false;
1030
1031 if (any_of(BBPreds, [](const BasicBlock *Pred) {
1032 return isa<IndirectBrInst>(Pred->getTerminator());
1033 }))
1034 return false;
1035
1036 // Get the single common predecessor of both BB and Succ. Return false
1037 // when there is more than one common predecessor.
1038 for (BasicBlock *SuccPred : SuccPreds) {
1039 if (BBPreds.count(SuccPred)) {
1040 if (CommonPred)
1041 return false;
1042 CommonPred = SuccPred;
1043 }
1044 }
1045
1046 return true;
1047 }
1048
1049 /// Replace a value flowing from a block to a phi with
1050 /// potentially multiple instances of that value flowing from the
1051 /// block's predecessors to the phi.
1052 ///
1053 /// \param BB The block with the value flowing into the phi.
1054 /// \param BBPreds The predecessors of BB.
1055 /// \param PN The phi that we are updating.
1056 /// \param CommonPred The common predecessor of BB and PN's BasicBlock
1057 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
1058 const PredBlockVector &BBPreds,
1059 PHINode *PN,
1060 BasicBlock *CommonPred) {
1061 Value *OldVal = PN->removeIncomingValue(BB, false);
1062 assert(OldVal && "No entry in PHI for Pred BB!");
1063
1064 IncomingValueMap IncomingValues;
1065
1066 // We are merging two blocks - BB, and the block containing PN - and
1067 // as a result we need to redirect edges from the predecessors of BB
1068 // to go to the block containing PN, and update PN
1069 // accordingly. Since we allow merging blocks in the case where the
1070 // predecessor and successor blocks both share some predecessors,
1071 // and where some of those common predecessors might have undef
1072 // values flowing into PN, we want to rewrite those values to be
1073 // consistent with the non-undef values.
1074
1075 gatherIncomingValuesToPhi(PN, IncomingValues);
1076
1077 // If this incoming value is one of the PHI nodes in BB, the new entries
1078 // in the PHI node are the entries from the old PHI.
1079 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1080 PHINode *OldValPN = cast<PHINode>(OldVal);
1081 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1082 // Note that, since we are merging phi nodes and BB and Succ might
1083 // have common predecessors, we could end up with a phi node with
1084 // identical incoming branches. This will be cleaned up later (and
1085 // will trigger asserts if we try to clean it up now, without also
1086 // simplifying the corresponding conditional branch).
1087 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1088
1089 if (PredBB == CommonPred)
1090 continue;
1091
1092 Value *PredVal = OldValPN->getIncomingValue(i);
1093 Value *Selected =
1094 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1095
1096 // And add a new incoming value for this predecessor for the
1097 // newly retargeted branch.
1098 PN->addIncoming(Selected, PredBB);
1099 }
1100 if (CommonPred)
1101 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1102
1103 } else {
1104 for (BasicBlock *PredBB : BBPreds) {
1105 // Update existing incoming values in PN for this
1106 // predecessor of BB.
1107 if (PredBB == CommonPred)
1108 continue;
1109
1110 Value *Selected =
1111 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1112
1113 // And add a new incoming value for this predecessor for the
1114 // newly retargeted branch.
1115 PN->addIncoming(Selected, PredBB);
1116 }
1117 if (CommonPred)
1118 PN->addIncoming(OldVal, BB);
1119 }
1120
1121 replaceUndefValuesInPhi(PN, IncomingValues);
1122 }
1123
1124 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1125 DomTreeUpdater *DTU) {
1126 assert(BB != &BB->getParent()->getEntryBlock() &&
1127 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1128
1129 // We can't simplify infinite loops.
1130 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1131 if (BB == Succ)
1132 return false;
1133
1134 SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));
1135 SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(Succ), pred_end(Succ));
1136
1137 // The single common predecessor of BB and Succ when BB cannot be killed
1138 BasicBlock *CommonPred = nullptr;
1139
1140 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1141
1142 // Even if we cannot fold BB into Succ, we may be able to redirect the
1143 // predecessors of BB to Succ.
1144 bool BBPhisMergeable =
1145 BBKillable ||
1146 CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred);
1147
1148 if (!BBKillable && !BBPhisMergeable)
1149 return false;
1150
1151 // Check to see if merging these blocks/phis would cause conflicts for any of
1152 // the phi nodes in BB or Succ. If not, we can safely merge.
1153
1154 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1155 // has uses which will not disappear when the PHI nodes are merged. It is
1156 // possible to handle such cases, but difficult: it requires checking whether
1157 // BB dominates Succ, which is non-trivial to calculate in the case where
1158 // Succ has multiple predecessors. Also, it requires checking whether
1159 // constructing the necessary self-referential PHI node doesn't introduce any
1160 // conflicts; this isn't too difficult, but the previous code for doing this
1161 // was incorrect.
1162 //
1163 // Note that if this check finds a live use, BB dominates Succ, so BB is
1164 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1165 // folding the branch isn't profitable in that case anyway.
1166 if (!Succ->getSinglePredecessor()) {
1167 BasicBlock::iterator BBI = BB->begin();
1168 while (isa<PHINode>(*BBI)) {
1169 for (Use &U : BBI->uses()) {
1170 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1171 if (PN->getIncomingBlock(U) != BB)
1172 return false;
1173 } else {
1174 return false;
1175 }
1176 }
1177 ++BBI;
1178 }
1179 }
1180
1181 if (BBPhisMergeable && CommonPred)
1182 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1183 << " and " << Succ->getName() << " : "
1184 << CommonPred->getName() << "\n");
1185
1186 // 'BB' and 'BB->Pred' are loop latches, bail out to presrve inner loop
1187 // metadata.
1188 //
1189 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1190 // current status (that loop metadata is implemented as metadata attached to
1191 // the branch instruction in the loop latch block). To quote from review
1192 // comments, "the current representation of loop metadata (using a loop latch
1193 // terminator attachment) is known to be fundamentally broken. Loop latches
1194 // are not uniquely associated with loops (both in that a latch can be part of
1195 // multiple loops and a loop may have multiple latches). Loop headers are. The
1196 // solution to this problem is also known: Add support for basic block
1197 // metadata, and attach loop metadata to the loop header."
1198 //
1199 // Why bail out:
1200 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1201 // the latch for inner-loop (see reason below), so bail out to preserve
1202 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1203 // to this inner-loop.
1204 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1205 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1206 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1207 // one self-looping basic block, which is contradictory with the assumption.
1208 //
1209 // To illustrate how inner-loop metadata is dropped:
1210 //
1211 // CFG Before
1212 //
1213 // BB is while.cond.exit, attached with loop metadata md2.
1214 // BB->Pred is for.body, attached with loop metadata md1.
1215 //
1216 // entry
1217 // |
1218 // v
1219 // ---> while.cond -------------> while.end
1220 // | |
1221 // | v
1222 // | while.body
1223 // | |
1224 // | v
1225 // | for.body <---- (md1)
1226 // | | |______|
1227 // | v
1228 // | while.cond.exit (md2)
1229 // | |
1230 // |_______|
1231 //
1232 // CFG After
1233 //
1234 // while.cond1 is the merge of while.cond.exit and while.cond above.
1235 // for.body is attached with md2, and md1 is dropped.
1236 // If LoopSimplify runs later (as a part of loop pass), it could create
1237 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1238 // back), but it won't see 'md1' nor restore it for the inner-loop.
1239 //
1240 // entry
1241 // |
1242 // v
1243 // ---> while.cond1 -------------> while.end
1244 // | |
1245 // | v
1246 // | while.body
1247 // | |
1248 // | v
1249 // | for.body <---- (md2)
1250 // |_______| |______|
1251 if (Instruction *TI = BB->getTerminator())
1252 if (TI->hasMetadata(LLVMContext::MD_loop))
1253 for (BasicBlock *Pred : predecessors(BB))
1254 if (Instruction *PredTI = Pred->getTerminator())
1255 if (PredTI->hasMetadata(LLVMContext::MD_loop))
1256 return false;
1257
1258 if (BBKillable)
1259 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1260 else if (BBPhisMergeable)
1261 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1262
1263 SmallVector<DominatorTree::UpdateType, 32> Updates;
1264
1265 if (DTU) {
1266 // To avoid processing the same predecessor more than once.
1267 SmallPtrSet<BasicBlock *, 8> SeenPreds;
1268 // All predecessors of BB (except the common predecessor) will be moved to
1269 // Succ.
1270 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1271
1272 for (auto *PredOfBB : predecessors(BB)) {
1273 // Do not modify those common predecessors of BB and Succ
1274 if (!SuccPreds.contains(PredOfBB))
1275 if (SeenPreds.insert(PredOfBB).second)
1276 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1277 }
1278
1279 SeenPreds.clear();
1280
1281 for (auto *PredOfBB : predecessors(BB))
1282 // When BB cannot be killed, do not remove the edge between BB and
1283 // CommonPred.
1284 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1285 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1286
1287 if (BBKillable)
1288 Updates.push_back({DominatorTree::Delete, BB, Succ});
1289 }
1290
1291 if (isa<PHINode>(Succ->begin())) {
1292 // If there is more than one pred of succ, and there are PHI nodes in
1293 // the successor, then we need to add incoming edges for the PHI nodes
1294 //
1295 const PredBlockVector BBPreds(predecessors(BB));
1296
1297 // Loop over all of the PHI nodes in the successor of BB.
1298 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1299 PHINode *PN = cast<PHINode>(I);
1300 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1301 }
1302 }
1303
1304 if (Succ->getSinglePredecessor()) {
1305 // BB is the only predecessor of Succ, so Succ will end up with exactly
1306 // the same predecessors BB had.
1307 // Copy over any phi, debug or lifetime instruction.
1308 BB->getTerminator()->eraseFromParent();
1309 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1310 } else {
1311 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1312 // We explicitly check for such uses for merging phis.
1313 assert(PN->use_empty() && "There shouldn't be any uses here!");
1314 PN->eraseFromParent();
1315 }
1316 }
1317
1318 // If the unconditional branch we replaced contains llvm.loop metadata, we
1319 // add the metadata to the branch instructions in the predecessors.
1320 if (Instruction *TI = BB->getTerminator())
1321 if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
1322 for (BasicBlock *Pred : predecessors(BB))
1323 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1324
1325 if (BBKillable) {
1326 // Everything that jumped to BB now goes to Succ.
1327 BB->replaceAllUsesWith(Succ);
1328
1329 if (!Succ->hasName())
1330 Succ->takeName(BB);
1331
1332 // Clear the successor list of BB to match updates applying to DTU later.
1333 if (BB->getTerminator())
1334 BB->back().eraseFromParent();
1335
1336 new UnreachableInst(BB->getContext(), BB);
1337 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1338 "applying corresponding DTU updates.");
1339 } else if (BBPhisMergeable) {
1340 // Everything except CommonPred that jumped to BB now goes to Succ.
1341 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1342 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1343 return UseInst->getParent() != CommonPred &&
1344 BBPreds.contains(UseInst->getParent());
1345 return false;
1346 });
1347 }
1348
1349 if (DTU)
1350 DTU->applyUpdates(Updates);
1351
1352 if (BBKillable)
1353 DeleteDeadBlock(BB, DTU);
1354
1355 return true;
1356 }
1357
1358 static bool
1359 EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1360 SmallPtrSetImpl<PHINode *> &ToRemove) {
1361 // This implementation doesn't currently consider undef operands
1362 // specially. Theoretically, two phis which are identical except for
1363 // one having an undef where the other doesn't could be collapsed.
1364
1365 bool Changed = false;
1366
1367 // Examine each PHI.
1368 // Note that increment of I must *NOT* be in the iteration_expression, since
1369 // we don't want to immediately advance when we restart from the beginning.
1370 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1371 ++I;
1372 // Is there an identical PHI node in this basic block?
1373 // Note that we only look at the upper triangle of the pairwise comparisons;
1374 // we already checked that the lower-triangle pairs aren't identical.
1375 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1376 if (ToRemove.contains(DuplicatePN))
1377 continue;
1378 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1379 continue;
1380 // A duplicate. Replace this PHI with the base PHI.
1381 ++NumPHICSEs;
1382 DuplicatePN->replaceAllUsesWith(PN);
1383 ToRemove.insert(DuplicatePN);
1384 Changed = true;
1385
1386 // The RAUW can change PHIs that we already visited.
1387 I = BB->begin();
1388 break; // Start over from the beginning.
1389 }
1390 }
1391 return Changed;
1392 }
1393
1394 static bool
1395 EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1396 SmallPtrSetImpl<PHINode *> &ToRemove) {
1397 // This implementation doesn't currently consider undef operands
1398 // specially. Theoretically, two phis which are identical except for
1399 // one having an undef where the other doesn't could be collapsed.
1400
1401 struct PHIDenseMapInfo {
1402 static PHINode *getEmptyKey() {
1403 return DenseMapInfo<PHINode *>::getEmptyKey();
1404 }
1405
1406 static PHINode *getTombstoneKey() {
1407 return DenseMapInfo<PHINode *>::getTombstoneKey();
1408 }
1409
1410 static bool isSentinel(PHINode *PN) {
1411 return PN == getEmptyKey() || PN == getTombstoneKey();
1412 }
1413
1414 // WARNING: this logic must be kept in sync with
1415 // Instruction::isIdenticalToWhenDefined()!
1416 static unsigned getHashValueImpl(PHINode *PN) {
1417 // Compute a hash value on the operands. Instcombine will likely have
1418 // sorted them, which helps expose duplicates, but we have to check all
1419 // the operands to be safe in case instcombine hasn't run.
1420 return static_cast<unsigned>(hash_combine(
1421 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1422 hash_combine_range(PN->block_begin(), PN->block_end())));
1423 }
1424
1425 static unsigned getHashValue(PHINode *PN) {
1426 #ifndef NDEBUG
1427 // If -phicse-debug-hash was specified, return a constant -- this
1428 // will force all hashing to collide, so we'll exhaustively search
1429 // the table for a match, and the assertion in isEqual will fire if
1430 // there's a bug causing equal keys to hash differently.
1431 if (PHICSEDebugHash)
1432 return 0;
1433 #endif
1434 return getHashValueImpl(PN);
1435 }
1436
1437 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1438 if (isSentinel(LHS) || isSentinel(RHS))
1439 return LHS == RHS;
1440 return LHS->isIdenticalTo(RHS);
1441 }
1442
1443 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1444 // These comparisons are nontrivial, so assert that equality implies
1445 // hash equality (DenseMap demands this as an invariant).
1446 bool Result = isEqualImpl(LHS, RHS);
1447 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1448 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1449 return Result;
1450 }
1451 };
1452
1453 // Set of unique PHINodes.
1454 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1455 PHISet.reserve(4 * PHICSENumPHISmallSize);
1456
1457 // Examine each PHI.
1458 bool Changed = false;
1459 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1460 if (ToRemove.contains(PN))
1461 continue;
1462 auto Inserted = PHISet.insert(PN);
1463 if (!Inserted.second) {
1464 // A duplicate. Replace this PHI with its duplicate.
1465 ++NumPHICSEs;
1466 PN->replaceAllUsesWith(*Inserted.first);
1467 ToRemove.insert(PN);
1468 Changed = true;
1469
1470 // The RAUW can change PHIs that we already visited. Start over from the
1471 // beginning.
1472 PHISet.clear();
1473 I = BB->begin();
1474 }
1475 }
1476
1477 return Changed;
1478 }
1479
1480 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1481 SmallPtrSetImpl<PHINode *> &ToRemove) {
1482 if (
1483 #ifndef NDEBUG
1484 !PHICSEDebugHash &&
1485 #endif
1486 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1487 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1488 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1489 }
1490
1491 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1492 SmallPtrSet<PHINode *, 8> ToRemove;
1493 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1494 for (PHINode *PN : ToRemove)
1495 PN->eraseFromParent();
1496 return Changed;
1497 }
1498
1499 Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
1500 const DataLayout &DL) {
1501 V = V->stripPointerCasts();
1502
1503 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1504 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1505 // than the current alignment, as the known bits calculation should have
1506 // already taken it into account. However, this is not always the case,
1507 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1508 // doesn't.
1509 Align CurrentAlign = AI->getAlign();
1510 if (PrefAlign <= CurrentAlign)
1511 return CurrentAlign;
1512
1513 // If the preferred alignment is greater than the natural stack alignment
1514 // then don't round up. This avoids dynamic stack realignment.
1515 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1516 return CurrentAlign;
1517 AI->setAlignment(PrefAlign);
1518 return PrefAlign;
1519 }
1520
1521 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1522 // TODO: as above, this shouldn't be necessary.
1523 Align CurrentAlign = GO->getPointerAlignment(DL);
1524 if (PrefAlign <= CurrentAlign)
1525 return CurrentAlign;
1526
1527 // If there is a large requested alignment and we can, bump up the alignment
1528 // of the global. If the memory we set aside for the global may not be the
1529 // memory used by the final program then it is impossible for us to reliably
1530 // enforce the preferred alignment.
1531 if (!GO->canIncreaseAlignment())
1532 return CurrentAlign;
1533
1534 if (GO->isThreadLocal()) {
1535 unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1536 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1537 PrefAlign = Align(MaxTLSAlign);
1538 }
1539
1540 GO->setAlignment(PrefAlign);
1541 return PrefAlign;
1542 }
1543
1544 return Align(1);
1545 }
1546
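// A rough worked example of the computation below: if computeKnownBits()
// proves the low three bits of the pointer are zero, countMinTrailingZeros()
// returns 3 and the derived alignment is Align(1ull << 3) == Align(8). A
// larger PrefAlign only takes effect if tryEnforceAlignment() can actually
// raise the alignment of the underlying alloca or global.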
1547 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1548 const DataLayout &DL,
1549 const Instruction *CxtI,
1550 AssumptionCache *AC,
1551 const DominatorTree *DT) {
1552 assert(V->getType()->isPointerTy() &&
1553 "getOrEnforceKnownAlignment expects a pointer!");
1554
1555 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1556 unsigned TrailZ = Known.countMinTrailingZeros();
1557
1558 // Avoid trouble with ridiculously large TrailZ values, such as
1559 // those computed from a null pointer.
1560 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1561 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1562
1563 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1564
1565 if (PrefAlign && *PrefAlign > Alignment)
1566 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1567
1568 // We don't need to make any adjustment.
1569 return Alignment;
1570 }
1571
1572 ///===---------------------------------------------------------------------===//
1573 /// Dbg Intrinsic utilities
1574 ///
1575
1576 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1577 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1578 DIExpression *DIExpr,
1579 PHINode *APN) {
1580 // Since we can't guarantee that the original dbg.declare intrinsic
1581 // is removed by LowerDbgDeclare(), we need to make sure that we are
1582 // not inserting the same dbg.value intrinsic over and over.
1583 SmallVector<DbgValueInst *, 1> DbgValues;
1584 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1585 findDbgValues(DbgValues, APN, &DbgVariableRecords);
1586 for (auto *DVI : DbgValues) {
1587 assert(is_contained(DVI->getValues(), APN));
1588 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1589 return true;
1590 }
1591 for (auto *DVR : DbgVariableRecords) {
1592 assert(is_contained(DVR->location_ops(), APN));
1593 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1594 return true;
1595 }
1596 return false;
1597 }
1598
1599 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1600 /// (or fragment of the variable) described by \p DII.
1601 ///
1602 /// This is primarily intended as a helper for the different
1603 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1604 /// describes an alloca'd variable, so we need to use the alloc size of the
1605 /// value when doing the comparison. E.g. an i1 value will be identified as
1606 /// covering an n-bit fragment, if the store size of i1 is at least n bits.
1607 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1608 const DataLayout &DL = DII->getDataLayout();
1609 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1610 if (std::optional<uint64_t> FragmentSize =
1611 DII->getExpression()->getActiveBits(DII->getVariable()))
1612 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1613
1614 // We can't always calculate the size of the DI variable (e.g. if it is a
1615 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1616   // instead.
1617 if (DII->isAddressOfVariable()) {
1618 // DII should have exactly 1 location when it is an address.
1619 assert(DII->getNumVariableLocationOps() == 1 &&
1620 "address of variable must have exactly 1 location operand.");
1621 if (auto *AI =
1622 dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1623 if (std::optional<TypeSize> FragmentSize =
1624 AI->getAllocationSizeInBits(DL)) {
1625 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1626 }
1627 }
1628 }
1629 // Could not determine size of variable. Conservatively return false.
1630 return false;
1631 }
1632 // RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
1633 // the replacement for dbg.values.
1634 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
1635 const DataLayout &DL = DVR->getModule()->getDataLayout();
1636 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1637 if (std::optional<uint64_t> FragmentSize =
1638 DVR->getExpression()->getActiveBits(DVR->getVariable()))
1639 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1640
1641 // We can't always calculate the size of the DI variable (e.g. if it is a
1642 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1643   // instead.
1644 if (DVR->isAddressOfVariable()) {
1645 // DVR should have exactly 1 location when it is an address.
1646 assert(DVR->getNumVariableLocationOps() == 1 &&
1647 "address of variable must have exactly 1 location operand.");
1648 if (auto *AI =
1649 dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) {
1650 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1651 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1652 }
1653 }
1654 }
1655 // Could not determine size of variable. Conservatively return false.
1656 return false;
1657 }
1658
1659 static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1660 DILocalVariable *DIVar,
1661 DIExpression *DIExpr,
1662 const DebugLoc &NewLoc,
1663 BasicBlock::iterator Instr) {
1664 if (!UseNewDbgInfoFormat) {
1665 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1666 (Instruction *)nullptr);
1667 DbgVal.get<Instruction *>()->insertBefore(Instr);
1668 } else {
1669 // RemoveDIs: if we're using the new debug-info format, allocate a
1670 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1671 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1672 DbgVariableRecord *DV =
1673 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1674 Instr->getParent()->insertDbgRecordBefore(DV, Instr);
1675 }
1676 }
1677
1678 static void insertDbgValueOrDbgVariableRecordAfter(
1679 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
1680 const DebugLoc &NewLoc, BasicBlock::iterator Instr) {
1681 if (!UseNewDbgInfoFormat) {
1682 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1683 (Instruction *)nullptr);
1684 DbgVal.get<Instruction *>()->insertAfter(&*Instr);
1685 } else {
1686 // RemoveDIs: if we're using the new debug-info format, allocate a
1687 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1688 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1689 DbgVariableRecord *DV =
1690 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1691 Instr->getParent()->insertDbgRecordAfter(DV, &*Instr);
1692 }
1693 }
1694
1695 /// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
1696 /// that has an associated llvm.dbg.declare intrinsic.
1697 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1698 StoreInst *SI, DIBuilder &Builder) {
1699 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1700 auto *DIVar = DII->getVariable();
1701 assert(DIVar && "Missing variable");
1702 auto *DIExpr = DII->getExpression();
1703 Value *DV = SI->getValueOperand();
1704
1705 DebugLoc NewLoc = getDebugValueLoc(DII);
1706
1707 // If the alloca describes the variable itself, i.e. the expression in the
1708 // dbg.declare doesn't start with a dereference, we can perform the
1709 // conversion if the value covers the entire fragment of DII.
1710 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1711 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1712 // We conservatively ignore other dereferences, because the following two are
1713 // not equivalent:
1714 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1715 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1716 // The former is adding 2 to the address of the variable, whereas the latter
1717 // is adding 2 to the value of the variable. As such, we insist on just a
1718 // deref expression.
1719 bool CanConvert =
1720 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1721 valueCoversEntireFragment(DV->getType(), DII));
1722 if (CanConvert) {
1723 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1724 SI->getIterator());
1725 return;
1726 }
1727
1728 // FIXME: If storing to a part of the variable described by the dbg.declare,
1729 // then we want to insert a dbg.value for the corresponding fragment.
1730 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1731 << '\n');
1732 // For now, when there is a store to parts of the variable (but we do not
1733   // know which part) we insert a dbg.value intrinsic to indicate that we
1734 // know nothing about the variable's content.
1735 DV = UndefValue::get(DV->getType());
1736 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1737 SI->getIterator());
1738 }
1739
1740 /// Inserts an llvm.dbg.value intrinsic after a load of an alloca'd value
1741 /// that has an associated llvm.dbg.declare intrinsic.
1742 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1743 LoadInst *LI, DIBuilder &Builder) {
1744 auto *DIVar = DII->getVariable();
1745 auto *DIExpr = DII->getExpression();
1746 assert(DIVar && "Missing variable");
1747
1748 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1749 // FIXME: If only referring to a part of the variable described by the
1750 // dbg.declare, then we want to insert a dbg.value for the corresponding
1751 // fragment.
1752 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1753 << *DII << '\n');
1754 return;
1755 }
1756
1757 DebugLoc NewLoc = getDebugValueLoc(DII);
1758
1759 // We are now tracking the loaded value instead of the address. In the
1760 // future if multi-location support is added to the IR, it might be
1761 // preferable to keep tracking both the loaded value and the original
1762 // address in case the alloca can not be elided.
1763 insertDbgValueOrDbgVariableRecordAfter(Builder, LI, DIVar, DIExpr, NewLoc,
1764 LI->getIterator());
1765 }
1766
1767 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1768 StoreInst *SI, DIBuilder &Builder) {
1769 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1770 auto *DIVar = DVR->getVariable();
1771 assert(DIVar && "Missing variable");
1772 auto *DIExpr = DVR->getExpression();
1773 Value *DV = SI->getValueOperand();
1774
1775 DebugLoc NewLoc = getDebugValueLoc(DVR);
1776
1777 // If the alloca describes the variable itself, i.e. the expression in the
1778 // dbg.declare doesn't start with a dereference, we can perform the
1779   // conversion if the value covers the entire fragment of DVR.
1780 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1781 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1782 // We conservatively ignore other dereferences, because the following two are
1783 // not equivalent:
1784 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1785 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1786 // The former is adding 2 to the address of the variable, whereas the latter
1787 // is adding 2 to the value of the variable. As such, we insist on just a
1788 // deref expression.
1789 bool CanConvert =
1790 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1791 valueCoversEntireFragment(DV->getType(), DVR));
1792 if (CanConvert) {
1793 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1794 SI->getIterator());
1795 return;
1796 }
1797
1798 // FIXME: If storing to a part of the variable described by the dbg.declare,
1799 // then we want to insert a dbg.value for the corresponding fragment.
1800 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1801 << '\n');
1802 assert(UseNewDbgInfoFormat);
1803
1804 // For now, when there is a store to parts of the variable (but we do not
1805   // know which part) we insert a dbg.value intrinsic to indicate that we
1806 // know nothing about the variable's content.
1807 DV = UndefValue::get(DV->getType());
1808 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1809 DbgVariableRecord *NewDVR =
1810 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1811 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1812 }
1813
1814 /// Inserts an llvm.dbg.value intrinsic after a phi that has an associated
1815 /// llvm.dbg.declare intrinsic.
1816 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1817 PHINode *APN, DIBuilder &Builder) {
1818 auto *DIVar = DII->getVariable();
1819 auto *DIExpr = DII->getExpression();
1820 assert(DIVar && "Missing variable");
1821
1822 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1823 return;
1824
1825 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1826 // FIXME: If only referring to a part of the variable described by the
1827 // dbg.declare, then we want to insert a dbg.value for the corresponding
1828 // fragment.
1829 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1830 << *DII << '\n');
1831 return;
1832 }
1833
1834 BasicBlock *BB = APN->getParent();
1835 auto InsertionPt = BB->getFirstInsertionPt();
1836
1837 DebugLoc NewLoc = getDebugValueLoc(DII);
1838
1839 // The block may be a catchswitch block, which does not have a valid
1840 // insertion point.
1841 // FIXME: Insert dbg.value markers in the successors when appropriate.
1842 if (InsertionPt != BB->end()) {
1843 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1844 InsertionPt);
1845 }
1846 }
1847
1848 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1849 DIBuilder &Builder) {
1850 auto *DIVar = DVR->getVariable();
1851 auto *DIExpr = DVR->getExpression();
1852 assert(DIVar && "Missing variable");
1853
1854 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1855 // FIXME: If only referring to a part of the variable described by the
1856 // dbg.declare, then we want to insert a DbgVariableRecord for the
1857 // corresponding fragment.
1858 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1859 << *DVR << '\n');
1860 return;
1861 }
1862
1863 DebugLoc NewLoc = getDebugValueLoc(DVR);
1864
1865 // We are now tracking the loaded value instead of the address. In the
1866 // future if multi-location support is added to the IR, it might be
1867 // preferable to keep tracking both the loaded value and the original
1868 // address in case the alloca can not be elided.
1869 assert(UseNewDbgInfoFormat);
1870
1871 // Create a DbgVariableRecord directly and insert.
1872 ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
1873 DbgVariableRecord *DV =
1874 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1875 LI->getParent()->insertDbgRecordAfter(DV, LI);
1876 }
1877
1878 /// Determine whether this alloca is either a VLA or an array.
1879 static bool isArray(AllocaInst *AI) {
1880 return AI->isArrayAllocation() ||
1881 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1882 }
1883
1884 /// Determine whether this alloca is a structure.
1885 static bool isStructure(AllocaInst *AI) {
1886 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1887 }
1888 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1889 DIBuilder &Builder) {
1890 auto *DIVar = DVR->getVariable();
1891 auto *DIExpr = DVR->getExpression();
1892 assert(DIVar && "Missing variable");
1893
1894 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1895 return;
1896
1897 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1898 // FIXME: If only referring to a part of the variable described by the
1899 // dbg.declare, then we want to insert a DbgVariableRecord for the
1900 // corresponding fragment.
1901 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1902 << *DVR << '\n');
1903 return;
1904 }
1905
1906 BasicBlock *BB = APN->getParent();
1907 auto InsertionPt = BB->getFirstInsertionPt();
1908
1909 DebugLoc NewLoc = getDebugValueLoc(DVR);
1910
1911 // The block may be a catchswitch block, which does not have a valid
1912 // insertion point.
1913 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1914 if (InsertionPt != BB->end()) {
1915 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1916 InsertionPt);
1917 }
1918 }
1919
1920 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1921 /// of llvm.dbg.value intrinsics.
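///
/// A minimal sketch of the effect for a scalar alloca (abbreviated syntax,
/// in the style of other comments in this file):
///   %x = alloca i32
///   dbg.declare(%x, !var, !DIExpression())
///   store i32 %v, ptr %x
/// is lowered so that the store is instead described by
///   dbg.value(%v, !var, !DIExpression())
/// and the original dbg.declare is erased; loads and address-taking calls
/// are handled analogously below.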
1922 bool llvm::LowerDbgDeclare(Function &F) {
1923 bool Changed = false;
1924 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1925 SmallVector<DbgDeclareInst *, 4> Dbgs;
1926 SmallVector<DbgVariableRecord *> DVRs;
1927 for (auto &FI : F) {
1928 for (Instruction &BI : FI) {
1929 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1930 Dbgs.push_back(DDI);
1931 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1932 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1933 DVRs.push_back(&DVR);
1934 }
1935 }
1936 }
1937
1938 if (Dbgs.empty() && DVRs.empty())
1939 return Changed;
1940
1941 auto LowerOne = [&](auto *DDI) {
1942 AllocaInst *AI =
1943 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1944 // If this is an alloca for a scalar variable, insert a dbg.value
1945 // at each load and store to the alloca and erase the dbg.declare.
1946 // The dbg.values allow tracking a variable even if it is not
1947 // stored on the stack, while the dbg.declare can only describe
1948 // the stack slot (and at a lexical-scope granularity). Later
1949 // passes will attempt to elide the stack slot.
1950 if (!AI || isArray(AI) || isStructure(AI))
1951 return;
1952
1953 // A volatile load/store means that the alloca can't be elided anyway.
1954 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1955 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1956 return LI->isVolatile();
1957 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1958 return SI->isVolatile();
1959 return false;
1960 }))
1961 return;
1962
1963 SmallVector<const Value *, 8> WorkList;
1964 WorkList.push_back(AI);
1965 while (!WorkList.empty()) {
1966 const Value *V = WorkList.pop_back_val();
1967 for (const auto &AIUse : V->uses()) {
1968 User *U = AIUse.getUser();
1969 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1970 if (AIUse.getOperandNo() == 1)
1971 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1972 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1973 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1974 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1975 // This is a call by-value or some other instruction that takes a
1976 // pointer to the variable. Insert a *value* intrinsic that describes
1977 // the variable by dereferencing the alloca.
1978 if (!CI->isLifetimeStartOrEnd()) {
1979 DebugLoc NewLoc = getDebugValueLoc(DDI);
1980 auto *DerefExpr =
1981 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1982 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1983 DerefExpr, NewLoc,
1984 CI->getIterator());
1985 }
1986 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1987 if (BI->getType()->isPointerTy())
1988 WorkList.push_back(BI);
1989 }
1990 }
1991 }
1992 DDI->eraseFromParent();
1993 Changed = true;
1994 };
1995
1996 for_each(Dbgs, LowerOne);
1997 for_each(DVRs, LowerOne);
1998
1999 if (Changed)
2000 for (BasicBlock &BB : F)
2001 RemoveRedundantDbgInstrs(&BB);
2002
2003 return Changed;
2004 }
2005
2006 // RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
2007 // debug-info out of the block's DbgVariableRecords rather than dbg.value
2008 // intrinsics.
2009 static void
2010 insertDbgVariableRecordsForPHIs(BasicBlock *BB,
2011 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2012 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
2013 if (InsertedPHIs.size() == 0)
2014 return;
2015
2016 // Map existing PHI nodes to their DbgVariableRecords.
2017 DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
2018 for (auto &I : *BB) {
2019 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
2020 for (Value *V : DVR.location_ops())
2021 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2022 DbgValueMap.insert({Loc, &DVR});
2023 }
2024 }
2025 if (DbgValueMap.size() == 0)
2026 return;
2027
2028 // Map a pair of the destination BB and old DbgVariableRecord to the new
2029 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
2030 // more than one of the inserted PHIs in the same destination BB, we can
2031 // update the same DbgVariableRecord with all the new PHIs instead of creating
2032 // one copy for each.
2033 MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
2034 NewDbgValueMap;
2035 // Then iterate through the new PHIs and look to see if they use one of the
2036 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
2037 // propagate the info through the new PHI. If we use more than one new PHI in
2038 // a single destination BB with the same old dbg.value, merge the updates so
2039 // that we get a single new DbgVariableRecord with all the new PHIs.
2040 for (auto PHI : InsertedPHIs) {
2041 BasicBlock *Parent = PHI->getParent();
2042 // Avoid inserting a debug-info record into an EH block.
2043 if (Parent->getFirstNonPHI()->isEHPad())
2044 continue;
2045 for (auto VI : PHI->operand_values()) {
2046 auto V = DbgValueMap.find(VI);
2047 if (V != DbgValueMap.end()) {
2048 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
2049 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2050 if (NewDI == NewDbgValueMap.end()) {
2051 DbgVariableRecord *NewDbgII = DbgII->clone();
2052 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2053 }
2054 DbgVariableRecord *NewDbgII = NewDI->second;
2055         // If PHI contains VI as an operand more than once, we may have
2056         // replaced it in NewDbgII; confirm that it is present.
2057 if (is_contained(NewDbgII->location_ops(), VI))
2058 NewDbgII->replaceVariableLocationOp(VI, PHI);
2059 }
2060 }
2061 }
2062 // Insert the new DbgVariableRecords into their destination blocks.
2063 for (auto DI : NewDbgValueMap) {
2064 BasicBlock *Parent = DI.first.first;
2065 DbgVariableRecord *NewDbgII = DI.second;
2066 auto InsertionPt = Parent->getFirstInsertionPt();
2067 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2068
2069 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
2070 }
2071 }
2072
2073 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
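/// A minimal sketch of the propagation: if a dbg.value in \p BB refers to an
/// existing PHI %old, and %old is an incoming value of a newly inserted PHI
/// %new, a clone of that dbg.value is placed at the first insertion point of
/// %new's block with %old rewritten to %new; updates are merged when several
/// new PHIs land in the same block.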
2074 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
2075 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2076 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
2077 if (InsertedPHIs.size() == 0)
2078 return;
2079
2080 insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);
2081
2082 // Map existing PHI nodes to their dbg.values.
2083 ValueToValueMapTy DbgValueMap;
2084 for (auto &I : *BB) {
2085 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
2086 for (Value *V : DbgII->location_ops())
2087 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2088 DbgValueMap.insert({Loc, DbgII});
2089 }
2090 }
2091 if (DbgValueMap.size() == 0)
2092 return;
2093
2094 // Map a pair of the destination BB and old dbg.value to the new dbg.value,
2095 // so that if a dbg.value is being rewritten to use more than one of the
2096 // inserted PHIs in the same destination BB, we can update the same dbg.value
2097 // with all the new PHIs instead of creating one copy for each.
2098 MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
2099 DbgVariableIntrinsic *>
2100 NewDbgValueMap;
2101 // Then iterate through the new PHIs and look to see if they use one of the
2102 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
2103 // propagate the info through the new PHI. If we use more than one new PHI in
2104 // a single destination BB with the same old dbg.value, merge the updates so
2105 // that we get a single new dbg.value with all the new PHIs.
2106 for (auto *PHI : InsertedPHIs) {
2107 BasicBlock *Parent = PHI->getParent();
2108 // Avoid inserting an intrinsic into an EH block.
2109 if (Parent->getFirstNonPHI()->isEHPad())
2110 continue;
2111 for (auto *VI : PHI->operand_values()) {
2112 auto V = DbgValueMap.find(VI);
2113 if (V != DbgValueMap.end()) {
2114 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
2115 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2116 if (NewDI == NewDbgValueMap.end()) {
2117 auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
2118 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2119 }
2120 DbgVariableIntrinsic *NewDbgII = NewDI->second;
2121         // If PHI contains VI as an operand more than once, we may have
2122         // replaced it in NewDbgII; confirm that it is present.
2123 if (is_contained(NewDbgII->location_ops(), VI))
2124 NewDbgII->replaceVariableLocationOp(VI, PHI);
2125 }
2126 }
2127 }
2128   // Insert the new dbg.values into their destination blocks.
2129 for (auto DI : NewDbgValueMap) {
2130 BasicBlock *Parent = DI.first.first;
2131 auto *NewDbgII = DI.second;
2132 auto InsertionPt = Parent->getFirstInsertionPt();
2133 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2134 NewDbgII->insertBefore(&*InsertionPt);
2135 }
2136 }
2137
2138 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2139 DIBuilder &Builder, uint8_t DIExprFlags,
2140 int Offset) {
2141 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address);
2142 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(Address);
2143
2144 auto ReplaceOne = [&](auto *DII) {
2145 assert(DII->getVariable() && "Missing variable");
2146 auto *DIExpr = DII->getExpression();
2147 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
2148 DII->setExpression(DIExpr);
2149 DII->replaceVariableLocationOp(Address, NewAddress);
2150 };
2151
2152 for_each(DbgDeclares, ReplaceOne);
2153 for_each(DVRDeclares, ReplaceOne);
2154
2155 return !DbgDeclares.empty() || !DVRDeclares.empty();
2156 }
2157
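// Sketch of the rewrite below for the common alloca-based form: a
// dbg.value/DbgVariableRecord whose expression begins with DW_OP_deref is
// retargeted from the old alloca to NewAddress, and a non-zero Offset is
// prepended before the deref, e.g. (DW_OP_deref) with Offset 4 becomes
// (DW_OP_plus_uconst, 4, DW_OP_deref).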
2158 static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2159 DILocalVariable *DIVar,
2160 DIExpression *DIExpr, Value *NewAddress,
2161 DbgValueInst *DVI,
2162 DbgVariableRecord *DVR,
2163 DIBuilder &Builder, int Offset) {
2164 assert(DIVar && "Missing variable");
2165
2166 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
2167 // should do with the alloca pointer is dereference it. Otherwise we don't
2168 // know how to handle it and give up.
2169 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2170 DIExpr->getElement(0) != dwarf::DW_OP_deref)
2171 return;
2172
2173 // Insert the offset before the first deref.
2174 if (Offset)
2175 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
2176
2177 if (DVI) {
2178 DVI->setExpression(DIExpr);
2179 DVI->replaceVariableLocationOp(0u, NewAddress);
2180 } else {
2181 assert(DVR);
2182 DVR->setExpression(DIExpr);
2183 DVR->replaceVariableLocationOp(0u, NewAddress);
2184 }
2185 }
2186
2187 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2188 DIBuilder &Builder, int Offset) {
2189 SmallVector<DbgValueInst *, 1> DbgUsers;
2190 SmallVector<DbgVariableRecord *, 1> DPUsers;
2191 findDbgValues(DbgUsers, AI, &DPUsers);
2192
2193 // Attempt to replace dbg.values that use this alloca.
2194 for (auto *DVI : DbgUsers)
2195 updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(),
2196 DVI->getExpression(), NewAllocaAddress, DVI,
2197 nullptr, Builder, Offset);
2198
2199 // Replace any DbgVariableRecords that use this alloca.
2200 for (DbgVariableRecord *DVR : DPUsers)
2201 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2202 DVR->getExpression(), NewAllocaAddress, nullptr,
2203 DVR, Builder, Offset);
2204 }
2205
2206 /// Where possible to salvage debug information for \p I do so.
2207 /// If not possible mark undef.
2208 void llvm::salvageDebugInfo(Instruction &I) {
2209 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2210 SmallVector<DbgVariableRecord *, 1> DPUsers;
2211 findDbgUsers(DbgUsers, &I, &DPUsers);
2212 salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers);
2213 }
2214
2215 template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2216 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2217 // Only instructions can be salvaged at the moment.
2218 if (!I)
2219 return;
2220
2221 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2222 "address-expression shouldn't have fragment info");
2223
2224 // The address component of a dbg.assign cannot be variadic.
2225 uint64_t CurrentLocOps = 0;
2226 SmallVector<Value *, 4> AdditionalValues;
2227 SmallVector<uint64_t, 16> Ops;
2228 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2229
2230 // Check if the salvage failed.
2231 if (!NewV)
2232 return;
2233
2234 DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2235 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2236 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2237 "address-expression shouldn't have fragment info");
2238
2239 SalvagedExpr = SalvagedExpr->foldConstantMath();
2240
2241 // Salvage succeeds if no additional values are required.
2242 if (AdditionalValues.empty()) {
2243 Assign->setAddress(NewV);
2244 Assign->setAddressExpression(SalvagedExpr);
2245 } else {
2246 Assign->setKillAddress();
2247 }
2248 }
2249
2250 void llvm::salvageDebugInfoForDbgValues(
2251 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2252 ArrayRef<DbgVariableRecord *> DPUsers) {
2253   // These are arbitrarily chosen limits on the maximum number of values and
2254   // the maximum size of a debug expression we can salvage up to, used for
2255 // performance reasons.
2256 const unsigned MaxDebugArgs = 16;
2257 const unsigned MaxExpressionSize = 128;
2258 bool Salvaged = false;
2259
2260 for (auto *DII : DbgUsers) {
2261 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
2262 if (DAI->getAddress() == &I) {
2263 salvageDbgAssignAddress(DAI);
2264 Salvaged = true;
2265 }
2266 if (DAI->getValue() != &I)
2267 continue;
2268 }
2269
2270 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
2271 // pointing out the value as a DWARF memory location description.
2272 bool StackValue = isa<DbgValueInst>(DII);
2273 auto DIILocation = DII->location_ops();
2274 assert(
2275 is_contained(DIILocation, &I) &&
2276 "DbgVariableIntrinsic must use salvaged instruction as its location");
2277 SmallVector<Value *, 4> AdditionalValues;
2278 // `I` may appear more than once in DII's location ops, and each use of `I`
2279 // must be updated in the DIExpression and potentially have additional
2280 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2281 // DIILocation.
2282 Value *Op0 = nullptr;
2283 DIExpression *SalvagedExpr = DII->getExpression();
2284 auto LocItr = find(DIILocation, &I);
2285 while (SalvagedExpr && LocItr != DIILocation.end()) {
2286 SmallVector<uint64_t, 16> Ops;
2287 unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
2288 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2289 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2290 if (!Op0)
2291 break;
2292 SalvagedExpr =
2293 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2294 LocItr = std::find(++LocItr, DIILocation.end(), &I);
2295 }
2296 // salvageDebugInfoImpl should fail on examining the first element of
2297 // DbgUsers, or none of them.
2298 if (!Op0)
2299 break;
2300
2301 SalvagedExpr = SalvagedExpr->foldConstantMath();
2302 DII->replaceVariableLocationOp(&I, Op0);
2303 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2304 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2305 DII->setExpression(SalvagedExpr);
2306 } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
2307 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2308 MaxDebugArgs) {
2309 DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2310 } else {
2311 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2312 // supported in those instructions. Also do not salvage if the resulting
2313 // DIArgList would contain an unreasonably large number of values.
2314 DII->setKillLocation();
2315 }
2316 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2317 Salvaged = true;
2318 }
2319 // Duplicate of above block for DbgVariableRecords.
2320 for (auto *DVR : DPUsers) {
2321 if (DVR->isDbgAssign()) {
2322 if (DVR->getAddress() == &I) {
2323 salvageDbgAssignAddress(DVR);
2324 Salvaged = true;
2325 }
2326 if (DVR->getValue() != &I)
2327 continue;
2328 }
2329
2330 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2331 // are implicitly pointing out the value as a DWARF memory location
2332 // description.
2333 bool StackValue =
2334 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2335 auto DVRLocation = DVR->location_ops();
2336 assert(
2337 is_contained(DVRLocation, &I) &&
2338 "DbgVariableIntrinsic must use salvaged instruction as its location");
2339 SmallVector<Value *, 4> AdditionalValues;
2340 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2341 // must be updated in the DIExpression and potentially have additional
2342 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2343 // DVRLocation.
2344 Value *Op0 = nullptr;
2345 DIExpression *SalvagedExpr = DVR->getExpression();
2346 auto LocItr = find(DVRLocation, &I);
2347 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2348 SmallVector<uint64_t, 16> Ops;
2349 unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2350 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2351 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2352 if (!Op0)
2353 break;
2354 SalvagedExpr =
2355 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2356 LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2357 }
2358 // salvageDebugInfoImpl should fail on examining the first element of
2359 // DbgUsers, or none of them.
2360 if (!Op0)
2361 break;
2362
2363 SalvagedExpr = SalvagedExpr->foldConstantMath();
2364 DVR->replaceVariableLocationOp(&I, Op0);
2365 bool IsValidSalvageExpr =
2366 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2367 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2368 DVR->setExpression(SalvagedExpr);
2369 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2370 IsValidSalvageExpr &&
2371 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2372 MaxDebugArgs) {
2373 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2374 } else {
2375 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2376 // currently only valid for stack value expressions.
2377 // Also do not salvage if the resulting DIArgList would contain an
2378 // unreasonably large number of values.
2379 DVR->setKillLocation();
2380 }
2381     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DVR << '\n');
2382 Salvaged = true;
2383 }
2384
2385 if (Salvaged)
2386 return;
2387
2388 for (auto *DII : DbgUsers)
2389 DII->setKillLocation();
2390
2391 for (auto *DVR : DPUsers)
2392 DVR->setKillLocation();
2393 }
2394
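// Illustrative sketch of the GEP salvage below: for
//   %p = getelementptr i16, ptr %base, i64 %idx
// with no location operands in use yet, the emitted expression ops are
// roughly
//   DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_constu 2, DW_OP_mul, DW_OP_plus
// with %idx appended to AdditionalValues and %base returned as the new
// location operand; a purely constant offset is instead folded in via
// DIExpression::appendOffset().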
2395 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2396 uint64_t CurrentLocOps,
2397 SmallVectorImpl<uint64_t> &Opcodes,
2398 SmallVectorImpl<Value *> &AdditionalValues) {
2399 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2400 // Rewrite a GEP into a DIExpression.
2401 MapVector<Value *, APInt> VariableOffsets;
2402 APInt ConstantOffset(BitWidth, 0);
2403 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2404 return nullptr;
2405 if (!VariableOffsets.empty() && !CurrentLocOps) {
2406 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2407 CurrentLocOps = 1;
2408 }
2409 for (const auto &Offset : VariableOffsets) {
2410 AdditionalValues.push_back(Offset.first);
2411 assert(Offset.second.isStrictlyPositive() &&
2412 "Expected strictly positive multiplier for offset.");
2413 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2414 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2415 dwarf::DW_OP_plus});
2416 }
2417 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2418 return GEP->getOperand(0);
2419 }
2420
2421 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2422 switch (Opcode) {
2423 case Instruction::Add:
2424 return dwarf::DW_OP_plus;
2425 case Instruction::Sub:
2426 return dwarf::DW_OP_minus;
2427 case Instruction::Mul:
2428 return dwarf::DW_OP_mul;
2429 case Instruction::SDiv:
2430 return dwarf::DW_OP_div;
2431 case Instruction::SRem:
2432 return dwarf::DW_OP_mod;
2433 case Instruction::Or:
2434 return dwarf::DW_OP_or;
2435 case Instruction::And:
2436 return dwarf::DW_OP_and;
2437 case Instruction::Xor:
2438 return dwarf::DW_OP_xor;
2439 case Instruction::Shl:
2440 return dwarf::DW_OP_shl;
2441 case Instruction::LShr:
2442 return dwarf::DW_OP_shr;
2443 case Instruction::AShr:
2444 return dwarf::DW_OP_shra;
2445 default:
2446 // TODO: Salvage from each kind of binop we know about.
2447 return 0;
2448 }
2449 }
2450
2451 static void handleSSAValueOperands(uint64_t CurrentLocOps,
2452 SmallVectorImpl<uint64_t> &Opcodes,
2453 SmallVectorImpl<Value *> &AdditionalValues,
2454 Instruction *I) {
2455 if (!CurrentLocOps) {
2456 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2457 CurrentLocOps = 1;
2458 }
2459 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2460 AdditionalValues.push_back(I->getOperand(1));
2461 }
2462
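// Illustrative sketch of the binop salvage below: an add of a small constant,
// e.g.
//   %r = add i64 %x, 42
// folds straight into the expression as an offset (DW_OP_plus_uconst 42 via
// DIExpression::appendOffset) and returns %x, while a non-constant RHS is
// referenced as an extra DW_OP_LLVM_arg location operand followed by the
// DWARF opcode from getDwarfOpForBinOp().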
2463 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2464 SmallVectorImpl<uint64_t> &Opcodes,
2465 SmallVectorImpl<Value *> &AdditionalValues) {
2466 // Handle binary operations with constant integer operands as a special case.
2467 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2468 // Values wider than 64 bits cannot be represented within a DIExpression.
2469 if (ConstInt && ConstInt->getBitWidth() > 64)
2470 return nullptr;
2471
2472 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2473 // Push any Constant Int operand onto the expression stack.
2474 if (ConstInt) {
2475 uint64_t Val = ConstInt->getSExtValue();
2476 // Add or Sub Instructions with a constant operand can potentially be
2477 // simplified.
2478 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2479 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2480 DIExpression::appendOffset(Opcodes, Offset);
2481 return BI->getOperand(0);
2482 }
2483 Opcodes.append({dwarf::DW_OP_constu, Val});
2484 } else {
2485 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2486 }
2487
2488 // Add salvaged binary operator to expression stack, if it has a valid
2489 // representation in a DIExpression.
2490 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2491 if (!DwarfBinOp)
2492 return nullptr;
2493 Opcodes.push_back(DwarfBinOp);
2494 return BI->getOperand(0);
2495 }
2496
2497 uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2498   // The signedness of the operation is implicit in the typed stack; signed and
2499 // unsigned instructions map to the same DWARF opcode.
2500 switch (Pred) {
2501 case CmpInst::ICMP_EQ:
2502 return dwarf::DW_OP_eq;
2503 case CmpInst::ICMP_NE:
2504 return dwarf::DW_OP_ne;
2505 case CmpInst::ICMP_UGT:
2506 case CmpInst::ICMP_SGT:
2507 return dwarf::DW_OP_gt;
2508 case CmpInst::ICMP_UGE:
2509 case CmpInst::ICMP_SGE:
2510 return dwarf::DW_OP_ge;
2511 case CmpInst::ICMP_ULT:
2512 case CmpInst::ICMP_SLT:
2513 return dwarf::DW_OP_lt;
2514 case CmpInst::ICMP_ULE:
2515 case CmpInst::ICMP_SLE:
2516 return dwarf::DW_OP_le;
2517 default:
2518 return 0;
2519 }
2520 }
2521
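// Illustrative sketch of the icmp salvage below: for
//   %c = icmp ult i32 %x, 7
// the ops appended are roughly DW_OP_constu 7, DW_OP_lt, and %x is returned
// as the remaining location operand; signed predicates push DW_OP_consts but
// map to the same comparison opcodes.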
2522 Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2523 SmallVectorImpl<uint64_t> &Opcodes,
2524 SmallVectorImpl<Value *> &AdditionalValues) {
2525 // Handle icmp operations with constant integer operands as a special case.
2526 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2527 // Values wider than 64 bits cannot be represented within a DIExpression.
2528 if (ConstInt && ConstInt->getBitWidth() > 64)
2529 return nullptr;
2530 // Push any Constant Int operand onto the expression stack.
2531 if (ConstInt) {
2532 if (Icmp->isSigned())
2533 Opcodes.push_back(dwarf::DW_OP_consts);
2534 else
2535 Opcodes.push_back(dwarf::DW_OP_constu);
2536 uint64_t Val = ConstInt->getSExtValue();
2537 Opcodes.push_back(Val);
2538 } else {
2539 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2540 }
2541
2542 // Add salvaged binary operator to expression stack, if it has a valid
2543 // representation in a DIExpression.
2544 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2545 if (!DwarfIcmpOp)
2546 return nullptr;
2547 Opcodes.push_back(DwarfIcmpOp);
2548 return Icmp->getOperand(0);
2549 }
2550
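// Entry point that dispatches to the per-opcode helpers above and handles
// casts directly. As a rough example of the cast handling (assuming the
// usual DIExpression::getExtOps encoding), salvaging
//   %w = sext i16 %v to i32
// appends signed 16-to-32-bit conversion ops and returns %v, while no-op
// casts simply forward their operand unchanged.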
2551 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2552 SmallVectorImpl<uint64_t> &Ops,
2553 SmallVectorImpl<Value *> &AdditionalValues) {
2554 auto &M = *I.getModule();
2555 auto &DL = M.getDataLayout();
2556
2557 if (auto *CI = dyn_cast<CastInst>(&I)) {
2558 Value *FromValue = CI->getOperand(0);
2559 // No-op casts are irrelevant for debug info.
2560 if (CI->isNoopCast(DL)) {
2561 return FromValue;
2562 }
2563
2564 Type *Type = CI->getType();
2565 if (Type->isPointerTy())
2566 Type = DL.getIntPtrType(Type);
2567 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2568 if (Type->isVectorTy() ||
2569 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2570 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2571 return nullptr;
2572
2573 llvm::Type *FromType = FromValue->getType();
2574 if (FromType->isPointerTy())
2575 FromType = DL.getIntPtrType(FromType);
2576
2577 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2578 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2579
2580 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2581 isa<SExtInst>(&I));
2582 Ops.append(ExtOps.begin(), ExtOps.end());
2583 return FromValue;
2584 }
2585
2586 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2587 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2588 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2589 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2590 if (auto *IC = dyn_cast<ICmpInst>(&I))
2591 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2592
2593 // *Not* to do: we should not attempt to salvage load instructions,
2594 // because the validity and lifetime of a dbg.value containing
2595 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2596 return nullptr;
2597 }
2598
2599 /// A replacement for a dbg.value expression.
2600 using DbgValReplacement = std::optional<DIExpression *>;
2601
2602 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2603 /// possibly moving/undefing users to prevent use-before-def. Returns true if
2604 /// changes are made.
2605 static bool rewriteDebugUsers(
2606 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2607 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
2608 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2609 // Find debug users of From.
2610 SmallVector<DbgVariableIntrinsic *, 1> Users;
2611 SmallVector<DbgVariableRecord *, 1> DPUsers;
2612 findDbgUsers(Users, &From, &DPUsers);
2613 if (Users.empty() && DPUsers.empty())
2614 return false;
2615
2616 // Prevent use-before-def of To.
2617 bool Changed = false;
2618
2619 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2620 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2621 if (isa<Instruction>(&To)) {
2622 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2623
2624 for (auto *DII : Users) {
2625 // It's common to see a debug user between From and DomPoint. Move it
2626 // after DomPoint to preserve the variable update without any reordering.
2627 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2628 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
2629 DII->moveAfter(&DomPoint);
2630 Changed = true;
2631
2632 // Users which otherwise aren't dominated by the replacement value must
2633 // be salvaged or deleted.
2634 } else if (!DT.dominates(&DomPoint, DII)) {
2635 UndefOrSalvage.insert(DII);
2636 }
2637 }
2638
2639 // DbgVariableRecord implementation of the above.
2640 for (auto *DVR : DPUsers) {
2641 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2642 Instruction *NextNonDebug = MarkedInstr;
2643 // The next instruction might still be a dbg.declare, skip over it.
2644 if (isa<DbgVariableIntrinsic>(NextNonDebug))
2645 NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
2646
2647 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2648 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2649 DVR->removeFromParent();
2650 // Ensure there's a marker.
2651 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2652 Changed = true;
2653 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2654 UndefOrSalvageDVR.insert(DVR);
2655 }
2656 }
2657 }
2658
2659 // Update debug users without use-before-def risk.
2660 for (auto *DII : Users) {
2661 if (UndefOrSalvage.count(DII))
2662 continue;
2663
2664 DbgValReplacement DVRepl = RewriteExpr(*DII);
2665 if (!DVRepl)
2666 continue;
2667
2668 DII->replaceVariableLocationOp(&From, &To);
2669 DII->setExpression(*DVRepl);
2670 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
2671 Changed = true;
2672 }
2673 for (auto *DVR : DPUsers) {
2674 if (UndefOrSalvageDVR.count(DVR))
2675 continue;
2676
2677 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2678 if (!DVRepl)
2679 continue;
2680
2681 DVR->replaceVariableLocationOp(&From, &To);
2682 DVR->setExpression(*DVRepl);
2683     LLVM_DEBUG(dbgs() << "REWRITE: " << *DVR << '\n');
2684 Changed = true;
2685 }
2686
2687 if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
2688 // Try to salvage the remaining debug users.
2689 salvageDebugInfo(From);
2690 Changed = true;
2691 }
2692
2693 return Changed;
2694 }
2695
2696 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2697 /// losslessly preserve the bits and semantics of the value. This predicate is
2698 /// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2699 ///
2700 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2701 /// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2702 /// and also does not allow lossless pointer <-> integer conversions.
2703 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2704 Type *ToTy) {
2705 // Trivially compatible types.
2706 if (FromTy == ToTy)
2707 return true;
2708
2709 // Handle compatible pointer <-> integer conversions.
2710 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2711 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2712 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2713 !DL.isNonIntegralPointerType(ToTy);
2714 return SameSize && LosslessConversion;
2715 }
2716
2717 // TODO: This is not exhaustive.
2718 return false;
2719 }
2720
2721 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2722 Instruction &DomPoint, DominatorTree &DT) {
2723 // Exit early if From has no debug users.
2724 if (!From.isUsedByMetadata())
2725 return false;
2726
2727 assert(&From != &To && "Can't replace something with itself");
2728
2729 Type *FromTy = From.getType();
2730 Type *ToTy = To.getType();
2731
2732 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2733 return DII.getExpression();
2734 };
2735 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2736 return DVR.getExpression();
2737 };
2738
2739 // Handle no-op conversions.
2740 Module &M = *From.getModule();
2741 const DataLayout &DL = M.getDataLayout();
2742 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2743 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2744
2745 // Handle integer-to-integer widening and narrowing.
2746 // FIXME: Use DW_OP_convert when it's available everywhere.
2747 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2748 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2749 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2750 assert(FromBits != ToBits && "Unexpected no-op conversion");
2751
2752 // When the width of the result grows, assume that a debugger will only
2753 // access the low `FromBits` bits when inspecting the source variable.
2754 if (FromBits < ToBits)
2755 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2756
2757 // The width of the result has shrunk. Use sign/zero extension to describe
2758 // the source variable's high bits.
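    // (Sketch: replacing an i64 %from with an i32 %to rewrites each debug
    // user to refer to %to with DIExpression::appendExt ops that rebuild the
    // 64-bit value from the low 32 bits, using the variable's signedness;
    // when the signedness is unknown the user is left untouched.)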
2759 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2760 DILocalVariable *Var = DII.getVariable();
2761
2762 // Without knowing signedness, sign/zero extension isn't possible.
2763 auto Signedness = Var->getSignedness();
2764 if (!Signedness)
2765 return std::nullopt;
2766
2767 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2768 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2769 Signed);
2770 };
2771 // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
2772 // than on dbg.value intrinsics.
2773 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2774 DILocalVariable *Var = DVR.getVariable();
2775
2776 // Without knowing signedness, sign/zero extension isn't possible.
2777 auto Signedness = Var->getSignedness();
2778 if (!Signedness)
2779 return std::nullopt;
2780
2781 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2782 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2783 Signed);
2784 };
2785 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
2786 SignOrZeroExtDVR);
2787 }
2788
2789 // TODO: Floating-point conversions, vectors.
2790 return false;
2791 }
2792
2793 bool llvm::handleUnreachableTerminator(
2794 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2795 bool Changed = false;
2796 // RemoveDIs: erase debug-info on this instruction manually.
2797 I->dropDbgRecords();
2798 for (Use &U : I->operands()) {
2799 Value *Op = U.get();
2800 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2801 U.set(PoisonValue::get(Op->getType()));
2802 PoisonedValues.push_back(Op);
2803 Changed = true;
2804 }
2805 }
2806
2807 return Changed;
2808 }
2809
2810 std::pair<unsigned, unsigned>
2811 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2812 unsigned NumDeadInst = 0;
2813 unsigned NumDeadDbgInst = 0;
2814 // Delete the instructions backwards; this reduces the number of def-use and
2815 // use-def chain updates that have to be made.
2816 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2817 SmallVector<Value *> Uses;
2818 handleUnreachableTerminator(EndInst, Uses);
2819
2820 while (EndInst != &BB->front()) {
2821 // Delete the next to last instruction.
2822 Instruction *Inst = &*--EndInst->getIterator();
2823 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2824 Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2825 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2826 // EHPads can't have DbgVariableRecords attached to them, but it might be
2827 // possible for things with token type.
2828 Inst->dropDbgRecords();
2829 EndInst = Inst;
2830 continue;
2831 }
2832 if (isa<DbgInfoIntrinsic>(Inst))
2833 ++NumDeadDbgInst;
2834 else
2835 ++NumDeadInst;
2836 // RemoveDIs: erasing debug-info must be done manually.
2837 Inst->dropDbgRecords();
2838 Inst->eraseFromParent();
2839 }
2840 return {NumDeadInst, NumDeadDbgInst};
2841 }
2842
2843 unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2844 DomTreeUpdater *DTU,
2845 MemorySSAUpdater *MSSAU) {
2846 BasicBlock *BB = I->getParent();
2847
2848 if (MSSAU)
2849 MSSAU->changeToUnreachable(I);
2850
2851 SmallSet<BasicBlock *, 8> UniqueSuccessors;
2852
2853 // Loop over all of the successors, removing BB's entry from any PHI
2854 // nodes.
2855 for (BasicBlock *Successor : successors(BB)) {
2856 Successor->removePredecessor(BB, PreserveLCSSA);
2857 if (DTU)
2858 UniqueSuccessors.insert(Successor);
2859 }
2860 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2861 UI->setDebugLoc(I->getDebugLoc());
2862
2863 // All instructions after this are dead.
2864 unsigned NumInstrsRemoved = 0;
2865 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2866 while (BBI != BBE) {
2867 if (!BBI->use_empty())
2868 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2869 BBI++->eraseFromParent();
2870 ++NumInstrsRemoved;
2871 }
2872 if (DTU) {
2873 SmallVector<DominatorTree::UpdateType, 8> Updates;
2874 Updates.reserve(UniqueSuccessors.size());
2875 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2876 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2877 DTU->applyUpdates(Updates);
2878 }
2879 BB->flushTerminatorDbgRecords();
2880 return NumInstrsRemoved;
2881 }
2882
2883 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2884 SmallVector<Value *, 8> Args(II->args());
2885 SmallVector<OperandBundleDef, 1> OpBundles;
2886 II->getOperandBundlesAsDefs(OpBundles);
2887 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2888 II->getCalledOperand(), Args, OpBundles);
2889 NewCall->setCallingConv(II->getCallingConv());
2890 NewCall->setAttributes(II->getAttributes());
2891 NewCall->setDebugLoc(II->getDebugLoc());
2892 NewCall->copyMetadata(*II);
2893
2894 // If the invoke had profile metadata, try converting them for CallInst.
2895 uint64_t TotalWeight;
2896 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2897 // Set the total weight if it fits into i32, otherwise reset.
2898 MDBuilder MDB(NewCall->getContext());
2899 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2900 ? nullptr
2901 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2902 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2903 }
2904
2905 return NewCall;
2906 }
2907
2908 // changeToCall - Convert the specified invoke into a normal call.
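// For example (illustrative IR; names are made up):
//   %r = invoke i32 @f(i32 %a) to label %normal unwind label %lpad
// becomes
//   %r = call i32 @f(i32 %a)
//   br label %normal
// and %lpad loses this block as a predecessor.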
2909 CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2910 CallInst *NewCall = createCallMatchingInvoke(II);
2911 NewCall->takeName(II);
2912 NewCall->insertBefore(II);
2913 II->replaceAllUsesWith(NewCall);
2914
2915 // Follow the call by a branch to the normal destination.
2916 BasicBlock *NormalDestBB = II->getNormalDest();
2917 BranchInst::Create(NormalDestBB, II->getIterator());
2918
2919 // Update PHI nodes in the unwind destination
2920 BasicBlock *BB = II->getParent();
2921 BasicBlock *UnwindDestBB = II->getUnwindDest();
2922 UnwindDestBB->removePredecessor(BB);
2923 II->eraseFromParent();
2924 if (DTU)
2925 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2926 return NewCall;
2927 }
2928
2929 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2930 BasicBlock *UnwindEdge,
2931 DomTreeUpdater *DTU) {
2932 BasicBlock *BB = CI->getParent();
2933
2934 // Convert this function call into an invoke instruction. First, split the
2935 // basic block.
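// For example (illustrative IR; names are made up), starting from
//   bb:
//     %r = call i32 @f(i32 %a)
//     ...
// the block is split after the call and the call becomes
//   bb:
//     %r = invoke i32 @f(i32 %a) to label %r.noexc unwind label %UnwindEdge
//   r.noexc:
//     ...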
2936 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2937 CI->getName() + ".noexc");
2938
2939 // Delete the unconditional branch inserted by SplitBlock
2940 BB->back().eraseFromParent();
2941
2942 // Create the new invoke instruction.
2943 SmallVector<Value *, 8> InvokeArgs(CI->args());
2944 SmallVector<OperandBundleDef, 1> OpBundles;
2945
2946 CI->getOperandBundlesAsDefs(OpBundles);
2947
2948 // Note: we're round tripping operand bundles through memory here, and that
2949 // can potentially be avoided with a cleverer API design that we do not have
2950 // as of this time.
2951
2952 InvokeInst *II =
2953 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2954 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2955 II->setDebugLoc(CI->getDebugLoc());
2956 II->setCallingConv(CI->getCallingConv());
2957 II->setAttributes(CI->getAttributes());
2958 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2959
2960 if (DTU)
2961 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2962
2963 // Make sure that anything using the call now uses the invoke! This also
2964 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2965 CI->replaceAllUsesWith(II);
2966
2967 // Delete the original call
2968 Split->front().eraseFromParent();
2969 return Split;
2970 }
2971
2972 static bool markAliveBlocks(Function &F,
2973 SmallPtrSetImpl<BasicBlock *> &Reachable,
2974 DomTreeUpdater *DTU = nullptr) {
2975 SmallVector<BasicBlock*, 128> Worklist;
2976 BasicBlock *BB = &F.front();
2977 Worklist.push_back(BB);
2978 Reachable.insert(BB);
2979 bool Changed = false;
2980 do {
2981 BB = Worklist.pop_back_val();
2982
2983 // Do a quick scan of the basic block, turning any obviously unreachable
2984 // instructions into LLVM unreachable insts. The instruction combining pass
2985 // canonicalizes unreachable insts into stores to null or undef.
2986 for (Instruction &I : *BB) {
2987 if (auto *CI = dyn_cast<CallInst>(&I)) {
2988 Value *Callee = CI->getCalledOperand();
2989 // Handle intrinsic calls.
2990 if (Function *F = dyn_cast<Function>(Callee)) {
2991 auto IntrinsicID = F->getIntrinsicID();
2992 // Assumptions that are known to be false are equivalent to
2993 // unreachable. Also, if the condition is undefined, then we make the
2994 // choice most beneficial to the optimizer, and choose that to also be
2995 // unreachable.
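// For example (illustrative): "call void @llvm.assume(i1 false)" can never be
// reached in a well-defined execution, so the assume itself is replaced by an
// "unreachable" terminator and the rest of the block is dropped.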
2996 if (IntrinsicID == Intrinsic::assume) {
2997 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2998 // Don't insert a call to llvm.trap right before the unreachable.
2999 changeToUnreachable(CI, false, DTU);
3000 Changed = true;
3001 break;
3002 }
3003 } else if (IntrinsicID == Intrinsic::experimental_guard) {
3004 // A call to the guard intrinsic bails out of the current
3005 // compilation unit if the predicate passed to it is false. If the
3006 // predicate is a constant false, then we know the guard will bail
3007 // out of the current compile unconditionally, so all code following
3008 // it is dead.
3009 //
3010 // Note: unlike in llvm.assume, it is not "obviously profitable" for
3011 // guards to treat `undef` as `false` since a guard on `undef` can
3012 // still be useful for widening.
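// For example (illustrative):
//   call void (i1, ...) @llvm.experimental.guard(i1 false) [ "deopt"() ]
// always deoptimizes, so everything *after* the guard is dead; the guard
// itself is kept so the bail-out still happens.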
3013 if (match(CI->getArgOperand(0), m_Zero()))
3014 if (!isa<UnreachableInst>(CI->getNextNode())) {
3015 changeToUnreachable(CI->getNextNode(), false, DTU);
3016 Changed = true;
3017 break;
3018 }
3019 }
3020 } else if ((isa<ConstantPointerNull>(Callee) &&
3021 !NullPointerIsDefined(CI->getFunction(),
3022 cast<PointerType>(Callee->getType())
3023 ->getAddressSpace())) ||
3024 isa<UndefValue>(Callee)) {
3025 changeToUnreachable(CI, false, DTU);
3026 Changed = true;
3027 break;
3028 }
3029 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
3030 // If we found a call to a no-return function, insert an unreachable
3031 // instruction after it. Make sure there isn't *already* one there
3032 // though.
3033 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
3034 // Don't insert a call to llvm.trap right before the unreachable.
3035 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
3036 Changed = true;
3037 }
3038 break;
3039 }
3040 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
3041 // Store to undef and store to null are undefined and used to signal
3042 // that they should be changed to unreachable by passes that can't
3043 // modify the CFG.
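// For example (illustrative): a non-volatile "store i32 0, ptr null" in
// address space 0, where null is not a defined address, is replaced by
// "unreachable".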
3044
3045 // Don't touch volatile stores.
3046 if (SI->isVolatile()) continue;
3047
3048 Value *Ptr = SI->getOperand(1);
3049
3050 if (isa<UndefValue>(Ptr) ||
3051 (isa<ConstantPointerNull>(Ptr) &&
3052 !NullPointerIsDefined(SI->getFunction(),
3053 SI->getPointerAddressSpace()))) {
3054 changeToUnreachable(SI, false, DTU);
3055 Changed = true;
3056 break;
3057 }
3058 }
3059 }
3060
3061 Instruction *Terminator = BB->getTerminator();
3062 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
3063 // Turn invokes that call 'nounwind' functions into ordinary calls.
3064 Value *Callee = II->getCalledOperand();
3065 if ((isa<ConstantPointerNull>(Callee) &&
3066 !NullPointerIsDefined(BB->getParent())) ||
3067 isa<UndefValue>(Callee)) {
3068 changeToUnreachable(II, false, DTU);
3069 Changed = true;
3070 } else {
3071 if (II->doesNotReturn() &&
3072 !isa<UnreachableInst>(II->getNormalDest()->front())) {
3073 // If we found an invoke of a no-return function,
3074 // create a new empty basic block with an `unreachable` terminator,
3075 // and set it as the normal destination for the invoke,
3076 // unless that is already the case.
3077 // Note that the original normal destination could have other uses.
3078 BasicBlock *OrigNormalDest = II->getNormalDest();
3079 OrigNormalDest->removePredecessor(II->getParent());
3080 LLVMContext &Ctx = II->getContext();
3081 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
3082 Ctx, OrigNormalDest->getName() + ".unreachable",
3083 II->getFunction(), OrigNormalDest);
3084 new UnreachableInst(Ctx, UnreachableNormalDest);
3085 II->setNormalDest(UnreachableNormalDest);
3086 if (DTU)
3087 DTU->applyUpdates(
3088 {{DominatorTree::Delete, BB, OrigNormalDest},
3089 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3090 Changed = true;
3091 }
3092 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
3093 if (II->use_empty() && !II->mayHaveSideEffects()) {
3094 // The invoke is removable; just branch to the normal destination.
3095 BasicBlock *NormalDestBB = II->getNormalDest();
3096 BasicBlock *UnwindDestBB = II->getUnwindDest();
3097 BranchInst::Create(NormalDestBB, II->getIterator());
3098 UnwindDestBB->removePredecessor(II->getParent());
3099 II->eraseFromParent();
3100 if (DTU)
3101 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
3102 } else
3103 changeToCall(II, DTU);
3104 Changed = true;
3105 }
3106 }
3107 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
3108 // Remove catchpads which cannot be reached.
3109 struct CatchPadDenseMapInfo {
3110 static CatchPadInst *getEmptyKey() {
3111 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
3112 }
3113
3114 static CatchPadInst *getTombstoneKey() {
3115 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
3116 }
3117
3118 static unsigned getHashValue(CatchPadInst *CatchPad) {
3119 return static_cast<unsigned>(hash_combine_range(
3120 CatchPad->value_op_begin(), CatchPad->value_op_end()));
3121 }
3122
3123 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
3124 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
3125 RHS == getEmptyKey() || RHS == getTombstoneKey())
3126 return LHS == RHS;
3127 return LHS->isIdenticalTo(RHS);
3128 }
3129 };
3130
3131 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
3132 // Set of unique CatchPads.
3133 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
3134 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
3135 HandlerSet;
3136 detail::DenseSetEmpty Empty;
3137 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
3138 E = CatchSwitch->handler_end();
3139 I != E; ++I) {
3140 BasicBlock *HandlerBB = *I;
3141 if (DTU)
3142 ++NumPerSuccessorCases[HandlerBB];
3143 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
3144 if (!HandlerSet.insert({CatchPad, Empty}).second) {
3145 if (DTU)
3146 --NumPerSuccessorCases[HandlerBB];
3147 CatchSwitch->removeHandler(I);
3148 --I;
3149 --E;
3150 Changed = true;
3151 }
3152 }
3153 if (DTU) {
3154 std::vector<DominatorTree::UpdateType> Updates;
3155 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
3156 if (I.second == 0)
3157 Updates.push_back({DominatorTree::Delete, BB, I.first});
3158 DTU->applyUpdates(Updates);
3159 }
3160 }
3161
3162 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
3163 for (BasicBlock *Successor : successors(BB))
3164 if (Reachable.insert(Successor).second)
3165 Worklist.push_back(Successor);
3166 } while (!Worklist.empty());
3167 return Changed;
3168 }
3169
3170 Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
3171 Instruction *TI = BB->getTerminator();
3172
3173 if (auto *II = dyn_cast<InvokeInst>(TI))
3174 return changeToCall(II, DTU);
3175
3176 Instruction *NewTI;
3177 BasicBlock *UnwindDest;
3178
3179 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3180 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
3181 UnwindDest = CRI->getUnwindDest();
3182 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
3183 auto *NewCatchSwitch = CatchSwitchInst::Create(
3184 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
3185 CatchSwitch->getName(), CatchSwitch->getIterator());
3186 for (BasicBlock *PadBB : CatchSwitch->handlers())
3187 NewCatchSwitch->addHandler(PadBB);
3188
3189 NewTI = NewCatchSwitch;
3190 UnwindDest = CatchSwitch->getUnwindDest();
3191 } else {
3192 llvm_unreachable("Could not find unwind successor");
3193 }
3194
3195 NewTI->takeName(TI);
3196 NewTI->setDebugLoc(TI->getDebugLoc());
3197 UnwindDest->removePredecessor(BB);
3198 TI->replaceAllUsesWith(NewTI);
3199 TI->eraseFromParent();
3200 if (DTU)
3201 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
3202 return NewTI;
3203 }
3204
3205 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
3206 /// if they are in a dead cycle. Return true if a change was made, false
3207 /// otherwise.
3208 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3209 MemorySSAUpdater *MSSAU) {
3210 SmallPtrSet<BasicBlock *, 16> Reachable;
3211 bool Changed = markAliveBlocks(F, Reachable, DTU);
3212
3213 // If there are unreachable blocks in the CFG...
3214 if (Reachable.size() == F.size())
3215 return Changed;
3216
3217 assert(Reachable.size() < F.size());
3218
3219 // Are there any blocks left to actually delete?
3220 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3221 for (BasicBlock &BB : F) {
3222 // Skip reachable basic blocks
3223 if (Reachable.count(&BB))
3224 continue;
3225 // Skip already-deleted blocks
3226 if (DTU && DTU->isBBPendingDeletion(&BB))
3227 continue;
3228 BlocksToRemove.insert(&BB);
3229 }
3230
3231 if (BlocksToRemove.empty())
3232 return Changed;
3233
3234 Changed = true;
3235 NumRemoved += BlocksToRemove.size();
3236
3237 if (MSSAU)
3238 MSSAU->removeBlocks(BlocksToRemove);
3239
3240 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
3241
3242 return Changed;
3243 }
3244
3245 void llvm::combineMetadata(Instruction *K, const Instruction *J,
3246 ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
3247 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
3248 K->dropUnknownNonDebugMetadata(KnownIDs);
3249 K->getAllMetadataOtherThanDebugLoc(Metadata);
3250 for (const auto &MD : Metadata) {
3251 unsigned Kind = MD.first;
3252 MDNode *JMD = J->getMetadata(Kind);
3253 MDNode *KMD = MD.second;
3254
3255 switch (Kind) {
3256 default:
3257 K->setMetadata(Kind, nullptr); // Remove unknown metadata
3258 break;
3259 case LLVMContext::MD_dbg:
3260 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
3261 case LLVMContext::MD_DIAssignID:
3262 K->mergeDIAssignID(J);
3263 break;
3264 case LLVMContext::MD_tbaa:
3265 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
3266 break;
3267 case LLVMContext::MD_alias_scope:
3268 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
3269 break;
3270 case LLVMContext::MD_noalias:
3271 case LLVMContext::MD_mem_parallel_loop_access:
3272 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
3273 break;
3274 case LLVMContext::MD_access_group:
3275 K->setMetadata(LLVMContext::MD_access_group,
3276 intersectAccessGroups(K, J));
3277 break;
3278 case LLVMContext::MD_range:
3279 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3280 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
3281 break;
3282 case LLVMContext::MD_fpmath:
3283 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
3284 break;
3285 case LLVMContext::MD_invariant_load:
3286 // If K moves, only set the !invariant.load if it is present in both
3287 // instructions.
3288 if (DoesKMove)
3289 K->setMetadata(Kind, JMD);
3290 break;
3291 case LLVMContext::MD_nonnull:
3292 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3293 K->setMetadata(Kind, JMD);
3294 break;
3295 case LLVMContext::MD_invariant_group:
3296 // Preserve !invariant.group in K.
3297 break;
3298 case LLVMContext::MD_mmra:
3299 // Combine MMRAs
3300 break;
3301 case LLVMContext::MD_align:
3302 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3303 K->setMetadata(
3304 Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3305 break;
3306 case LLVMContext::MD_dereferenceable:
3307 case LLVMContext::MD_dereferenceable_or_null:
3308 if (DoesKMove)
3309 K->setMetadata(Kind,
3310 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3311 break;
3312 case LLVMContext::MD_preserve_access_index:
3313 // Preserve !preserve.access.index in K.
3314 break;
3315 case LLVMContext::MD_noundef:
3316 // If K does move, keep noundef if it is present in both instructions.
3317 if (DoesKMove)
3318 K->setMetadata(Kind, JMD);
3319 break;
3320 case LLVMContext::MD_nontemporal:
3321 // Preserve !nontemporal if it is present on both instructions.
3322 K->setMetadata(Kind, JMD);
3323 break;
3324 case LLVMContext::MD_prof:
3325 if (DoesKMove)
3326 K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
3327 break;
3328 }
3329 }
3330 // Set !invariant.group from J if J has it. If both instructions have it
3331 // then we will just pick it from J - even when they are different.
3332 // Also make sure that K is a load or store - e.g. combining bitcast with load
3333 // could produce bitcast with invariant.group metadata, which is invalid.
3334 // FIXME: we should try to preserve both invariant.group md if they are
3335 // different, but right now instruction can only have one invariant.group.
3336 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3337 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3338 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3339
3340 // Merge MMRAs.
3341 // This is handled separately because we also want to handle cases where K
3342 // doesn't have tags but J does.
3343 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3344 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3345 if (JMMRA || KMMRA) {
3346 K->setMetadata(LLVMContext::MD_mmra,
3347 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3348 }
3349 }
3350
3351 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3352 bool KDominatesJ) {
3353 unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
3354 LLVMContext::MD_alias_scope,
3355 LLVMContext::MD_noalias,
3356 LLVMContext::MD_range,
3357 LLVMContext::MD_fpmath,
3358 LLVMContext::MD_invariant_load,
3359 LLVMContext::MD_nonnull,
3360 LLVMContext::MD_invariant_group,
3361 LLVMContext::MD_align,
3362 LLVMContext::MD_dereferenceable,
3363 LLVMContext::MD_dereferenceable_or_null,
3364 LLVMContext::MD_access_group,
3365 LLVMContext::MD_preserve_access_index,
3366 LLVMContext::MD_prof,
3367 LLVMContext::MD_nontemporal,
3368 LLVMContext::MD_noundef,
3369 LLVMContext::MD_mmra};
3370 combineMetadata(K, J, KnownIDs, KDominatesJ);
3371 }
3372
3373 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3374 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3375 Source.getAllMetadata(MD);
3376 MDBuilder MDB(Dest.getContext());
3377 Type *NewType = Dest.getType();
3378 const DataLayout &DL = Source.getDataLayout();
3379 for (const auto &MDPair : MD) {
3380 unsigned ID = MDPair.first;
3381 MDNode *N = MDPair.second;
3382 // Note, essentially every kind of metadata should be preserved here! This
3383 // routine is supposed to clone a load instruction changing *only its type*.
3384 // The only metadata it makes sense to drop is metadata which is invalidated
3385 // when the pointer type changes. This should essentially never be the case
3386 // in LLVM, but we explicitly switch over only known metadata to be
3387 // conservatively correct. If you are adding metadata to LLVM which pertains
3388 // to loads, you almost certainly want to add it here.
3389 switch (ID) {
3390 case LLVMContext::MD_dbg:
3391 case LLVMContext::MD_tbaa:
3392 case LLVMContext::MD_prof:
3393 case LLVMContext::MD_fpmath:
3394 case LLVMContext::MD_tbaa_struct:
3395 case LLVMContext::MD_invariant_load:
3396 case LLVMContext::MD_alias_scope:
3397 case LLVMContext::MD_noalias:
3398 case LLVMContext::MD_nontemporal:
3399 case LLVMContext::MD_mem_parallel_loop_access:
3400 case LLVMContext::MD_access_group:
3401 case LLVMContext::MD_noundef:
3402 // All of these directly apply.
3403 Dest.setMetadata(ID, N);
3404 break;
3405
3406 case LLVMContext::MD_nonnull:
3407 copyNonnullMetadata(Source, N, Dest);
3408 break;
3409
3410 case LLVMContext::MD_align:
3411 case LLVMContext::MD_dereferenceable:
3412 case LLVMContext::MD_dereferenceable_or_null:
3413 // These only directly apply if the new type is also a pointer.
3414 if (NewType->isPointerTy())
3415 Dest.setMetadata(ID, N);
3416 break;
3417
3418 case LLVMContext::MD_range:
3419 copyRangeMetadata(DL, Source, N, Dest);
3420 break;
3421 }
3422 }
3423 }
3424
3425 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3426 auto *ReplInst = dyn_cast<Instruction>(Repl);
3427 if (!ReplInst)
3428 return;
3429
3430 // Patch the replacement so that it is not more restrictive than the value
3431 // being replaced.
3432 WithOverflowInst *UnusedWO;
3433 // When replacing the result of a llvm.*.with.overflow intrinsic with an
3434 // overflowing binary operator, nuw/nsw flags may no longer hold.
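// For example (illustrative IR; names are made up): if
//   %wo = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %s  = extractvalue { i32, i1 } %wo, 0
// is replaced by an existing "%add = add nsw i32 %a, %b", the nsw flag must be
// dropped: %s is well-defined even when the addition overflows.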
3435 if (isa<OverflowingBinaryOperator>(ReplInst) &&
3436 match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3437 ReplInst->dropPoisonGeneratingFlags();
3438 // Note that if 'I' is a load being replaced by some operation,
3439 // for example, by an arithmetic operation, then andIRFlags()
3440 // would just erase all math flags from the original arithmetic
3441 // operation, which is clearly not wanted and not needed.
3442 else if (!isa<LoadInst>(I))
3443 ReplInst->andIRFlags(I);
3444
3445 // FIXME: If both the original and replacement value are part of the
3446 // same control-flow region (meaning that the execution of one
3447 // guarantees the execution of the other), then we can combine the
3448 // noalias scopes here and do better than the general conservative
3449 // answer used in combineMetadata().
3450
3451 // In general, GVN unifies expressions over different control-flow
3452 // regions, and so we need a conservative combination of the noalias
3453 // scopes.
3454 combineMetadataForCSE(ReplInst, I, false);
3455 }
3456
3457 template <typename RootType, typename ShouldReplaceFn>
3458 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3459 const RootType &Root,
3460 const ShouldReplaceFn &ShouldReplace) {
3461 assert(From->getType() == To->getType());
3462
3463 unsigned Count = 0;
3464 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3465 if (!ShouldReplace(Root, U))
3466 continue;
3467 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3468 From->printAsOperand(dbgs());
3469 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3470 U.set(To);
3471 ++Count;
3472 }
3473 return Count;
3474 }
3475
3476 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3477 assert(From->getType() == To->getType());
3478 auto *BB = From->getParent();
3479 unsigned Count = 0;
3480
3481 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3482 auto *I = cast<Instruction>(U.getUser());
3483 if (I->getParent() == BB)
3484 continue;
3485 U.set(To);
3486 ++Count;
3487 }
3488 return Count;
3489 }
3490
3491 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3492 DominatorTree &DT,
3493 const BasicBlockEdge &Root) {
3494 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
3495 return DT.dominates(Root, U);
3496 };
3497 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
3498 }
3499
3500 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3501 DominatorTree &DT,
3502 const BasicBlock *BB) {
3503 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
3504 return DT.dominates(BB, U);
3505 };
3506 return ::replaceDominatedUsesWith(From, To, BB, Dominates);
3507 }
3508
3509 unsigned llvm::replaceDominatedUsesWithIf(
3510 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3511 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3512 auto DominatesAndShouldReplace =
3513 [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) {
3514 return DT.dominates(Root, U) && ShouldReplace(U, To);
3515 };
3516 return ::replaceDominatedUsesWith(From, To, Root, DominatesAndShouldReplace);
3517 }
3518
3519 unsigned llvm::replaceDominatedUsesWithIf(
3520 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3521 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3522 auto DominatesAndShouldReplace = [&DT, &ShouldReplace,
3523 To](const BasicBlock *BB, const Use &U) {
3524 return DT.dominates(BB, U) && ShouldReplace(U, To);
3525 };
3526 return ::replaceDominatedUsesWith(From, To, BB, DominatesAndShouldReplace);
3527 }
3528
3529 bool llvm::callsGCLeafFunction(const CallBase *Call,
3530 const TargetLibraryInfo &TLI) {
3531 // Check if the function is specifically marked as a gc leaf function.
3532 if (Call->hasFnAttr("gc-leaf-function"))
3533 return true;
3534 if (const Function *F = Call->getCalledFunction()) {
3535 if (F->hasFnAttribute("gc-leaf-function"))
3536 return true;
3537
3538 if (auto IID = F->getIntrinsicID()) {
3539 // Most LLVM intrinsics do not take safepoints.
3540 return IID != Intrinsic::experimental_gc_statepoint &&
3541 IID != Intrinsic::experimental_deoptimize &&
3542 IID != Intrinsic::memcpy_element_unordered_atomic &&
3543 IID != Intrinsic::memmove_element_unordered_atomic;
3544 }
3545 }
3546
3547 // Lib calls can be materialized by some passes, and won't be
3548 // marked as 'gc-leaf-function.' All available Libcalls are
3549 // GC-leaf.
3550 LibFunc LF;
3551 if (TLI.getLibFunc(*Call, LF)) {
3552 return TLI.has(LF);
3553 }
3554
3555 return false;
3556 }
3557
3558 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3559 LoadInst &NewLI) {
3560 auto *NewTy = NewLI.getType();
3561
3562 // This only directly applies if the new type is also a pointer.
3563 if (NewTy->isPointerTy()) {
3564 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3565 return;
3566 }
3567
3568 // The only other translation we can do is to integral loads with !range
3569 // metadata.
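// For example (illustrative): a "load ptr" carrying !nonnull that is rewritten
// as a "load i64" instead gets "!range !{i64 1, i64 0}", i.e. any value except
// zero.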
3570 if (!NewTy->isIntegerTy())
3571 return;
3572
3573 MDBuilder MDB(NewLI.getContext());
3574 const Value *Ptr = OldLI.getPointerOperand();
3575 auto *ITy = cast<IntegerType>(NewTy);
3576 auto *NullInt = ConstantExpr::getPtrToInt(
3577 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3578 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3579 NewLI.setMetadata(LLVMContext::MD_range,
3580 MDB.createRange(NonNullInt, NullInt));
3581 }
3582
3583 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3584 MDNode *N, LoadInst &NewLI) {
3585 auto *NewTy = NewLI.getType();
3586 // Simply copy the metadata if the type did not change.
3587 if (NewTy == OldLI.getType()) {
3588 NewLI.setMetadata(LLVMContext::MD_range, N);
3589 return;
3590 }
3591
3592 // Give up unless the load is being converted to a pointer type, where there
3593 // is a single, very valuable mapping we can do reliably.
3594 // FIXME: It would be nice to propagate this in more ways, but the type
3595 // conversions make it hard.
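// For example (illustrative, the reverse of the !nonnull case above): a
// "load i64" with "!range !{i64 1, i64 0}" (any nonzero value) rewritten as a
// same-width "load ptr" gets !nonnull, since the range excludes the null
// address.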
3596 if (!NewTy->isPointerTy())
3597 return;
3598
3599 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3600 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3601 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3602 MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
3603 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3604 }
3605 }
3606
3607 void llvm::dropDebugUsers(Instruction &I) {
3608 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3609 SmallVector<DbgVariableRecord *, 1> DPUsers;
3610 findDbgUsers(DbgUsers, &I, &DPUsers);
3611 for (auto *DII : DbgUsers)
3612 DII->eraseFromParent();
3613 for (auto *DVR : DPUsers)
3614 DVR->eraseFromParent();
3615 }
3616
3617 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3618 BasicBlock *BB) {
3619 // Since we are moving the instructions out of their basic block, we do not
3620 // retain their original debug locations (DILocations) and debug intrinsic
3621 // instructions.
3622 //
3623 // Doing so would degrade the debugging experience and adversely affect the
3624 // accuracy of profiling information.
3625 //
3626 // Currently, when hoisting the instructions, we take the following actions:
3627 // - Remove their debug intrinsic instructions.
3628 // - Set their debug locations to the values from the insertion point.
3629 //
3630 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3631 // need to be deleted, is because there will not be any instructions with a
3632 // DILocation in either branch left after performing the transformation. We
3633 // can only insert a dbg.value after the two branches are joined again.
3634 //
3635 // See PR38762, PR39243 for more details.
3636 //
3637 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3638 // encode predicated DIExpressions that yield different results on different
3639 // code paths.
3640
3641 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3642 Instruction *I = &*II;
3643 I->dropUBImplyingAttrsAndMetadata();
3644 if (I->isUsedByMetadata())
3645 dropDebugUsers(*I);
3646 // RemoveDIs: drop debug-info too as the following code does.
3647 I->dropDbgRecords();
3648 if (I->isDebugOrPseudoInst()) {
3649 // Remove DbgInfo and pseudo probe Intrinsics.
3650 II = I->eraseFromParent();
3651 continue;
3652 }
3653 I->setDebugLoc(InsertPt->getDebugLoc());
3654 ++II;
3655 }
3656 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3657 BB->getTerminator()->getIterator());
3658 }
3659
3660 DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3661 Type &Ty) {
3662 // Create integer constant expression.
3663 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3664 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3665 std::optional<int64_t> InitIntOpt = API.trySExtValue();
3666 return InitIntOpt ? DIB.createConstantValueExpression(
3667 static_cast<uint64_t>(*InitIntOpt))
3668 : nullptr;
3669 };
3670
3671 if (isa<ConstantInt>(C))
3672 return createIntegerExpression(C);
3673
3674 auto *FP = dyn_cast<ConstantFP>(&C);
3675 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3676 const APFloat &APF = FP->getValueAPF();
3677 APInt const &API = APF.bitcastToAPInt();
3678 if (auto Temp = API.getZExtValue())
3679 return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
3680 return DIB.createConstantValueExpression(*API.getRawData());
3681 }
3682
3683 if (!Ty.isPointerTy())
3684 return nullptr;
3685
3686 if (isa<ConstantPointerNull>(C))
3687 return DIB.createConstantValueExpression(0);
3688
3689 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3690 if (CE->getOpcode() == Instruction::IntToPtr) {
3691 const Value *V = CE->getOperand(0);
3692 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3693 return createIntegerExpression(*CI);
3694 }
3695 return nullptr;
3696 }
3697
3698 void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) {
3699 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3700 for (auto *Op : Set) {
3701 auto I = Mapping.find(Op);
3702 if (I != Mapping.end())
3703 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3704 }
3705 };
3706 auto RemapAssignAddress = [&Mapping](auto *DA) {
3707 auto I = Mapping.find(DA->getAddress());
3708 if (I != Mapping.end())
3709 DA->setAddress(I->second);
3710 };
3711 if (auto DVI = dyn_cast<DbgVariableIntrinsic>(Inst))
3712 RemapDebugOperands(DVI, DVI->location_ops());
3713 if (auto DAI = dyn_cast<DbgAssignIntrinsic>(Inst))
3714 RemapAssignAddress(DAI);
3715 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3716 RemapDebugOperands(&DVR, DVR.location_ops());
3717 if (DVR.isDbgAssign())
3718 RemapAssignAddress(&DVR);
3719 }
3720 }
3721
3722 namespace {
3723
3724 /// A potential constituent of a bitreverse or bswap expression. See
3725 /// collectBitParts for a fuller explanation.
3726 struct BitPart {
3727 BitPart(Value *P, unsigned BW) : Provider(P) {
3728 Provenance.resize(BW);
3729 }
3730
3731 /// The Value that this is a bitreverse/bswap of.
3732 Value *Provider;
3733
3734 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3735 /// in Provider becomes bit B in the result of this expression.
3736 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3737
3738 enum { Unset = -1 };
3739 };
3740
3741 } // end anonymous namespace
3742
3743 /// Analyze the specified subexpression and see if it is capable of providing
3744 /// pieces of a bswap or bitreverse. The subexpression provides a potential
3745 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3746 /// the output of the expression came from a corresponding bit in some other
3747 /// value. This function is recursive, and the end result is a mapping of
3748 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
3749 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3750 ///
3751 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3752 /// that the expression deposits the low byte of %X into the high byte of the
3753 /// result and that all other bits are zero. This expression is accepted and a
3754 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3755 /// [0-7].
3756 ///
3757 /// For vector types, all analysis is performed at the per-element level. No
3758 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
3759 /// constant masks must be splatted across all elements.
3760 ///
3761 /// To avoid revisiting values, the BitPart results are memoized into the
3762 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
3763 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3764 /// store BitParts objects, not pointers. As we need the concept of a nullptr
3765 /// BitParts (Value has been analyzed and the analysis failed), we use an
3766 /// Optional type instead to provide the same functionality.
3767 ///
3768 /// Because we pass around references into \c BPS, we must use a container that
3769 /// does not invalidate internal references (std::map instead of DenseMap).
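///
/// For example (an illustrative i16 byte swap; the value names are made up):
///   %hi = shl i16 %x, 8
///   %lo = lshr i16 %x, 8
///   %r  = or i16 %hi, %lo
/// analyzing %r yields Provider = %x with Provenance = [8..15, 0..7], which
/// recognizeBSwapOrBitReverseIdiom then accepts as a bswap.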
3770 static const std::optional<BitPart> &
3771 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3772 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3773 bool &FoundRoot) {
3774 auto I = BPS.find(V);
3775 if (I != BPS.end())
3776 return I->second;
3777
3778 auto &Result = BPS[V] = std::nullopt;
3779 auto BitWidth = V->getType()->getScalarSizeInBits();
3780
3781 // Can't do integer/elements > 128 bits.
3782 if (BitWidth > 128)
3783 return Result;
3784
3785 // Prevent stack overflow by limiting the recursion depth
3786 if (Depth == BitPartRecursionMaxDepth) {
3787 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3788 return Result;
3789 }
3790
3791 if (auto *I = dyn_cast<Instruction>(V)) {
3792 Value *X, *Y;
3793 const APInt *C;
3794
3795 // If this is an or instruction, it may be an inner node of the bswap.
3796 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3797 // Check we have both sources and they are from the same provider.
3798 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3799 Depth + 1, FoundRoot);
3800 if (!A || !A->Provider)
3801 return Result;
3802
3803 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3804 Depth + 1, FoundRoot);
3805 if (!B || A->Provider != B->Provider)
3806 return Result;
3807
3808 // Try and merge the two together.
3809 Result = BitPart(A->Provider, BitWidth);
3810 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3811 if (A->Provenance[BitIdx] != BitPart::Unset &&
3812 B->Provenance[BitIdx] != BitPart::Unset &&
3813 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3814 return Result = std::nullopt;
3815
3816 if (A->Provenance[BitIdx] == BitPart::Unset)
3817 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3818 else
3819 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3820 }
3821
3822 return Result;
3823 }
3824
3825 // If this is a logical shift by a constant, recurse then shift the result.
3826 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3827 const APInt &BitShift = *C;
3828
3829 // Ensure the shift amount is defined.
3830 if (BitShift.uge(BitWidth))
3831 return Result;
3832
3833 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3834 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3835 return Result;
3836
3837 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3838 Depth + 1, FoundRoot);
3839 if (!Res)
3840 return Result;
3841 Result = Res;
3842
3843 // Perform the "shift" on BitProvenance.
3844 auto &P = Result->Provenance;
3845 if (I->getOpcode() == Instruction::Shl) {
3846 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3847 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3848 } else {
3849 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3850 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3851 }
3852
3853 return Result;
3854 }
3855
3856 // If this is a logical 'and' with a mask that clears bits, recurse then
3857 // unset the appropriate bits.
3858 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3859 const APInt &AndMask = *C;
3860
3861 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3862 // early exit.
3863 unsigned NumMaskedBits = AndMask.popcount();
3864 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3865 return Result;
3866
3867 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3868 Depth + 1, FoundRoot);
3869 if (!Res)
3870 return Result;
3871 Result = Res;
3872
3873 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3874 // If the AndMask is zero for this bit, clear the bit.
3875 if (AndMask[BitIdx] == 0)
3876 Result->Provenance[BitIdx] = BitPart::Unset;
3877 return Result;
3878 }
3879
3880 // If this is a zext instruction zero extend the result.
3881 if (match(V, m_ZExt(m_Value(X)))) {
3882 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3883 Depth + 1, FoundRoot);
3884 if (!Res)
3885 return Result;
3886
3887 Result = BitPart(Res->Provider, BitWidth);
3888 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3889 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3890 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3891 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3892 Result->Provenance[BitIdx] = BitPart::Unset;
3893 return Result;
3894 }
3895
3896 // If this is a truncate instruction, extract the lower bits.
3897 if (match(V, m_Trunc(m_Value(X)))) {
3898 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3899 Depth + 1, FoundRoot);
3900 if (!Res)
3901 return Result;
3902
3903 Result = BitPart(Res->Provider, BitWidth);
3904 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3905 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3906 return Result;
3907 }
3908
3909 // BITREVERSE - most likely due to us previously matching a partial
3910 // bitreverse.
3911 if (match(V, m_BitReverse(m_Value(X)))) {
3912 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3913 Depth + 1, FoundRoot);
3914 if (!Res)
3915 return Result;
3916
3917 Result = BitPart(Res->Provider, BitWidth);
3918 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3919 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3920 return Result;
3921 }
3922
3923 // BSWAP - most likely due to us previously matching a partial bswap.
3924 if (match(V, m_BSwap(m_Value(X)))) {
3925 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3926 Depth + 1, FoundRoot);
3927 if (!Res)
3928 return Result;
3929
3930 unsigned ByteWidth = BitWidth / 8;
3931 Result = BitPart(Res->Provider, BitWidth);
3932 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3933 unsigned ByteBitOfs = ByteIdx * 8;
3934 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3935 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3936 Res->Provenance[ByteBitOfs + BitIdx];
3937 }
3938 return Result;
3939 }
3940
3941 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3942 // amount (modulo).
3943 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3944 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
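// For example (illustrative): fshl(i8 %x, i8 %y, 3) == (%x << 3) | (%y >> 5),
// so result bits [7:3] come from %x bits [4:0] and result bits [2:0] come
// from %y bits [7:5].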
3945 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3946 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3947 // We can treat fshr as a fshl by flipping the modulo amount.
3948 unsigned ModAmt = C->urem(BitWidth);
3949 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3950 ModAmt = BitWidth - ModAmt;
3951
3952 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3953 if (!MatchBitReversals && (ModAmt % 8) != 0)
3954 return Result;
3955
3956 // Check we have both sources and they are from the same provider.
3957 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3958 Depth + 1, FoundRoot);
3959 if (!LHS || !LHS->Provider)
3960 return Result;
3961
3962 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3963 Depth + 1, FoundRoot);
3964 if (!RHS || LHS->Provider != RHS->Provider)
3965 return Result;
3966
3967 unsigned StartBitRHS = BitWidth - ModAmt;
3968 Result = BitPart(LHS->Provider, BitWidth);
3969 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3970 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3971 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3972 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3973 return Result;
3974 }
3975 }
3976
3977 // If we've already found a root input value then we're never going to merge
3978 // these back together.
3979 if (FoundRoot)
3980 return Result;
3981
3982 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3983 // be the root input value to the bswap/bitreverse.
3984 FoundRoot = true;
3985 Result = BitPart(V, BitWidth);
3986 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3987 Result->Provenance[BitIdx] = BitIdx;
3988 return Result;
3989 }
3990
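// For example (illustrative): in a 32-bit bswap, provider bit 3 (bit 3 of
// byte 0) must land at result bit 27 (bit 3 of byte 3): the bit position
// within the byte is unchanged while the byte index is mirrored.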
3991 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3992 unsigned BitWidth) {
3993 if (From % 8 != To % 8)
3994 return false;
3995 // Convert from bit indices to byte indices and check for a byte reversal.
3996 From >>= 3;
3997 To >>= 3;
3998 BitWidth >>= 3;
3999 return From == BitWidth - To - 1;
4000 }
4001
4002 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
4003 unsigned BitWidth) {
4004 return From == BitWidth - To - 1;
4005 }
4006
4007 bool llvm::recognizeBSwapOrBitReverseIdiom(
4008 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
4009 SmallVectorImpl<Instruction *> &InsertedInsts) {
4010 if (!match(I, m_Or(m_Value(), m_Value())) &&
4011 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
4012 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
4013 !match(I, m_BSwap(m_Value())))
4014 return false;
4015 if (!MatchBSwaps && !MatchBitReversals)
4016 return false;
4017 Type *ITy = I->getType();
4018 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
4019 return false; // Can't do integer/elements > 128 bits.
4020
4021 // Try to find all the pieces corresponding to the bswap.
4022 bool FoundRoot = false;
4023 std::map<Value *, std::optional<BitPart>> BPS;
4024 const auto &Res =
4025 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
4026 if (!Res)
4027 return false;
4028 ArrayRef<int8_t> BitProvenance = Res->Provenance;
4029 assert(all_of(BitProvenance,
4030 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
4031 "Illegal bit provenance index");
4032
4033 // If the upper bits are zero, then attempt to perform as a truncated op.
4034 Type *DemandedTy = ITy;
4035 if (BitProvenance.back() == BitPart::Unset) {
4036 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
4037 BitProvenance = BitProvenance.drop_back();
4038 if (BitProvenance.empty())
4039 return false; // TODO - handle null value?
4040 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
4041 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
4042 DemandedTy = VectorType::get(DemandedTy, IVecTy);
4043 }
4044
4045 // Check BitProvenance hasn't found a source larger than the result type.
4046 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
4047 if (DemandedBW > ITy->getScalarSizeInBits())
4048 return false;
4049
4050 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
4051 // only byteswap values with an even number of bytes.
4052 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
4053 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
4054 bool OKForBitReverse = MatchBitReversals;
4055 for (unsigned BitIdx = 0;
4056 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
4057 if (BitProvenance[BitIdx] == BitPart::Unset) {
4058 DemandedMask.clearBit(BitIdx);
4059 continue;
4060 }
4061 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
4062 DemandedBW);
4063 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
4064 BitIdx, DemandedBW);
4065 }
4066
4067 Intrinsic::ID Intrin;
4068 if (OKForBSwap)
4069 Intrin = Intrinsic::bswap;
4070 else if (OKForBitReverse)
4071 Intrin = Intrinsic::bitreverse;
4072 else
4073 return false;
4074
4075 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
4076 Value *Provider = Res->Provider;
4077
4078 // We may need to truncate the provider.
4079 if (DemandedTy != Provider->getType()) {
4080 auto *Trunc =
4081 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
4082 InsertedInsts.push_back(Trunc);
4083 Provider = Trunc;
4084 }
4085
4086 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
4087 InsertedInsts.push_back(Result);
4088
4089 if (!DemandedMask.isAllOnes()) {
4090 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
4091 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
4092 InsertedInsts.push_back(Result);
4093 }
4094
4095 // We may need to zeroextend back to the result type.
4096 if (ITy != Result->getType()) {
4097 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
4098 InsertedInsts.push_back(ExtInst);
4099 }
4100
4101 return true;
4102 }
4103
4104 // CodeGen has special handling for some string functions that may replace
4105 // them with target-specific intrinsics. Since that'd skip our interceptors
4106 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
4107 // we mark affected calls as NoBuiltin, which will disable optimization
4108 // in CodeGen.
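// For example (an illustrative scenario, assuming the target library info
// reports optimized codegen for memcmp): a recognized memcmp call could be
// expanded inline by CodeGen; adding the nobuiltin attribute keeps it as a
// real library call that the sanitizer runtime can intercept.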
4109 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
4110 CallInst *CI, const TargetLibraryInfo *TLI) {
4111 Function *F = CI->getCalledFunction();
4112 LibFunc Func;
4113 if (F && !F->hasLocalLinkage() && F->hasName() &&
4114 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
4115 !F->doesNotAccessMemory())
4116 CI->addFnAttr(Attribute::NoBuiltin);
4117 }
4118
4119 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
4120 // We can't have a PHI with a metadata type.
4121 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
4122 return false;
4123
4124 // Early exit.
4125 if (!isa<Constant>(I->getOperand(OpIdx)))
4126 return true;
4127
4128 switch (I->getOpcode()) {
4129 default:
4130 return true;
4131 case Instruction::Call:
4132 case Instruction::Invoke: {
4133 const auto &CB = cast<CallBase>(*I);
4134
4135 // Can't handle inline asm. Skip it.
4136 if (CB.isInlineAsm())
4137 return false;
4138
4139 // Constant bundle operands may need to retain their constant-ness for
4140 // correctness.
4141 if (CB.isBundleOperand(OpIdx))
4142 return false;
4143
4144 if (OpIdx < CB.arg_size()) {
4145 // Some variadic intrinsics require constants in the variadic arguments,
4146 // which currently aren't markable as immarg.
4147 if (isa<IntrinsicInst>(CB) &&
4148 OpIdx >= CB.getFunctionType()->getNumParams()) {
4149 // This is known to be OK for stackmap.
4150 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
4151 }
4152
4153 // gcroot is a special case, since it requires a constant argument which
4154 // isn't also required to be a simple ConstantInt.
4155 if (CB.getIntrinsicID() == Intrinsic::gcroot)
4156 return false;
4157
4158 // Some intrinsic operands are required to be immediates.
4159 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
4160 }
4161
4162 // It is never allowed to replace the callee operand of an intrinsic call,
4163 // but it may be possible for an ordinary call.
4164 return !isa<IntrinsicInst>(CB);
4165 }
4166 case Instruction::ShuffleVector:
4167 // Shufflevector masks are constant.
4168 return OpIdx != 2;
4169 case Instruction::Switch:
4170 case Instruction::ExtractValue:
4171 // All operands apart from the first are constant.
4172 return OpIdx == 0;
4173 case Instruction::InsertValue:
4174 // All operands apart from the first and the second are constant.
4175 return OpIdx < 2;
4176 case Instruction::Alloca:
4177 // Static allocas (constant size in the entry block) are handled by
4178 // prologue/epilogue insertion so they're free anyway. We definitely don't
4179 // want to make them non-constant.
4180 return !cast<AllocaInst>(I)->isStaticAlloca();
4181 case Instruction::GetElementPtr:
4182 if (OpIdx == 0)
4183 return true;
4184 gep_type_iterator It = gep_type_begin(I);
4185 for (auto E = std::next(It, OpIdx); It != E; ++It)
4186 if (It.isStruct())
4187 return false;
4188 return true;
4189 }
4190 }
4191
4192 Value *llvm::invertCondition(Value *Condition) {
4193 // First: Check if it's a constant
4194 if (Constant *C = dyn_cast<Constant>(Condition))
4195 return ConstantExpr::getNot(C);
4196
4197 // Second: If the condition is already inverted, return the original value
4198 Value *NotCondition;
4199 if (match(Condition, m_Not(m_Value(NotCondition))))
4200 return NotCondition;
4201
4202 BasicBlock *Parent = nullptr;
4203 Instruction *Inst = dyn_cast<Instruction>(Condition);
4204 if (Inst)
4205 Parent = Inst->getParent();
4206 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4207 Parent = &Arg->getParent()->getEntryBlock();
4208 assert(Parent && "Unsupported condition to invert");
4209
4210 // Third: Check all the users for an invert
4211 for (User *U : Condition->users())
4212 if (Instruction *I = dyn_cast<Instruction>(U))
4213 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4214 return I;
4215
4216 // Last option: Create a new instruction
4217 auto *Inverted =
4218 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4219 if (Inst && !isa<PHINode>(Inst))
4220 Inverted->insertAfter(Inst);
4221 else
4222 Inverted->insertBefore(&*Parent->getFirstInsertionPt());
4223 return Inverted;
4224 }
4225
4226 bool llvm::inferAttributesFromOthers(Function &F) {
4227 // Note: We explicitly check for attributes rather than using cover functions
4228 // because some of the cover functions include the logic being implemented.
4229
4230 bool Changed = false;
4231 // readnone + not convergent implies nosync
4232 if (!F.hasFnAttribute(Attribute::NoSync) &&
4233 F.doesNotAccessMemory() && !F.isConvergent()) {
4234 F.setNoSync();
4235 Changed = true;
4236 }
4237
4238 // readonly implies nofree
4239 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4240 F.setDoesNotFreeMemory();
4241 Changed = true;
4242 }
4243
4244 // willreturn implies mustprogress
4245 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4246 F.setMustProgress();
4247 Changed = true;
4248 }
4249
4250 // TODO: There are a bunch of cases of restrictive memory effects we
4251 // can infer by inspecting arguments of argmemonly-ish functions.
4252
4253 return Changed;
4254 }
4255