1 //===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass performs a simple dominator tree walk that eliminates trivially 10 // redundant instructions. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/EarlyCSE.h" 15 #include "llvm/ADT/DenseMapInfo.h" 16 #include "llvm/ADT/Hashing.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/ADT/ScopedHashTable.h" 19 #include "llvm/ADT/SetVector.h" 20 #include "llvm/ADT/SmallVector.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/Analysis/AssumptionCache.h" 23 #include "llvm/Analysis/GlobalsModRef.h" 24 #include "llvm/Analysis/GuardUtils.h" 25 #include "llvm/Analysis/InstructionSimplify.h" 26 #include "llvm/Analysis/MemorySSA.h" 27 #include "llvm/Analysis/MemorySSAUpdater.h" 28 #include "llvm/Analysis/TargetLibraryInfo.h" 29 #include "llvm/Analysis/TargetTransformInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/IR/BasicBlock.h" 32 #include "llvm/IR/Constants.h" 33 #include "llvm/IR/DataLayout.h" 34 #include "llvm/IR/Dominators.h" 35 #include "llvm/IR/Function.h" 36 #include "llvm/IR/InstrTypes.h" 37 #include "llvm/IR/Instruction.h" 38 #include "llvm/IR/Instructions.h" 39 #include "llvm/IR/IntrinsicInst.h" 40 #include "llvm/IR/Intrinsics.h" 41 #include "llvm/IR/LLVMContext.h" 42 #include "llvm/IR/PassManager.h" 43 #include "llvm/IR/PatternMatch.h" 44 #include "llvm/IR/Statepoint.h" 45 #include "llvm/IR/Type.h" 46 #include "llvm/IR/Use.h" 47 #include "llvm/IR/Value.h" 48 #include "llvm/InitializePasses.h" 49 #include "llvm/Pass.h" 50 #include "llvm/Support/Allocator.h" 51 #include "llvm/Support/AtomicOrdering.h" 52 #include "llvm/Support/Casting.h" 53 #include "llvm/Support/Debug.h" 54 #include "llvm/Support/DebugCounter.h" 55 #include "llvm/Support/RecyclingAllocator.h" 56 #include "llvm/Support/raw_ostream.h" 57 #include "llvm/Transforms/Scalar.h" 58 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" 59 #include "llvm/Transforms/Utils/GuardUtils.h" 60 #include "llvm/Transforms/Utils/Local.h" 61 #include <cassert> 62 #include <deque> 63 #include <memory> 64 #include <utility> 65 66 using namespace llvm; 67 using namespace llvm::PatternMatch; 68 69 #define DEBUG_TYPE "early-cse" 70 71 STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd"); 72 STATISTIC(NumCSE, "Number of instructions CSE'd"); 73 STATISTIC(NumCSECVP, "Number of compare instructions CVP'd"); 74 STATISTIC(NumCSELoad, "Number of load instructions CSE'd"); 75 STATISTIC(NumCSECall, "Number of call instructions CSE'd"); 76 STATISTIC(NumDSE, "Number of trivial dead stores removed"); 77 78 DEBUG_COUNTER(CSECounter, "early-cse", 79 "Controls which instructions are removed"); 80 81 static cl::opt<unsigned> EarlyCSEMssaOptCap( 82 "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden, 83 cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange " 84 "for faster compile. Caps the MemorySSA clobbering calls.")); 85 86 static cl::opt<bool> EarlyCSEDebugHash( 87 "earlycse-debug-hash", cl::init(false), cl::Hidden, 88 cl::desc("Perform extra assertion checking to verify that SimpleValue's hash " 89 "function is well-behaved w.r.t. its isEqual predicate")); 90 91 //===----------------------------------------------------------------------===// 92 // SimpleValue 93 //===----------------------------------------------------------------------===// 94 95 namespace { 96 97 /// Struct representing the available values in the scoped hash table. 98 struct SimpleValue { 99 Instruction *Inst; 100 101 SimpleValue(Instruction *I) : Inst(I) { 102 assert((isSentinel() || canHandle(I)) && "Inst can't be handled!"); 103 } 104 105 bool isSentinel() const { 106 return Inst == DenseMapInfo<Instruction *>::getEmptyKey() || 107 Inst == DenseMapInfo<Instruction *>::getTombstoneKey(); 108 } 109 110 static bool canHandle(Instruction *Inst) { 111 // This can only handle non-void readnone functions. 112 if (CallInst *CI = dyn_cast<CallInst>(Inst)) 113 return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy(); 114 return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) || 115 isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) || 116 isa<CmpInst>(Inst) || isa<SelectInst>(Inst) || 117 isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || 118 isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) || 119 isa<InsertValueInst>(Inst) || isa<FreezeInst>(Inst); 120 } 121 }; 122 123 } // end anonymous namespace 124 125 namespace llvm { 126 127 template <> struct DenseMapInfo<SimpleValue> { 128 static inline SimpleValue getEmptyKey() { 129 return DenseMapInfo<Instruction *>::getEmptyKey(); 130 } 131 132 static inline SimpleValue getTombstoneKey() { 133 return DenseMapInfo<Instruction *>::getTombstoneKey(); 134 } 135 136 static unsigned getHashValue(SimpleValue Val); 137 static bool isEqual(SimpleValue LHS, SimpleValue RHS); 138 }; 139 140 } // end namespace llvm 141 142 /// Match a 'select' including an optional 'not's of the condition. 143 static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A, 144 Value *&B, 145 SelectPatternFlavor &Flavor) { 146 // Return false if V is not even a select. 147 if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B)))) 148 return false; 149 150 // Look through a 'not' of the condition operand by swapping A/B. 151 Value *CondNot; 152 if (match(Cond, m_Not(m_Value(CondNot)))) { 153 Cond = CondNot; 154 std::swap(A, B); 155 } 156 157 // Match canonical forms of min/max. We are not using ValueTracking's 158 // more powerful matchSelectPattern() because it may rely on instruction flags 159 // such as "nsw". That would be incompatible with the current hashing 160 // mechanism that may remove flags to increase the likelihood of CSE. 161 162 Flavor = SPF_UNKNOWN; 163 CmpInst::Predicate Pred; 164 165 if (!match(Cond, m_ICmp(Pred, m_Specific(A), m_Specific(B)))) { 166 // Check for commuted variants of min/max by swapping predicate. 167 // If we do not match the standard or commuted patterns, this is not a 168 // recognized form of min/max, but it is still a select, so return true. 169 if (!match(Cond, m_ICmp(Pred, m_Specific(B), m_Specific(A)))) 170 return true; 171 Pred = ICmpInst::getSwappedPredicate(Pred); 172 } 173 174 switch (Pred) { 175 case CmpInst::ICMP_UGT: Flavor = SPF_UMAX; break; 176 case CmpInst::ICMP_ULT: Flavor = SPF_UMIN; break; 177 case CmpInst::ICMP_SGT: Flavor = SPF_SMAX; break; 178 case CmpInst::ICMP_SLT: Flavor = SPF_SMIN; break; 179 // Non-strict inequalities. 180 case CmpInst::ICMP_ULE: Flavor = SPF_UMIN; break; 181 case CmpInst::ICMP_UGE: Flavor = SPF_UMAX; break; 182 case CmpInst::ICMP_SLE: Flavor = SPF_SMIN; break; 183 case CmpInst::ICMP_SGE: Flavor = SPF_SMAX; break; 184 default: break; 185 } 186 187 return true; 188 } 189 190 static unsigned getHashValueImpl(SimpleValue Val) { 191 Instruction *Inst = Val.Inst; 192 // Hash in all of the operands as pointers. 193 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) { 194 Value *LHS = BinOp->getOperand(0); 195 Value *RHS = BinOp->getOperand(1); 196 if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1)) 197 std::swap(LHS, RHS); 198 199 return hash_combine(BinOp->getOpcode(), LHS, RHS); 200 } 201 202 if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) { 203 // Compares can be commuted by swapping the comparands and 204 // updating the predicate. Choose the form that has the 205 // comparands in sorted order, or in the case of a tie, the 206 // one with the lower predicate. 207 Value *LHS = CI->getOperand(0); 208 Value *RHS = CI->getOperand(1); 209 CmpInst::Predicate Pred = CI->getPredicate(); 210 CmpInst::Predicate SwappedPred = CI->getSwappedPredicate(); 211 if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) { 212 std::swap(LHS, RHS); 213 Pred = SwappedPred; 214 } 215 return hash_combine(Inst->getOpcode(), Pred, LHS, RHS); 216 } 217 218 // Hash general selects to allow matching commuted true/false operands. 219 SelectPatternFlavor SPF; 220 Value *Cond, *A, *B; 221 if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) { 222 // Hash min/max (cmp + select) to allow for commuted operands. 223 // Min/max may also have non-canonical compare predicate (eg, the compare for 224 // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the 225 // compare. 226 // TODO: We should also detect FP min/max. 227 if (SPF == SPF_SMIN || SPF == SPF_SMAX || 228 SPF == SPF_UMIN || SPF == SPF_UMAX) { 229 if (A > B) 230 std::swap(A, B); 231 return hash_combine(Inst->getOpcode(), SPF, A, B); 232 } 233 234 // Hash general selects to allow matching commuted true/false operands. 235 236 // If we do not have a compare as the condition, just hash in the condition. 237 CmpInst::Predicate Pred; 238 Value *X, *Y; 239 if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y)))) 240 return hash_combine(Inst->getOpcode(), Cond, A, B); 241 242 // Similar to cmp normalization (above) - canonicalize the predicate value: 243 // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A 244 if (CmpInst::getInversePredicate(Pred) < Pred) { 245 Pred = CmpInst::getInversePredicate(Pred); 246 std::swap(A, B); 247 } 248 return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B); 249 } 250 251 if (CastInst *CI = dyn_cast<CastInst>(Inst)) 252 return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0)); 253 254 if (FreezeInst *FI = dyn_cast<FreezeInst>(Inst)) 255 return hash_combine(FI->getOpcode(), FI->getOperand(0)); 256 257 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst)) 258 return hash_combine(EVI->getOpcode(), EVI->getOperand(0), 259 hash_combine_range(EVI->idx_begin(), EVI->idx_end())); 260 261 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst)) 262 return hash_combine(IVI->getOpcode(), IVI->getOperand(0), 263 IVI->getOperand(1), 264 hash_combine_range(IVI->idx_begin(), IVI->idx_end())); 265 266 assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) || 267 isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || 268 isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst) || 269 isa<FreezeInst>(Inst)) && 270 "Invalid/unknown instruction"); 271 272 // Handle intrinsics with commutative operands. 273 // TODO: Extend this to handle intrinsics with >2 operands where the 1st 274 // 2 operands are commutative. 275 auto *II = dyn_cast<IntrinsicInst>(Inst); 276 if (II && II->isCommutative() && II->getNumArgOperands() == 2) { 277 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 278 if (LHS > RHS) 279 std::swap(LHS, RHS); 280 return hash_combine(II->getOpcode(), LHS, RHS); 281 } 282 283 // Mix in the opcode. 284 return hash_combine( 285 Inst->getOpcode(), 286 hash_combine_range(Inst->value_op_begin(), Inst->value_op_end())); 287 } 288 289 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) { 290 #ifndef NDEBUG 291 // If -earlycse-debug-hash was specified, return a constant -- this 292 // will force all hashing to collide, so we'll exhaustively search 293 // the table for a match, and the assertion in isEqual will fire if 294 // there's a bug causing equal keys to hash differently. 295 if (EarlyCSEDebugHash) 296 return 0; 297 #endif 298 return getHashValueImpl(Val); 299 } 300 301 static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) { 302 Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst; 303 304 if (LHS.isSentinel() || RHS.isSentinel()) 305 return LHSI == RHSI; 306 307 if (LHSI->getOpcode() != RHSI->getOpcode()) 308 return false; 309 if (LHSI->isIdenticalToWhenDefined(RHSI)) 310 return true; 311 312 // If we're not strictly identical, we still might be a commutable instruction 313 if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) { 314 if (!LHSBinOp->isCommutative()) 315 return false; 316 317 assert(isa<BinaryOperator>(RHSI) && 318 "same opcode, but different instruction type?"); 319 BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI); 320 321 // Commuted equality 322 return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) && 323 LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0); 324 } 325 if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) { 326 assert(isa<CmpInst>(RHSI) && 327 "same opcode, but different instruction type?"); 328 CmpInst *RHSCmp = cast<CmpInst>(RHSI); 329 // Commuted equality 330 return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) && 331 LHSCmp->getOperand(1) == RHSCmp->getOperand(0) && 332 LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate(); 333 } 334 335 // TODO: Extend this for >2 args by matching the trailing N-2 args. 336 auto *LII = dyn_cast<IntrinsicInst>(LHSI); 337 auto *RII = dyn_cast<IntrinsicInst>(RHSI); 338 if (LII && RII && LII->getIntrinsicID() == RII->getIntrinsicID() && 339 LII->isCommutative() && LII->getNumArgOperands() == 2) { 340 return LII->getArgOperand(0) == RII->getArgOperand(1) && 341 LII->getArgOperand(1) == RII->getArgOperand(0); 342 } 343 344 // Min/max can occur with commuted operands, non-canonical predicates, 345 // and/or non-canonical operands. 346 // Selects can be non-trivially equivalent via inverted conditions and swaps. 347 SelectPatternFlavor LSPF, RSPF; 348 Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB; 349 if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) && 350 matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) { 351 if (LSPF == RSPF) { 352 // TODO: We should also detect FP min/max. 353 if (LSPF == SPF_SMIN || LSPF == SPF_SMAX || 354 LSPF == SPF_UMIN || LSPF == SPF_UMAX) 355 return ((LHSA == RHSA && LHSB == RHSB) || 356 (LHSA == RHSB && LHSB == RHSA)); 357 358 // select Cond, A, B <--> select not(Cond), B, A 359 if (CondL == CondR && LHSA == RHSA && LHSB == RHSB) 360 return true; 361 } 362 363 // If the true/false operands are swapped and the conditions are compares 364 // with inverted predicates, the selects are equal: 365 // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A 366 // 367 // This also handles patterns with a double-negation in the sense of not + 368 // inverse, because we looked through a 'not' in the matching function and 369 // swapped A/B: 370 // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A 371 // 372 // This intentionally does NOT handle patterns with a double-negation in 373 // the sense of not + not, because doing so could result in values 374 // comparing 375 // as equal that hash differently in the min/max cases like: 376 // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y 377 // ^ hashes as min ^ would not hash as min 378 // In the context of the EarlyCSE pass, however, such cases never reach 379 // this code, as we simplify the double-negation before hashing the second 380 // select (and so still succeed at CSEing them). 381 if (LHSA == RHSB && LHSB == RHSA) { 382 CmpInst::Predicate PredL, PredR; 383 Value *X, *Y; 384 if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) && 385 match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) && 386 CmpInst::getInversePredicate(PredL) == PredR) 387 return true; 388 } 389 } 390 391 return false; 392 } 393 394 bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) { 395 // These comparisons are nontrivial, so assert that equality implies 396 // hash equality (DenseMap demands this as an invariant). 397 bool Result = isEqualImpl(LHS, RHS); 398 assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) || 399 getHashValueImpl(LHS) == getHashValueImpl(RHS)); 400 return Result; 401 } 402 403 //===----------------------------------------------------------------------===// 404 // CallValue 405 //===----------------------------------------------------------------------===// 406 407 namespace { 408 409 /// Struct representing the available call values in the scoped hash 410 /// table. 411 struct CallValue { 412 Instruction *Inst; 413 414 CallValue(Instruction *I) : Inst(I) { 415 assert((isSentinel() || canHandle(I)) && "Inst can't be handled!"); 416 } 417 418 bool isSentinel() const { 419 return Inst == DenseMapInfo<Instruction *>::getEmptyKey() || 420 Inst == DenseMapInfo<Instruction *>::getTombstoneKey(); 421 } 422 423 static bool canHandle(Instruction *Inst) { 424 // Don't value number anything that returns void. 425 if (Inst->getType()->isVoidTy()) 426 return false; 427 428 CallInst *CI = dyn_cast<CallInst>(Inst); 429 if (!CI || !CI->onlyReadsMemory()) 430 return false; 431 return true; 432 } 433 }; 434 435 } // end anonymous namespace 436 437 namespace llvm { 438 439 template <> struct DenseMapInfo<CallValue> { 440 static inline CallValue getEmptyKey() { 441 return DenseMapInfo<Instruction *>::getEmptyKey(); 442 } 443 444 static inline CallValue getTombstoneKey() { 445 return DenseMapInfo<Instruction *>::getTombstoneKey(); 446 } 447 448 static unsigned getHashValue(CallValue Val); 449 static bool isEqual(CallValue LHS, CallValue RHS); 450 }; 451 452 } // end namespace llvm 453 454 unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) { 455 Instruction *Inst = Val.Inst; 456 457 // gc.relocate is 'special' call: its second and third operands are 458 // not real values, but indices into statepoint's argument list. 459 // Get values they point to. 460 if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(Inst)) 461 return hash_combine(GCR->getOpcode(), GCR->getOperand(0), 462 GCR->getBasePtr(), GCR->getDerivedPtr()); 463 464 // Hash all of the operands as pointers and mix in the opcode. 465 return hash_combine( 466 Inst->getOpcode(), 467 hash_combine_range(Inst->value_op_begin(), Inst->value_op_end())); 468 } 469 470 bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) { 471 Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst; 472 if (LHS.isSentinel() || RHS.isSentinel()) 473 return LHSI == RHSI; 474 475 // See comment above in `getHashValue()`. 476 if (const GCRelocateInst *GCR1 = dyn_cast<GCRelocateInst>(LHSI)) 477 if (const GCRelocateInst *GCR2 = dyn_cast<GCRelocateInst>(RHSI)) 478 return GCR1->getOperand(0) == GCR2->getOperand(0) && 479 GCR1->getBasePtr() == GCR2->getBasePtr() && 480 GCR1->getDerivedPtr() == GCR2->getDerivedPtr(); 481 482 return LHSI->isIdenticalTo(RHSI); 483 } 484 485 //===----------------------------------------------------------------------===// 486 // EarlyCSE implementation 487 //===----------------------------------------------------------------------===// 488 489 namespace { 490 491 /// A simple and fast domtree-based CSE pass. 492 /// 493 /// This pass does a simple depth-first walk over the dominator tree, 494 /// eliminating trivially redundant instructions and using instsimplify to 495 /// canonicalize things as it goes. It is intended to be fast and catch obvious 496 /// cases so that instcombine and other passes are more effective. It is 497 /// expected that a later pass of GVN will catch the interesting/hard cases. 498 class EarlyCSE { 499 public: 500 const TargetLibraryInfo &TLI; 501 const TargetTransformInfo &TTI; 502 DominatorTree &DT; 503 AssumptionCache &AC; 504 const SimplifyQuery SQ; 505 MemorySSA *MSSA; 506 std::unique_ptr<MemorySSAUpdater> MSSAUpdater; 507 508 using AllocatorTy = 509 RecyclingAllocator<BumpPtrAllocator, 510 ScopedHashTableVal<SimpleValue, Value *>>; 511 using ScopedHTType = 512 ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>, 513 AllocatorTy>; 514 515 /// A scoped hash table of the current values of all of our simple 516 /// scalar expressions. 517 /// 518 /// As we walk down the domtree, we look to see if instructions are in this: 519 /// if so, we replace them with what we find, otherwise we insert them so 520 /// that dominated values can succeed in their lookup. 521 ScopedHTType AvailableValues; 522 523 /// A scoped hash table of the current values of previously encountered 524 /// memory locations. 525 /// 526 /// This allows us to get efficient access to dominating loads or stores when 527 /// we have a fully redundant load. In addition to the most recent load, we 528 /// keep track of a generation count of the read, which is compared against 529 /// the current generation count. The current generation count is incremented 530 /// after every possibly writing memory operation, which ensures that we only 531 /// CSE loads with other loads that have no intervening store. Ordering 532 /// events (such as fences or atomic instructions) increment the generation 533 /// count as well; essentially, we model these as writes to all possible 534 /// locations. Note that atomic and/or volatile loads and stores can be 535 /// present the table; it is the responsibility of the consumer to inspect 536 /// the atomicity/volatility if needed. 537 struct LoadValue { 538 Instruction *DefInst = nullptr; 539 unsigned Generation = 0; 540 int MatchingId = -1; 541 bool IsAtomic = false; 542 543 LoadValue() = default; 544 LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId, 545 bool IsAtomic) 546 : DefInst(Inst), Generation(Generation), MatchingId(MatchingId), 547 IsAtomic(IsAtomic) {} 548 }; 549 550 using LoadMapAllocator = 551 RecyclingAllocator<BumpPtrAllocator, 552 ScopedHashTableVal<Value *, LoadValue>>; 553 using LoadHTType = 554 ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>, 555 LoadMapAllocator>; 556 557 LoadHTType AvailableLoads; 558 559 // A scoped hash table mapping memory locations (represented as typed 560 // addresses) to generation numbers at which that memory location became 561 // (henceforth indefinitely) invariant. 562 using InvariantMapAllocator = 563 RecyclingAllocator<BumpPtrAllocator, 564 ScopedHashTableVal<MemoryLocation, unsigned>>; 565 using InvariantHTType = 566 ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>, 567 InvariantMapAllocator>; 568 InvariantHTType AvailableInvariants; 569 570 /// A scoped hash table of the current values of read-only call 571 /// values. 572 /// 573 /// It uses the same generation count as loads. 574 using CallHTType = 575 ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>; 576 CallHTType AvailableCalls; 577 578 /// This is the current generation of the memory value. 579 unsigned CurrentGeneration = 0; 580 581 /// Set up the EarlyCSE runner for a particular function. 582 EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI, 583 const TargetTransformInfo &TTI, DominatorTree &DT, 584 AssumptionCache &AC, MemorySSA *MSSA) 585 : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA), 586 MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {} 587 588 bool run(); 589 590 private: 591 unsigned ClobberCounter = 0; 592 // Almost a POD, but needs to call the constructors for the scoped hash 593 // tables so that a new scope gets pushed on. These are RAII so that the 594 // scope gets popped when the NodeScope is destroyed. 595 class NodeScope { 596 public: 597 NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads, 598 InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls) 599 : Scope(AvailableValues), LoadScope(AvailableLoads), 600 InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {} 601 NodeScope(const NodeScope &) = delete; 602 NodeScope &operator=(const NodeScope &) = delete; 603 604 private: 605 ScopedHTType::ScopeTy Scope; 606 LoadHTType::ScopeTy LoadScope; 607 InvariantHTType::ScopeTy InvariantScope; 608 CallHTType::ScopeTy CallScope; 609 }; 610 611 // Contains all the needed information to create a stack for doing a depth 612 // first traversal of the tree. This includes scopes for values, loads, and 613 // calls as well as the generation. There is a child iterator so that the 614 // children do not need to be store separately. 615 class StackNode { 616 public: 617 StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads, 618 InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls, 619 unsigned cg, DomTreeNode *n, DomTreeNode::const_iterator child, 620 DomTreeNode::const_iterator end) 621 : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child), 622 EndIter(end), 623 Scopes(AvailableValues, AvailableLoads, AvailableInvariants, 624 AvailableCalls) 625 {} 626 StackNode(const StackNode &) = delete; 627 StackNode &operator=(const StackNode &) = delete; 628 629 // Accessors. 630 unsigned currentGeneration() const { return CurrentGeneration; } 631 unsigned childGeneration() const { return ChildGeneration; } 632 void childGeneration(unsigned generation) { ChildGeneration = generation; } 633 DomTreeNode *node() { return Node; } 634 DomTreeNode::const_iterator childIter() const { return ChildIter; } 635 636 DomTreeNode *nextChild() { 637 DomTreeNode *child = *ChildIter; 638 ++ChildIter; 639 return child; 640 } 641 642 DomTreeNode::const_iterator end() const { return EndIter; } 643 bool isProcessed() const { return Processed; } 644 void process() { Processed = true; } 645 646 private: 647 unsigned CurrentGeneration; 648 unsigned ChildGeneration; 649 DomTreeNode *Node; 650 DomTreeNode::const_iterator ChildIter; 651 DomTreeNode::const_iterator EndIter; 652 NodeScope Scopes; 653 bool Processed = false; 654 }; 655 656 /// Wrapper class to handle memory instructions, including loads, 657 /// stores and intrinsic loads and stores defined by the target. 658 class ParseMemoryInst { 659 public: 660 ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI) 661 : Inst(Inst) { 662 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 663 IntrID = II->getIntrinsicID(); 664 if (TTI.getTgtMemIntrinsic(II, Info)) 665 return; 666 if (isHandledNonTargetIntrinsic(IntrID)) { 667 switch (IntrID) { 668 case Intrinsic::masked_load: 669 Info.PtrVal = Inst->getOperand(0); 670 Info.MatchingId = Intrinsic::masked_load; 671 Info.ReadMem = true; 672 Info.WriteMem = false; 673 Info.IsVolatile = false; 674 break; 675 case Intrinsic::masked_store: 676 Info.PtrVal = Inst->getOperand(1); 677 // Use the ID of masked load as the "matching id". This will 678 // prevent matching non-masked loads/stores with masked ones 679 // (which could be done), but at the moment, the code here 680 // does not support matching intrinsics with non-intrinsics, 681 // so keep the MatchingIds specific to masked instructions 682 // for now (TODO). 683 Info.MatchingId = Intrinsic::masked_load; 684 Info.ReadMem = false; 685 Info.WriteMem = true; 686 Info.IsVolatile = false; 687 break; 688 } 689 } 690 } 691 } 692 693 Instruction *get() { return Inst; } 694 const Instruction *get() const { return Inst; } 695 696 bool isLoad() const { 697 if (IntrID != 0) 698 return Info.ReadMem; 699 return isa<LoadInst>(Inst); 700 } 701 702 bool isStore() const { 703 if (IntrID != 0) 704 return Info.WriteMem; 705 return isa<StoreInst>(Inst); 706 } 707 708 bool isAtomic() const { 709 if (IntrID != 0) 710 return Info.Ordering != AtomicOrdering::NotAtomic; 711 return Inst->isAtomic(); 712 } 713 714 bool isUnordered() const { 715 if (IntrID != 0) 716 return Info.isUnordered(); 717 718 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 719 return LI->isUnordered(); 720 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 721 return SI->isUnordered(); 722 } 723 // Conservative answer 724 return !Inst->isAtomic(); 725 } 726 727 bool isVolatile() const { 728 if (IntrID != 0) 729 return Info.IsVolatile; 730 731 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 732 return LI->isVolatile(); 733 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 734 return SI->isVolatile(); 735 } 736 // Conservative answer 737 return true; 738 } 739 740 bool isInvariantLoad() const { 741 if (auto *LI = dyn_cast<LoadInst>(Inst)) 742 return LI->hasMetadata(LLVMContext::MD_invariant_load); 743 return false; 744 } 745 746 bool isValid() const { return getPointerOperand() != nullptr; } 747 748 // For regular (non-intrinsic) loads/stores, this is set to -1. For 749 // intrinsic loads/stores, the id is retrieved from the corresponding 750 // field in the MemIntrinsicInfo structure. That field contains 751 // non-negative values only. 752 int getMatchingId() const { 753 if (IntrID != 0) 754 return Info.MatchingId; 755 return -1; 756 } 757 758 Value *getPointerOperand() const { 759 if (IntrID != 0) 760 return Info.PtrVal; 761 return getLoadStorePointerOperand(Inst); 762 } 763 764 bool mayReadFromMemory() const { 765 if (IntrID != 0) 766 return Info.ReadMem; 767 return Inst->mayReadFromMemory(); 768 } 769 770 bool mayWriteToMemory() const { 771 if (IntrID != 0) 772 return Info.WriteMem; 773 return Inst->mayWriteToMemory(); 774 } 775 776 private: 777 Intrinsic::ID IntrID = 0; 778 MemIntrinsicInfo Info; 779 Instruction *Inst; 780 }; 781 782 // This function is to prevent accidentally passing a non-target 783 // intrinsic ID to TargetTransformInfo. 784 static bool isHandledNonTargetIntrinsic(Intrinsic::ID ID) { 785 switch (ID) { 786 case Intrinsic::masked_load: 787 case Intrinsic::masked_store: 788 return true; 789 } 790 return false; 791 } 792 static bool isHandledNonTargetIntrinsic(const Value *V) { 793 if (auto *II = dyn_cast<IntrinsicInst>(V)) 794 return isHandledNonTargetIntrinsic(II->getIntrinsicID()); 795 return false; 796 } 797 798 bool processNode(DomTreeNode *Node); 799 800 bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI, 801 const BasicBlock *BB, const BasicBlock *Pred); 802 803 Value *getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst, 804 unsigned CurrentGeneration); 805 806 bool overridingStores(const ParseMemoryInst &Earlier, 807 const ParseMemoryInst &Later); 808 809 Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const { 810 if (auto *LI = dyn_cast<LoadInst>(Inst)) 811 return LI; 812 if (auto *SI = dyn_cast<StoreInst>(Inst)) 813 return SI->getValueOperand(); 814 assert(isa<IntrinsicInst>(Inst) && "Instruction not supported"); 815 auto *II = cast<IntrinsicInst>(Inst); 816 if (isHandledNonTargetIntrinsic(II->getIntrinsicID())) 817 return getOrCreateResultNonTargetMemIntrinsic(II, ExpectedType); 818 return TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType); 819 } 820 821 Value *getOrCreateResultNonTargetMemIntrinsic(IntrinsicInst *II, 822 Type *ExpectedType) const { 823 switch (II->getIntrinsicID()) { 824 case Intrinsic::masked_load: 825 return II; 826 case Intrinsic::masked_store: 827 return II->getOperand(0); 828 } 829 return nullptr; 830 } 831 832 /// Return true if the instruction is known to only operate on memory 833 /// provably invariant in the given "generation". 834 bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt); 835 836 bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration, 837 Instruction *EarlierInst, Instruction *LaterInst); 838 839 bool isNonTargetIntrinsicMatch(const IntrinsicInst *Earlier, 840 const IntrinsicInst *Later) { 841 auto IsSubmask = [](const Value *Mask0, const Value *Mask1) { 842 // Is Mask0 a submask of Mask1? 843 if (Mask0 == Mask1) 844 return true; 845 if (isa<UndefValue>(Mask0) || isa<UndefValue>(Mask1)) 846 return false; 847 auto *Vec0 = dyn_cast<ConstantVector>(Mask0); 848 auto *Vec1 = dyn_cast<ConstantVector>(Mask1); 849 if (!Vec0 || !Vec1) 850 return false; 851 assert(Vec0->getType() == Vec1->getType() && 852 "Masks should have the same type"); 853 for (int i = 0, e = Vec0->getNumOperands(); i != e; ++i) { 854 Constant *Elem0 = Vec0->getOperand(i); 855 Constant *Elem1 = Vec1->getOperand(i); 856 auto *Int0 = dyn_cast<ConstantInt>(Elem0); 857 if (Int0 && Int0->isZero()) 858 continue; 859 auto *Int1 = dyn_cast<ConstantInt>(Elem1); 860 if (Int1 && !Int1->isZero()) 861 continue; 862 if (isa<UndefValue>(Elem0) || isa<UndefValue>(Elem1)) 863 return false; 864 if (Elem0 == Elem1) 865 continue; 866 return false; 867 } 868 return true; 869 }; 870 auto PtrOp = [](const IntrinsicInst *II) { 871 if (II->getIntrinsicID() == Intrinsic::masked_load) 872 return II->getOperand(0); 873 if (II->getIntrinsicID() == Intrinsic::masked_store) 874 return II->getOperand(1); 875 llvm_unreachable("Unexpected IntrinsicInst"); 876 }; 877 auto MaskOp = [](const IntrinsicInst *II) { 878 if (II->getIntrinsicID() == Intrinsic::masked_load) 879 return II->getOperand(2); 880 if (II->getIntrinsicID() == Intrinsic::masked_store) 881 return II->getOperand(3); 882 llvm_unreachable("Unexpected IntrinsicInst"); 883 }; 884 auto ThruOp = [](const IntrinsicInst *II) { 885 if (II->getIntrinsicID() == Intrinsic::masked_load) 886 return II->getOperand(3); 887 llvm_unreachable("Unexpected IntrinsicInst"); 888 }; 889 890 if (PtrOp(Earlier) != PtrOp(Later)) 891 return false; 892 893 Intrinsic::ID IDE = Earlier->getIntrinsicID(); 894 Intrinsic::ID IDL = Later->getIntrinsicID(); 895 // We could really use specific intrinsic classes for masked loads 896 // and stores in IntrinsicInst.h. 897 if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_load) { 898 // Trying to replace later masked load with the earlier one. 899 // Check that the pointers are the same, and 900 // - masks and pass-throughs are the same, or 901 // - replacee's pass-through is "undef" and replacer's mask is a 902 // super-set of the replacee's mask. 903 if (MaskOp(Earlier) == MaskOp(Later) && ThruOp(Earlier) == ThruOp(Later)) 904 return true; 905 if (!isa<UndefValue>(ThruOp(Later))) 906 return false; 907 return IsSubmask(MaskOp(Later), MaskOp(Earlier)); 908 } 909 if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_load) { 910 // Trying to replace a load of a stored value with the store's value. 911 // Check that the pointers are the same, and 912 // - load's mask is a subset of store's mask, and 913 // - load's pass-through is "undef". 914 if (!IsSubmask(MaskOp(Later), MaskOp(Earlier))) 915 return false; 916 return isa<UndefValue>(ThruOp(Later)); 917 } 918 if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_store) { 919 // Trying to remove a store of the loaded value. 920 // Check that the pointers are the same, and 921 // - store's mask is a subset of the load's mask. 922 return IsSubmask(MaskOp(Later), MaskOp(Earlier)); 923 } 924 if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_store) { 925 // Trying to remove a dead store (earlier). 926 // Check that the pointers are the same, 927 // - the to-be-removed store's mask is a subset of the other store's 928 // mask. 929 return IsSubmask(MaskOp(Earlier), MaskOp(Later)); 930 } 931 return false; 932 } 933 934 void removeMSSA(Instruction &Inst) { 935 if (!MSSA) 936 return; 937 if (VerifyMemorySSA) 938 MSSA->verifyMemorySSA(); 939 // Removing a store here can leave MemorySSA in an unoptimized state by 940 // creating MemoryPhis that have identical arguments and by creating 941 // MemoryUses whose defining access is not an actual clobber. The phi case 942 // is handled by MemorySSA when passing OptimizePhis = true to 943 // removeMemoryAccess. The non-optimized MemoryUse case is lazily updated 944 // by MemorySSA's getClobberingMemoryAccess. 945 MSSAUpdater->removeMemoryAccess(&Inst, true); 946 } 947 }; 948 949 } // end anonymous namespace 950 951 /// Determine if the memory referenced by LaterInst is from the same heap 952 /// version as EarlierInst. 953 /// This is currently called in two scenarios: 954 /// 955 /// load p 956 /// ... 957 /// load p 958 /// 959 /// and 960 /// 961 /// x = load p 962 /// ... 963 /// store x, p 964 /// 965 /// in both cases we want to verify that there are no possible writes to the 966 /// memory referenced by p between the earlier and later instruction. 967 bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration, 968 unsigned LaterGeneration, 969 Instruction *EarlierInst, 970 Instruction *LaterInst) { 971 // Check the simple memory generation tracking first. 972 if (EarlierGeneration == LaterGeneration) 973 return true; 974 975 if (!MSSA) 976 return false; 977 978 // If MemorySSA has determined that one of EarlierInst or LaterInst does not 979 // read/write memory, then we can safely return true here. 980 // FIXME: We could be more aggressive when checking doesNotAccessMemory(), 981 // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass 982 // by also checking the MemorySSA MemoryAccess on the instruction. Initial 983 // experiments suggest this isn't worthwhile, at least for C/C++ code compiled 984 // with the default optimization pipeline. 985 auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst); 986 if (!EarlierMA) 987 return true; 988 auto *LaterMA = MSSA->getMemoryAccess(LaterInst); 989 if (!LaterMA) 990 return true; 991 992 // Since we know LaterDef dominates LaterInst and EarlierInst dominates 993 // LaterInst, if LaterDef dominates EarlierInst then it can't occur between 994 // EarlierInst and LaterInst and neither can any other write that potentially 995 // clobbers LaterInst. 996 MemoryAccess *LaterDef; 997 if (ClobberCounter < EarlyCSEMssaOptCap) { 998 LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst); 999 ClobberCounter++; 1000 } else 1001 LaterDef = LaterMA->getDefiningAccess(); 1002 1003 return MSSA->dominates(LaterDef, EarlierMA); 1004 } 1005 1006 bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) { 1007 // A location loaded from with an invariant_load is assumed to *never* change 1008 // within the visible scope of the compilation. 1009 if (auto *LI = dyn_cast<LoadInst>(I)) 1010 if (LI->hasMetadata(LLVMContext::MD_invariant_load)) 1011 return true; 1012 1013 auto MemLocOpt = MemoryLocation::getOrNone(I); 1014 if (!MemLocOpt) 1015 // "target" intrinsic forms of loads aren't currently known to 1016 // MemoryLocation::get. TODO 1017 return false; 1018 MemoryLocation MemLoc = *MemLocOpt; 1019 if (!AvailableInvariants.count(MemLoc)) 1020 return false; 1021 1022 // Is the generation at which this became invariant older than the 1023 // current one? 1024 return AvailableInvariants.lookup(MemLoc) <= GenAt; 1025 } 1026 1027 bool EarlyCSE::handleBranchCondition(Instruction *CondInst, 1028 const BranchInst *BI, const BasicBlock *BB, 1029 const BasicBlock *Pred) { 1030 assert(BI->isConditional() && "Should be a conditional branch!"); 1031 assert(BI->getCondition() == CondInst && "Wrong condition?"); 1032 assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB); 1033 auto *TorF = (BI->getSuccessor(0) == BB) 1034 ? ConstantInt::getTrue(BB->getContext()) 1035 : ConstantInt::getFalse(BB->getContext()); 1036 auto MatchBinOp = [](Instruction *I, unsigned Opcode, Value *&LHS, 1037 Value *&RHS) { 1038 if (Opcode == Instruction::And && 1039 match(I, m_LogicalAnd(m_Value(LHS), m_Value(RHS)))) 1040 return true; 1041 else if (Opcode == Instruction::Or && 1042 match(I, m_LogicalOr(m_Value(LHS), m_Value(RHS)))) 1043 return true; 1044 return false; 1045 }; 1046 // If the condition is AND operation, we can propagate its operands into the 1047 // true branch. If it is OR operation, we can propagate them into the false 1048 // branch. 1049 unsigned PropagateOpcode = 1050 (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or; 1051 1052 bool MadeChanges = false; 1053 SmallVector<Instruction *, 4> WorkList; 1054 SmallPtrSet<Instruction *, 4> Visited; 1055 WorkList.push_back(CondInst); 1056 while (!WorkList.empty()) { 1057 Instruction *Curr = WorkList.pop_back_val(); 1058 1059 AvailableValues.insert(Curr, TorF); 1060 LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '" 1061 << Curr->getName() << "' as " << *TorF << " in " 1062 << BB->getName() << "\n"); 1063 if (!DebugCounter::shouldExecute(CSECounter)) { 1064 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1065 } else { 1066 // Replace all dominated uses with the known value. 1067 if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT, 1068 BasicBlockEdge(Pred, BB))) { 1069 NumCSECVP += Count; 1070 MadeChanges = true; 1071 } 1072 } 1073 1074 Value *LHS, *RHS; 1075 if (MatchBinOp(Curr, PropagateOpcode, LHS, RHS)) 1076 for (auto &Op : { LHS, RHS }) 1077 if (Instruction *OPI = dyn_cast<Instruction>(Op)) 1078 if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second) 1079 WorkList.push_back(OPI); 1080 } 1081 1082 return MadeChanges; 1083 } 1084 1085 Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst, 1086 unsigned CurrentGeneration) { 1087 if (InVal.DefInst == nullptr) 1088 return nullptr; 1089 if (InVal.MatchingId != MemInst.getMatchingId()) 1090 return nullptr; 1091 // We don't yet handle removing loads with ordering of any kind. 1092 if (MemInst.isVolatile() || !MemInst.isUnordered()) 1093 return nullptr; 1094 // We can't replace an atomic load with one which isn't also atomic. 1095 if (MemInst.isLoad() && !InVal.IsAtomic && MemInst.isAtomic()) 1096 return nullptr; 1097 // The value V returned from this function is used differently depending 1098 // on whether MemInst is a load or a store. If it's a load, we will replace 1099 // MemInst with V, if it's a store, we will check if V is the same as the 1100 // available value. 1101 bool MemInstMatching = !MemInst.isLoad(); 1102 Instruction *Matching = MemInstMatching ? MemInst.get() : InVal.DefInst; 1103 Instruction *Other = MemInstMatching ? InVal.DefInst : MemInst.get(); 1104 1105 // For stores check the result values before checking memory generation 1106 // (otherwise isSameMemGeneration may crash). 1107 Value *Result = MemInst.isStore() 1108 ? getOrCreateResult(Matching, Other->getType()) 1109 : nullptr; 1110 if (MemInst.isStore() && InVal.DefInst != Result) 1111 return nullptr; 1112 1113 // Deal with non-target memory intrinsics. 1114 bool MatchingNTI = isHandledNonTargetIntrinsic(Matching); 1115 bool OtherNTI = isHandledNonTargetIntrinsic(Other); 1116 if (OtherNTI != MatchingNTI) 1117 return nullptr; 1118 if (OtherNTI && MatchingNTI) { 1119 if (!isNonTargetIntrinsicMatch(cast<IntrinsicInst>(InVal.DefInst), 1120 cast<IntrinsicInst>(MemInst.get()))) 1121 return nullptr; 1122 } 1123 1124 if (!isOperatingOnInvariantMemAt(MemInst.get(), InVal.Generation) && 1125 !isSameMemGeneration(InVal.Generation, CurrentGeneration, InVal.DefInst, 1126 MemInst.get())) 1127 return nullptr; 1128 1129 if (!Result) 1130 Result = getOrCreateResult(Matching, Other->getType()); 1131 return Result; 1132 } 1133 1134 bool EarlyCSE::overridingStores(const ParseMemoryInst &Earlier, 1135 const ParseMemoryInst &Later) { 1136 // Can we remove Earlier store because of Later store? 1137 1138 assert(Earlier.isUnordered() && !Earlier.isVolatile() && 1139 "Violated invariant"); 1140 if (Earlier.getPointerOperand() != Later.getPointerOperand()) 1141 return false; 1142 if (Earlier.getMatchingId() != Later.getMatchingId()) 1143 return false; 1144 // At the moment, we don't remove ordered stores, but do remove 1145 // unordered atomic stores. There's no special requirement (for 1146 // unordered atomics) about removing atomic stores only in favor of 1147 // other atomic stores since we were going to execute the non-atomic 1148 // one anyway and the atomic one might never have become visible. 1149 if (!Earlier.isUnordered() || !Later.isUnordered()) 1150 return false; 1151 1152 // Deal with non-target memory intrinsics. 1153 bool ENTI = isHandledNonTargetIntrinsic(Earlier.get()); 1154 bool LNTI = isHandledNonTargetIntrinsic(Later.get()); 1155 if (ENTI && LNTI) 1156 return isNonTargetIntrinsicMatch(cast<IntrinsicInst>(Earlier.get()), 1157 cast<IntrinsicInst>(Later.get())); 1158 1159 // Because of the check above, at least one of them is false. 1160 // For now disallow matching intrinsics with non-intrinsics, 1161 // so assume that the stores match if neither is an intrinsic. 1162 return ENTI == LNTI; 1163 } 1164 1165 bool EarlyCSE::processNode(DomTreeNode *Node) { 1166 bool Changed = false; 1167 BasicBlock *BB = Node->getBlock(); 1168 1169 // If this block has a single predecessor, then the predecessor is the parent 1170 // of the domtree node and all of the live out memory values are still current 1171 // in this block. If this block has multiple predecessors, then they could 1172 // have invalidated the live-out memory values of our parent value. For now, 1173 // just be conservative and invalidate memory if this block has multiple 1174 // predecessors. 1175 if (!BB->getSinglePredecessor()) 1176 ++CurrentGeneration; 1177 1178 // If this node has a single predecessor which ends in a conditional branch, 1179 // we can infer the value of the branch condition given that we took this 1180 // path. We need the single predecessor to ensure there's not another path 1181 // which reaches this block where the condition might hold a different 1182 // value. Since we're adding this to the scoped hash table (like any other 1183 // def), it will have been popped if we encounter a future merge block. 1184 if (BasicBlock *Pred = BB->getSinglePredecessor()) { 1185 auto *BI = dyn_cast<BranchInst>(Pred->getTerminator()); 1186 if (BI && BI->isConditional()) { 1187 auto *CondInst = dyn_cast<Instruction>(BI->getCondition()); 1188 if (CondInst && SimpleValue::canHandle(CondInst)) 1189 Changed |= handleBranchCondition(CondInst, BI, BB, Pred); 1190 } 1191 } 1192 1193 /// LastStore - Keep track of the last non-volatile store that we saw... for 1194 /// as long as there in no instruction that reads memory. If we see a store 1195 /// to the same location, we delete the dead store. This zaps trivial dead 1196 /// stores which can occur in bitfield code among other things. 1197 Instruction *LastStore = nullptr; 1198 1199 // See if any instructions in the block can be eliminated. If so, do it. If 1200 // not, add them to AvailableValues. 1201 for (Instruction &Inst : make_early_inc_range(BB->getInstList())) { 1202 // Dead instructions should just be removed. 1203 if (isInstructionTriviallyDead(&Inst, &TLI)) { 1204 LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << Inst << '\n'); 1205 if (!DebugCounter::shouldExecute(CSECounter)) { 1206 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1207 continue; 1208 } 1209 1210 salvageKnowledge(&Inst, &AC); 1211 salvageDebugInfo(Inst); 1212 removeMSSA(Inst); 1213 Inst.eraseFromParent(); 1214 Changed = true; 1215 ++NumSimplify; 1216 continue; 1217 } 1218 1219 // Skip assume intrinsics, they don't really have side effects (although 1220 // they're marked as such to ensure preservation of control dependencies), 1221 // and this pass will not bother with its removal. However, we should mark 1222 // its condition as true for all dominated blocks. 1223 if (match(&Inst, m_Intrinsic<Intrinsic::assume>())) { 1224 auto *CondI = 1225 dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0)); 1226 if (CondI && SimpleValue::canHandle(CondI)) { 1227 LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << Inst 1228 << '\n'); 1229 AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext())); 1230 } else 1231 LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << Inst << '\n'); 1232 continue; 1233 } 1234 1235 // Likewise, noalias intrinsics don't actually write. 1236 if (match(&Inst, 1237 m_Intrinsic<Intrinsic::experimental_noalias_scope_decl>())) { 1238 LLVM_DEBUG(dbgs() << "EarlyCSE skipping noalias intrinsic: " << Inst 1239 << '\n'); 1240 continue; 1241 } 1242 1243 // Skip sideeffect intrinsics, for the same reason as assume intrinsics. 1244 if (match(&Inst, m_Intrinsic<Intrinsic::sideeffect>())) { 1245 LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << Inst << '\n'); 1246 continue; 1247 } 1248 1249 // We can skip all invariant.start intrinsics since they only read memory, 1250 // and we can forward values across it. For invariant starts without 1251 // invariant ends, we can use the fact that the invariantness never ends to 1252 // start a scope in the current generaton which is true for all future 1253 // generations. Also, we dont need to consume the last store since the 1254 // semantics of invariant.start allow us to perform DSE of the last 1255 // store, if there was a store following invariant.start. Consider: 1256 // 1257 // store 30, i8* p 1258 // invariant.start(p) 1259 // store 40, i8* p 1260 // We can DSE the store to 30, since the store 40 to invariant location p 1261 // causes undefined behaviour. 1262 if (match(&Inst, m_Intrinsic<Intrinsic::invariant_start>())) { 1263 // If there are any uses, the scope might end. 1264 if (!Inst.use_empty()) 1265 continue; 1266 MemoryLocation MemLoc = 1267 MemoryLocation::getForArgument(&cast<CallInst>(Inst), 1, TLI); 1268 // Don't start a scope if we already have a better one pushed 1269 if (!AvailableInvariants.count(MemLoc)) 1270 AvailableInvariants.insert(MemLoc, CurrentGeneration); 1271 continue; 1272 } 1273 1274 if (isGuard(&Inst)) { 1275 if (auto *CondI = 1276 dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0))) { 1277 if (SimpleValue::canHandle(CondI)) { 1278 // Do we already know the actual value of this condition? 1279 if (auto *KnownCond = AvailableValues.lookup(CondI)) { 1280 // Is the condition known to be true? 1281 if (isa<ConstantInt>(KnownCond) && 1282 cast<ConstantInt>(KnownCond)->isOne()) { 1283 LLVM_DEBUG(dbgs() 1284 << "EarlyCSE removing guard: " << Inst << '\n'); 1285 salvageKnowledge(&Inst, &AC); 1286 removeMSSA(Inst); 1287 Inst.eraseFromParent(); 1288 Changed = true; 1289 continue; 1290 } else 1291 // Use the known value if it wasn't true. 1292 cast<CallInst>(Inst).setArgOperand(0, KnownCond); 1293 } 1294 // The condition we're on guarding here is true for all dominated 1295 // locations. 1296 AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext())); 1297 } 1298 } 1299 1300 // Guard intrinsics read all memory, but don't write any memory. 1301 // Accordingly, don't update the generation but consume the last store (to 1302 // avoid an incorrect DSE). 1303 LastStore = nullptr; 1304 continue; 1305 } 1306 1307 // If the instruction can be simplified (e.g. X+0 = X) then replace it with 1308 // its simpler value. 1309 if (Value *V = SimplifyInstruction(&Inst, SQ)) { 1310 LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << " to: " << *V 1311 << '\n'); 1312 if (!DebugCounter::shouldExecute(CSECounter)) { 1313 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1314 } else { 1315 bool Killed = false; 1316 if (!Inst.use_empty()) { 1317 Inst.replaceAllUsesWith(V); 1318 Changed = true; 1319 } 1320 if (isInstructionTriviallyDead(&Inst, &TLI)) { 1321 salvageKnowledge(&Inst, &AC); 1322 removeMSSA(Inst); 1323 Inst.eraseFromParent(); 1324 Changed = true; 1325 Killed = true; 1326 } 1327 if (Changed) 1328 ++NumSimplify; 1329 if (Killed) 1330 continue; 1331 } 1332 } 1333 1334 // If this is a simple instruction that we can value number, process it. 1335 if (SimpleValue::canHandle(&Inst)) { 1336 // See if the instruction has an available value. If so, use it. 1337 if (Value *V = AvailableValues.lookup(&Inst)) { 1338 LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << Inst << " to: " << *V 1339 << '\n'); 1340 if (!DebugCounter::shouldExecute(CSECounter)) { 1341 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1342 continue; 1343 } 1344 if (auto *I = dyn_cast<Instruction>(V)) 1345 I->andIRFlags(&Inst); 1346 Inst.replaceAllUsesWith(V); 1347 salvageKnowledge(&Inst, &AC); 1348 removeMSSA(Inst); 1349 Inst.eraseFromParent(); 1350 Changed = true; 1351 ++NumCSE; 1352 continue; 1353 } 1354 1355 // Otherwise, just remember that this value is available. 1356 AvailableValues.insert(&Inst, &Inst); 1357 continue; 1358 } 1359 1360 ParseMemoryInst MemInst(&Inst, TTI); 1361 // If this is a non-volatile load, process it. 1362 if (MemInst.isValid() && MemInst.isLoad()) { 1363 // (conservatively) we can't peak past the ordering implied by this 1364 // operation, but we can add this load to our set of available values 1365 if (MemInst.isVolatile() || !MemInst.isUnordered()) { 1366 LastStore = nullptr; 1367 ++CurrentGeneration; 1368 } 1369 1370 if (MemInst.isInvariantLoad()) { 1371 // If we pass an invariant load, we know that memory location is 1372 // indefinitely constant from the moment of first dereferenceability. 1373 // We conservatively treat the invariant_load as that moment. If we 1374 // pass a invariant load after already establishing a scope, don't 1375 // restart it since we want to preserve the earliest point seen. 1376 auto MemLoc = MemoryLocation::get(&Inst); 1377 if (!AvailableInvariants.count(MemLoc)) 1378 AvailableInvariants.insert(MemLoc, CurrentGeneration); 1379 } 1380 1381 // If we have an available version of this load, and if it is the right 1382 // generation or the load is known to be from an invariant location, 1383 // replace this instruction. 1384 // 1385 // If either the dominating load or the current load are invariant, then 1386 // we can assume the current load loads the same value as the dominating 1387 // load. 1388 LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand()); 1389 if (Value *Op = getMatchingValue(InVal, MemInst, CurrentGeneration)) { 1390 LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst 1391 << " to: " << *InVal.DefInst << '\n'); 1392 if (!DebugCounter::shouldExecute(CSECounter)) { 1393 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1394 continue; 1395 } 1396 if (!Inst.use_empty()) 1397 Inst.replaceAllUsesWith(Op); 1398 salvageKnowledge(&Inst, &AC); 1399 removeMSSA(Inst); 1400 Inst.eraseFromParent(); 1401 Changed = true; 1402 ++NumCSELoad; 1403 continue; 1404 } 1405 1406 // Otherwise, remember that we have this instruction. 1407 AvailableLoads.insert(MemInst.getPointerOperand(), 1408 LoadValue(&Inst, CurrentGeneration, 1409 MemInst.getMatchingId(), 1410 MemInst.isAtomic())); 1411 LastStore = nullptr; 1412 continue; 1413 } 1414 1415 // If this instruction may read from memory or throw (and potentially read 1416 // from memory in the exception handler), forget LastStore. Load/store 1417 // intrinsics will indicate both a read and a write to memory. The target 1418 // may override this (e.g. so that a store intrinsic does not read from 1419 // memory, and thus will be treated the same as a regular store for 1420 // commoning purposes). 1421 if ((Inst.mayReadFromMemory() || Inst.mayThrow()) && 1422 !(MemInst.isValid() && !MemInst.mayReadFromMemory())) 1423 LastStore = nullptr; 1424 1425 // If this is a read-only call, process it. 1426 if (CallValue::canHandle(&Inst)) { 1427 // If we have an available version of this call, and if it is the right 1428 // generation, replace this instruction. 1429 std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(&Inst); 1430 if (InVal.first != nullptr && 1431 isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first, 1432 &Inst)) { 1433 LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << Inst 1434 << " to: " << *InVal.first << '\n'); 1435 if (!DebugCounter::shouldExecute(CSECounter)) { 1436 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1437 continue; 1438 } 1439 if (!Inst.use_empty()) 1440 Inst.replaceAllUsesWith(InVal.first); 1441 salvageKnowledge(&Inst, &AC); 1442 removeMSSA(Inst); 1443 Inst.eraseFromParent(); 1444 Changed = true; 1445 ++NumCSECall; 1446 continue; 1447 } 1448 1449 // Otherwise, remember that we have this instruction. 1450 AvailableCalls.insert(&Inst, std::make_pair(&Inst, CurrentGeneration)); 1451 continue; 1452 } 1453 1454 // A release fence requires that all stores complete before it, but does 1455 // not prevent the reordering of following loads 'before' the fence. As a 1456 // result, we don't need to consider it as writing to memory and don't need 1457 // to advance the generation. We do need to prevent DSE across the fence, 1458 // but that's handled above. 1459 if (auto *FI = dyn_cast<FenceInst>(&Inst)) 1460 if (FI->getOrdering() == AtomicOrdering::Release) { 1461 assert(Inst.mayReadFromMemory() && "relied on to prevent DSE above"); 1462 continue; 1463 } 1464 1465 // write back DSE - If we write back the same value we just loaded from 1466 // the same location and haven't passed any intervening writes or ordering 1467 // operations, we can remove the write. The primary benefit is in allowing 1468 // the available load table to remain valid and value forward past where 1469 // the store originally was. 1470 if (MemInst.isValid() && MemInst.isStore()) { 1471 LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand()); 1472 if (InVal.DefInst && 1473 InVal.DefInst == getMatchingValue(InVal, MemInst, CurrentGeneration)) { 1474 // It is okay to have a LastStore to a different pointer here if MemorySSA 1475 // tells us that the load and store are from the same memory generation. 1476 // In that case, LastStore should keep its present value since we're 1477 // removing the current store. 1478 assert((!LastStore || 1479 ParseMemoryInst(LastStore, TTI).getPointerOperand() == 1480 MemInst.getPointerOperand() || 1481 MSSA) && 1482 "can't have an intervening store if not using MemorySSA!"); 1483 LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << Inst << '\n'); 1484 if (!DebugCounter::shouldExecute(CSECounter)) { 1485 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1486 continue; 1487 } 1488 salvageKnowledge(&Inst, &AC); 1489 removeMSSA(Inst); 1490 Inst.eraseFromParent(); 1491 Changed = true; 1492 ++NumDSE; 1493 // We can avoid incrementing the generation count since we were able 1494 // to eliminate this store. 1495 continue; 1496 } 1497 } 1498 1499 // Okay, this isn't something we can CSE at all. Check to see if it is 1500 // something that could modify memory. If so, our available memory values 1501 // cannot be used so bump the generation count. 1502 if (Inst.mayWriteToMemory()) { 1503 ++CurrentGeneration; 1504 1505 if (MemInst.isValid() && MemInst.isStore()) { 1506 // We do a trivial form of DSE if there are two stores to the same 1507 // location with no intervening loads. Delete the earlier store. 1508 if (LastStore) { 1509 if (overridingStores(ParseMemoryInst(LastStore, TTI), MemInst)) { 1510 LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore 1511 << " due to: " << Inst << '\n'); 1512 if (!DebugCounter::shouldExecute(CSECounter)) { 1513 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n"); 1514 } else { 1515 salvageKnowledge(&Inst, &AC); 1516 removeMSSA(*LastStore); 1517 LastStore->eraseFromParent(); 1518 Changed = true; 1519 ++NumDSE; 1520 LastStore = nullptr; 1521 } 1522 } 1523 // fallthrough - we can exploit information about this store 1524 } 1525 1526 // Okay, we just invalidated anything we knew about loaded values. Try 1527 // to salvage *something* by remembering that the stored value is a live 1528 // version of the pointer. It is safe to forward from volatile stores 1529 // to non-volatile loads, so we don't have to check for volatility of 1530 // the store. 1531 AvailableLoads.insert(MemInst.getPointerOperand(), 1532 LoadValue(&Inst, CurrentGeneration, 1533 MemInst.getMatchingId(), 1534 MemInst.isAtomic())); 1535 1536 // Remember that this was the last unordered store we saw for DSE. We 1537 // don't yet handle DSE on ordered or volatile stores since we don't 1538 // have a good way to model the ordering requirement for following 1539 // passes once the store is removed. We could insert a fence, but 1540 // since fences are slightly stronger than stores in their ordering, 1541 // it's not clear this is a profitable transform. Another option would 1542 // be to merge the ordering with that of the post dominating store. 1543 if (MemInst.isUnordered() && !MemInst.isVolatile()) 1544 LastStore = &Inst; 1545 else 1546 LastStore = nullptr; 1547 } 1548 } 1549 } 1550 1551 return Changed; 1552 } 1553 1554 bool EarlyCSE::run() { 1555 // Note, deque is being used here because there is significant performance 1556 // gains over vector when the container becomes very large due to the 1557 // specific access patterns. For more information see the mailing list 1558 // discussion on this: 1559 // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html 1560 std::deque<StackNode *> nodesToProcess; 1561 1562 bool Changed = false; 1563 1564 // Process the root node. 1565 nodesToProcess.push_back(new StackNode( 1566 AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls, 1567 CurrentGeneration, DT.getRootNode(), 1568 DT.getRootNode()->begin(), DT.getRootNode()->end())); 1569 1570 assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it."); 1571 1572 // Process the stack. 1573 while (!nodesToProcess.empty()) { 1574 // Grab the first item off the stack. Set the current generation, remove 1575 // the node from the stack, and process it. 1576 StackNode *NodeToProcess = nodesToProcess.back(); 1577 1578 // Initialize class members. 1579 CurrentGeneration = NodeToProcess->currentGeneration(); 1580 1581 // Check if the node needs to be processed. 1582 if (!NodeToProcess->isProcessed()) { 1583 // Process the node. 1584 Changed |= processNode(NodeToProcess->node()); 1585 NodeToProcess->childGeneration(CurrentGeneration); 1586 NodeToProcess->process(); 1587 } else if (NodeToProcess->childIter() != NodeToProcess->end()) { 1588 // Push the next child onto the stack. 1589 DomTreeNode *child = NodeToProcess->nextChild(); 1590 nodesToProcess.push_back( 1591 new StackNode(AvailableValues, AvailableLoads, AvailableInvariants, 1592 AvailableCalls, NodeToProcess->childGeneration(), 1593 child, child->begin(), child->end())); 1594 } else { 1595 // It has been processed, and there are no more children to process, 1596 // so delete it and pop it off the stack. 1597 delete NodeToProcess; 1598 nodesToProcess.pop_back(); 1599 } 1600 } // while (!nodes...) 1601 1602 return Changed; 1603 } 1604 1605 PreservedAnalyses EarlyCSEPass::run(Function &F, 1606 FunctionAnalysisManager &AM) { 1607 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 1608 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 1609 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 1610 auto &AC = AM.getResult<AssumptionAnalysis>(F); 1611 auto *MSSA = 1612 UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr; 1613 1614 EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA); 1615 1616 if (!CSE.run()) 1617 return PreservedAnalyses::all(); 1618 1619 PreservedAnalyses PA; 1620 PA.preserveSet<CFGAnalyses>(); 1621 PA.preserve<GlobalsAA>(); 1622 if (UseMemorySSA) 1623 PA.preserve<MemorySSAAnalysis>(); 1624 return PA; 1625 } 1626 1627 namespace { 1628 1629 /// A simple and fast domtree-based CSE pass. 1630 /// 1631 /// This pass does a simple depth-first walk over the dominator tree, 1632 /// eliminating trivially redundant instructions and using instsimplify to 1633 /// canonicalize things as it goes. It is intended to be fast and catch obvious 1634 /// cases so that instcombine and other passes are more effective. It is 1635 /// expected that a later pass of GVN will catch the interesting/hard cases. 1636 template<bool UseMemorySSA> 1637 class EarlyCSELegacyCommonPass : public FunctionPass { 1638 public: 1639 static char ID; 1640 1641 EarlyCSELegacyCommonPass() : FunctionPass(ID) { 1642 if (UseMemorySSA) 1643 initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry()); 1644 else 1645 initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry()); 1646 } 1647 1648 bool runOnFunction(Function &F) override { 1649 if (skipFunction(F)) 1650 return false; 1651 1652 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 1653 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1654 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1655 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1656 auto *MSSA = 1657 UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr; 1658 1659 EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA); 1660 1661 return CSE.run(); 1662 } 1663 1664 void getAnalysisUsage(AnalysisUsage &AU) const override { 1665 AU.addRequired<AssumptionCacheTracker>(); 1666 AU.addRequired<DominatorTreeWrapperPass>(); 1667 AU.addRequired<TargetLibraryInfoWrapperPass>(); 1668 AU.addRequired<TargetTransformInfoWrapperPass>(); 1669 if (UseMemorySSA) { 1670 AU.addRequired<AAResultsWrapperPass>(); 1671 AU.addRequired<MemorySSAWrapperPass>(); 1672 AU.addPreserved<MemorySSAWrapperPass>(); 1673 } 1674 AU.addPreserved<GlobalsAAWrapperPass>(); 1675 AU.addPreserved<AAResultsWrapperPass>(); 1676 AU.setPreservesCFG(); 1677 } 1678 }; 1679 1680 } // end anonymous namespace 1681 1682 using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>; 1683 1684 template<> 1685 char EarlyCSELegacyPass::ID = 0; 1686 1687 INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false, 1688 false) 1689 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 1690 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1691 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1692 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1693 INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false) 1694 1695 using EarlyCSEMemSSALegacyPass = 1696 EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>; 1697 1698 template<> 1699 char EarlyCSEMemSSALegacyPass::ID = 0; 1700 1701 FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) { 1702 if (UseMemorySSA) 1703 return new EarlyCSEMemSSALegacyPass(); 1704 else 1705 return new EarlyCSELegacyPass(); 1706 } 1707 1708 INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa", 1709 "Early CSE w/ MemorySSA", false, false) 1710 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 1711 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1712 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 1713 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1714 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1715 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 1716 INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa", 1717 "Early CSE w/ MemorySSA", false, false) 1718