1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass performs global value numbering to eliminate fully redundant 10 // instructions. It also performs simple dead load elimination. 11 // 12 // Note that this pass does the value numbering itself; it does not use the 13 // ValueNumbering analysis passes. 14 // 15 //===----------------------------------------------------------------------===// 16 17 #include "llvm/Transforms/Scalar/GVN.h" 18 #include "llvm/ADT/DenseMap.h" 19 #include "llvm/ADT/DepthFirstIterator.h" 20 #include "llvm/ADT/Hashing.h" 21 #include "llvm/ADT/MapVector.h" 22 #include "llvm/ADT/PostOrderIterator.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/ADT/SetVector.h" 25 #include "llvm/ADT/SmallPtrSet.h" 26 #include "llvm/ADT/SmallVector.h" 27 #include "llvm/ADT/Statistic.h" 28 #include "llvm/Analysis/AliasAnalysis.h" 29 #include "llvm/Analysis/AssumeBundleQueries.h" 30 #include "llvm/Analysis/AssumptionCache.h" 31 #include "llvm/Analysis/CFG.h" 32 #include "llvm/Analysis/DomTreeUpdater.h" 33 #include "llvm/Analysis/GlobalsModRef.h" 34 #include "llvm/Analysis/InstructionPrecedenceTracking.h" 35 #include "llvm/Analysis/InstructionSimplify.h" 36 #include "llvm/Analysis/LoopInfo.h" 37 #include "llvm/Analysis/MemoryBuiltins.h" 38 #include "llvm/Analysis/MemoryDependenceAnalysis.h" 39 #include "llvm/Analysis/MemorySSA.h" 40 #include "llvm/Analysis/MemorySSAUpdater.h" 41 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 42 #include "llvm/Analysis/PHITransAddr.h" 43 #include "llvm/Analysis/TargetLibraryInfo.h" 44 #include "llvm/Analysis/ValueTracking.h" 45 #include "llvm/IR/Attributes.h" 46 #include "llvm/IR/BasicBlock.h" 47 #include "llvm/IR/Constant.h" 48 #include "llvm/IR/Constants.h" 49 #include "llvm/IR/DebugLoc.h" 50 #include "llvm/IR/Dominators.h" 51 #include "llvm/IR/Function.h" 52 #include "llvm/IR/InstrTypes.h" 53 #include "llvm/IR/Instruction.h" 54 #include "llvm/IR/Instructions.h" 55 #include "llvm/IR/IntrinsicInst.h" 56 #include "llvm/IR/LLVMContext.h" 57 #include "llvm/IR/Metadata.h" 58 #include "llvm/IR/Module.h" 59 #include "llvm/IR/PassManager.h" 60 #include "llvm/IR/PatternMatch.h" 61 #include "llvm/IR/Type.h" 62 #include "llvm/IR/Use.h" 63 #include "llvm/IR/Value.h" 64 #include "llvm/InitializePasses.h" 65 #include "llvm/Pass.h" 66 #include "llvm/Support/Casting.h" 67 #include "llvm/Support/CommandLine.h" 68 #include "llvm/Support/Compiler.h" 69 #include "llvm/Support/Debug.h" 70 #include "llvm/Support/raw_ostream.h" 71 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" 72 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 73 #include "llvm/Transforms/Utils/Local.h" 74 #include "llvm/Transforms/Utils/SSAUpdater.h" 75 #include "llvm/Transforms/Utils/VNCoercion.h" 76 #include <algorithm> 77 #include <cassert> 78 #include <cstdint> 79 #include <utility> 80 81 using namespace llvm; 82 using namespace llvm::gvn; 83 using namespace llvm::VNCoercion; 84 using namespace PatternMatch; 85 86 #define DEBUG_TYPE "gvn" 87 88 STATISTIC(NumGVNInstr, "Number of instructions deleted"); 89 STATISTIC(NumGVNLoad, "Number of loads deleted"); 90 STATISTIC(NumGVNPRE, "Number of instructions PRE'd"); 91 STATISTIC(NumGVNBlocks, "Number of blocks merged"); 92 STATISTIC(NumGVNSimpl, "Number of instructions simplified"); 93 STATISTIC(NumGVNEqProp, "Number of equalities propagated"); 94 STATISTIC(NumPRELoad, "Number of loads PRE'd"); 95 STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd"); 96 97 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax, 98 "Number of blocks speculated as available in " 99 "IsValueFullyAvailableInBlock(), max"); 100 STATISTIC(MaxBBSpeculationCutoffReachedTimes, 101 "Number of times we we reached gvn-max-block-speculations cut-off " 102 "preventing further exploration"); 103 104 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden); 105 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true)); 106 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", 107 cl::init(true)); 108 static cl::opt<bool> 109 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", 110 cl::init(false)); 111 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true)); 112 113 static cl::opt<uint32_t> MaxNumDeps( 114 "gvn-max-num-deps", cl::Hidden, cl::init(100), 115 cl::desc("Max number of dependences to attempt Load PRE (default = 100)")); 116 117 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat. 118 static cl::opt<uint32_t> MaxBBSpeculations( 119 "gvn-max-block-speculations", cl::Hidden, cl::init(600), 120 cl::desc("Max number of blocks we're willing to speculate on (and recurse " 121 "into) when deducing if a value is fully available or not in GVN " 122 "(default = 600)")); 123 124 struct llvm::GVNPass::Expression { 125 uint32_t opcode; 126 bool commutative = false; 127 // The type is not necessarily the result type of the expression, it may be 128 // any additional type needed to disambiguate the expression. 129 Type *type = nullptr; 130 SmallVector<uint32_t, 4> varargs; 131 132 Expression(uint32_t o = ~2U) : opcode(o) {} 133 134 bool operator==(const Expression &other) const { 135 if (opcode != other.opcode) 136 return false; 137 if (opcode == ~0U || opcode == ~1U) 138 return true; 139 if (type != other.type) 140 return false; 141 if (varargs != other.varargs) 142 return false; 143 return true; 144 } 145 146 friend hash_code hash_value(const Expression &Value) { 147 return hash_combine( 148 Value.opcode, Value.type, 149 hash_combine_range(Value.varargs.begin(), Value.varargs.end())); 150 } 151 }; 152 153 namespace llvm { 154 155 template <> struct DenseMapInfo<GVNPass::Expression> { 156 static inline GVNPass::Expression getEmptyKey() { return ~0U; } 157 static inline GVNPass::Expression getTombstoneKey() { return ~1U; } 158 159 static unsigned getHashValue(const GVNPass::Expression &e) { 160 using llvm::hash_value; 161 162 return static_cast<unsigned>(hash_value(e)); 163 } 164 165 static bool isEqual(const GVNPass::Expression &LHS, 166 const GVNPass::Expression &RHS) { 167 return LHS == RHS; 168 } 169 }; 170 171 } // end namespace llvm 172 173 /// Represents a particular available value that we know how to materialize. 174 /// Materialization of an AvailableValue never fails. An AvailableValue is 175 /// implicitly associated with a rematerialization point which is the 176 /// location of the instruction from which it was formed. 177 struct llvm::gvn::AvailableValue { 178 enum class ValType { 179 SimpleVal, // A simple offsetted value that is accessed. 180 LoadVal, // A value produced by a load. 181 MemIntrin, // A memory intrinsic which is loaded from. 182 UndefVal, // A UndefValue representing a value from dead block (which 183 // is not yet physically removed from the CFG). 184 SelectVal, // A pointer select which is loaded from and for which the load 185 // can be replace by a value select. 186 }; 187 188 /// Val - The value that is live out of the block. 189 Value *Val; 190 /// Kind of the live-out value. 191 ValType Kind; 192 193 /// Offset - The byte offset in Val that is interesting for the load query. 194 unsigned Offset = 0; 195 196 static AvailableValue get(Value *V, unsigned Offset = 0) { 197 AvailableValue Res; 198 Res.Val = V; 199 Res.Kind = ValType::SimpleVal; 200 Res.Offset = Offset; 201 return Res; 202 } 203 204 static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) { 205 AvailableValue Res; 206 Res.Val = MI; 207 Res.Kind = ValType::MemIntrin; 208 Res.Offset = Offset; 209 return Res; 210 } 211 212 static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) { 213 AvailableValue Res; 214 Res.Val = Load; 215 Res.Kind = ValType::LoadVal; 216 Res.Offset = Offset; 217 return Res; 218 } 219 220 static AvailableValue getUndef() { 221 AvailableValue Res; 222 Res.Val = nullptr; 223 Res.Kind = ValType::UndefVal; 224 Res.Offset = 0; 225 return Res; 226 } 227 228 static AvailableValue getSelect(SelectInst *Sel) { 229 AvailableValue Res; 230 Res.Val = Sel; 231 Res.Kind = ValType::SelectVal; 232 Res.Offset = 0; 233 return Res; 234 } 235 236 bool isSimpleValue() const { return Kind == ValType::SimpleVal; } 237 bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; } 238 bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; } 239 bool isUndefValue() const { return Kind == ValType::UndefVal; } 240 bool isSelectValue() const { return Kind == ValType::SelectVal; } 241 242 Value *getSimpleValue() const { 243 assert(isSimpleValue() && "Wrong accessor"); 244 return Val; 245 } 246 247 LoadInst *getCoercedLoadValue() const { 248 assert(isCoercedLoadValue() && "Wrong accessor"); 249 return cast<LoadInst>(Val); 250 } 251 252 MemIntrinsic *getMemIntrinValue() const { 253 assert(isMemIntrinValue() && "Wrong accessor"); 254 return cast<MemIntrinsic>(Val); 255 } 256 257 SelectInst *getSelectValue() const { 258 assert(isSelectValue() && "Wrong accessor"); 259 return cast<SelectInst>(Val); 260 } 261 262 /// Emit code at the specified insertion point to adjust the value defined 263 /// here to the specified type. This handles various coercion cases. 264 Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, 265 GVNPass &gvn) const; 266 }; 267 268 /// Represents an AvailableValue which can be rematerialized at the end of 269 /// the associated BasicBlock. 270 struct llvm::gvn::AvailableValueInBlock { 271 /// BB - The basic block in question. 272 BasicBlock *BB = nullptr; 273 274 /// AV - The actual available value 275 AvailableValue AV; 276 277 static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) { 278 AvailableValueInBlock Res; 279 Res.BB = BB; 280 Res.AV = std::move(AV); 281 return Res; 282 } 283 284 static AvailableValueInBlock get(BasicBlock *BB, Value *V, 285 unsigned Offset = 0) { 286 return get(BB, AvailableValue::get(V, Offset)); 287 } 288 289 static AvailableValueInBlock getUndef(BasicBlock *BB) { 290 return get(BB, AvailableValue::getUndef()); 291 } 292 293 static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel) { 294 return get(BB, AvailableValue::getSelect(Sel)); 295 } 296 297 /// Emit code at the end of this block to adjust the value defined here to 298 /// the specified type. This handles various coercion cases. 299 Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const { 300 return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn); 301 } 302 }; 303 304 //===----------------------------------------------------------------------===// 305 // ValueTable Internal Functions 306 //===----------------------------------------------------------------------===// 307 308 GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) { 309 Expression e; 310 e.type = I->getType(); 311 e.opcode = I->getOpcode(); 312 if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) { 313 // gc.relocate is 'special' call: its second and third operands are 314 // not real values, but indices into statepoint's argument list. 315 // Use the refered to values for purposes of identity. 316 e.varargs.push_back(lookupOrAdd(GCR->getOperand(0))); 317 e.varargs.push_back(lookupOrAdd(GCR->getBasePtr())); 318 e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr())); 319 } else { 320 for (Use &Op : I->operands()) 321 e.varargs.push_back(lookupOrAdd(Op)); 322 } 323 if (I->isCommutative()) { 324 // Ensure that commutative instructions that only differ by a permutation 325 // of their operands get the same value number by sorting the operand value 326 // numbers. Since commutative operands are the 1st two operands it is more 327 // efficient to sort by hand rather than using, say, std::sort. 328 assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!"); 329 if (e.varargs[0] > e.varargs[1]) 330 std::swap(e.varargs[0], e.varargs[1]); 331 e.commutative = true; 332 } 333 334 if (auto *C = dyn_cast<CmpInst>(I)) { 335 // Sort the operand value numbers so x<y and y>x get the same value number. 336 CmpInst::Predicate Predicate = C->getPredicate(); 337 if (e.varargs[0] > e.varargs[1]) { 338 std::swap(e.varargs[0], e.varargs[1]); 339 Predicate = CmpInst::getSwappedPredicate(Predicate); 340 } 341 e.opcode = (C->getOpcode() << 8) | Predicate; 342 e.commutative = true; 343 } else if (auto *E = dyn_cast<InsertValueInst>(I)) { 344 e.varargs.append(E->idx_begin(), E->idx_end()); 345 } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) { 346 ArrayRef<int> ShuffleMask = SVI->getShuffleMask(); 347 e.varargs.append(ShuffleMask.begin(), ShuffleMask.end()); 348 } 349 350 return e; 351 } 352 353 GVNPass::Expression GVNPass::ValueTable::createCmpExpr( 354 unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) { 355 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && 356 "Not a comparison!"); 357 Expression e; 358 e.type = CmpInst::makeCmpResultType(LHS->getType()); 359 e.varargs.push_back(lookupOrAdd(LHS)); 360 e.varargs.push_back(lookupOrAdd(RHS)); 361 362 // Sort the operand value numbers so x<y and y>x get the same value number. 363 if (e.varargs[0] > e.varargs[1]) { 364 std::swap(e.varargs[0], e.varargs[1]); 365 Predicate = CmpInst::getSwappedPredicate(Predicate); 366 } 367 e.opcode = (Opcode << 8) | Predicate; 368 e.commutative = true; 369 return e; 370 } 371 372 GVNPass::Expression 373 GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) { 374 assert(EI && "Not an ExtractValueInst?"); 375 Expression e; 376 e.type = EI->getType(); 377 e.opcode = 0; 378 379 WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand()); 380 if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) { 381 // EI is an extract from one of our with.overflow intrinsics. Synthesize 382 // a semantically equivalent expression instead of an extract value 383 // expression. 384 e.opcode = WO->getBinaryOp(); 385 e.varargs.push_back(lookupOrAdd(WO->getLHS())); 386 e.varargs.push_back(lookupOrAdd(WO->getRHS())); 387 return e; 388 } 389 390 // Not a recognised intrinsic. Fall back to producing an extract value 391 // expression. 392 e.opcode = EI->getOpcode(); 393 for (Use &Op : EI->operands()) 394 e.varargs.push_back(lookupOrAdd(Op)); 395 396 append_range(e.varargs, EI->indices()); 397 398 return e; 399 } 400 401 GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) { 402 Expression E; 403 Type *PtrTy = GEP->getType()->getScalarType(); 404 const DataLayout &DL = GEP->getModule()->getDataLayout(); 405 unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy); 406 MapVector<Value *, APInt> VariableOffsets; 407 APInt ConstantOffset(BitWidth, 0); 408 if (PtrTy->isOpaquePointerTy() && 409 GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) { 410 // For opaque pointers, convert into offset representation, to recognize 411 // equivalent address calculations that use different type encoding. 412 LLVMContext &Context = GEP->getContext(); 413 E.opcode = GEP->getOpcode(); 414 E.type = nullptr; 415 E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand())); 416 for (const auto &Pair : VariableOffsets) { 417 E.varargs.push_back(lookupOrAdd(Pair.first)); 418 E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second))); 419 } 420 if (!ConstantOffset.isZero()) 421 E.varargs.push_back( 422 lookupOrAdd(ConstantInt::get(Context, ConstantOffset))); 423 } else { 424 // If converting to offset representation fails (for typed pointers and 425 // scalable vectors), fall back to type-based implementation: 426 E.opcode = GEP->getOpcode(); 427 E.type = GEP->getSourceElementType(); 428 for (Use &Op : GEP->operands()) 429 E.varargs.push_back(lookupOrAdd(Op)); 430 } 431 return E; 432 } 433 434 //===----------------------------------------------------------------------===// 435 // ValueTable External Functions 436 //===----------------------------------------------------------------------===// 437 438 GVNPass::ValueTable::ValueTable() = default; 439 GVNPass::ValueTable::ValueTable(const ValueTable &) = default; 440 GVNPass::ValueTable::ValueTable(ValueTable &&) = default; 441 GVNPass::ValueTable::~ValueTable() = default; 442 GVNPass::ValueTable & 443 GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default; 444 445 /// add - Insert a value into the table with a specified value number. 446 void GVNPass::ValueTable::add(Value *V, uint32_t num) { 447 valueNumbering.insert(std::make_pair(V, num)); 448 if (PHINode *PN = dyn_cast<PHINode>(V)) 449 NumberingPhi[num] = PN; 450 } 451 452 uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) { 453 if (AA->doesNotAccessMemory(C)) { 454 Expression exp = createExpr(C); 455 uint32_t e = assignExpNewValueNum(exp).first; 456 valueNumbering[C] = e; 457 return e; 458 } else if (MD && AA->onlyReadsMemory(C)) { 459 Expression exp = createExpr(C); 460 auto ValNum = assignExpNewValueNum(exp); 461 if (ValNum.second) { 462 valueNumbering[C] = ValNum.first; 463 return ValNum.first; 464 } 465 466 MemDepResult local_dep = MD->getDependency(C); 467 468 if (!local_dep.isDef() && !local_dep.isNonLocal()) { 469 valueNumbering[C] = nextValueNumber; 470 return nextValueNumber++; 471 } 472 473 if (local_dep.isDef()) { 474 // For masked load/store intrinsics, the local_dep may actully be 475 // a normal load or store instruction. 476 CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst()); 477 478 if (!local_cdep || local_cdep->arg_size() != C->arg_size()) { 479 valueNumbering[C] = nextValueNumber; 480 return nextValueNumber++; 481 } 482 483 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) { 484 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i)); 485 uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i)); 486 if (c_vn != cd_vn) { 487 valueNumbering[C] = nextValueNumber; 488 return nextValueNumber++; 489 } 490 } 491 492 uint32_t v = lookupOrAdd(local_cdep); 493 valueNumbering[C] = v; 494 return v; 495 } 496 497 // Non-local case. 498 const MemoryDependenceResults::NonLocalDepInfo &deps = 499 MD->getNonLocalCallDependency(C); 500 // FIXME: Move the checking logic to MemDep! 501 CallInst* cdep = nullptr; 502 503 // Check to see if we have a single dominating call instruction that is 504 // identical to C. 505 for (unsigned i = 0, e = deps.size(); i != e; ++i) { 506 const NonLocalDepEntry *I = &deps[i]; 507 if (I->getResult().isNonLocal()) 508 continue; 509 510 // We don't handle non-definitions. If we already have a call, reject 511 // instruction dependencies. 512 if (!I->getResult().isDef() || cdep != nullptr) { 513 cdep = nullptr; 514 break; 515 } 516 517 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst()); 518 // FIXME: All duplicated with non-local case. 519 if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){ 520 cdep = NonLocalDepCall; 521 continue; 522 } 523 524 cdep = nullptr; 525 break; 526 } 527 528 if (!cdep) { 529 valueNumbering[C] = nextValueNumber; 530 return nextValueNumber++; 531 } 532 533 if (cdep->arg_size() != C->arg_size()) { 534 valueNumbering[C] = nextValueNumber; 535 return nextValueNumber++; 536 } 537 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) { 538 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i)); 539 uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i)); 540 if (c_vn != cd_vn) { 541 valueNumbering[C] = nextValueNumber; 542 return nextValueNumber++; 543 } 544 } 545 546 uint32_t v = lookupOrAdd(cdep); 547 valueNumbering[C] = v; 548 return v; 549 } else { 550 valueNumbering[C] = nextValueNumber; 551 return nextValueNumber++; 552 } 553 } 554 555 /// Returns true if a value number exists for the specified value. 556 bool GVNPass::ValueTable::exists(Value *V) const { 557 return valueNumbering.count(V) != 0; 558 } 559 560 /// lookup_or_add - Returns the value number for the specified value, assigning 561 /// it a new number if it did not have one before. 562 uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) { 563 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V); 564 if (VI != valueNumbering.end()) 565 return VI->second; 566 567 if (!isa<Instruction>(V)) { 568 valueNumbering[V] = nextValueNumber; 569 return nextValueNumber++; 570 } 571 572 Instruction* I = cast<Instruction>(V); 573 Expression exp; 574 switch (I->getOpcode()) { 575 case Instruction::Call: 576 return lookupOrAddCall(cast<CallInst>(I)); 577 case Instruction::FNeg: 578 case Instruction::Add: 579 case Instruction::FAdd: 580 case Instruction::Sub: 581 case Instruction::FSub: 582 case Instruction::Mul: 583 case Instruction::FMul: 584 case Instruction::UDiv: 585 case Instruction::SDiv: 586 case Instruction::FDiv: 587 case Instruction::URem: 588 case Instruction::SRem: 589 case Instruction::FRem: 590 case Instruction::Shl: 591 case Instruction::LShr: 592 case Instruction::AShr: 593 case Instruction::And: 594 case Instruction::Or: 595 case Instruction::Xor: 596 case Instruction::ICmp: 597 case Instruction::FCmp: 598 case Instruction::Trunc: 599 case Instruction::ZExt: 600 case Instruction::SExt: 601 case Instruction::FPToUI: 602 case Instruction::FPToSI: 603 case Instruction::UIToFP: 604 case Instruction::SIToFP: 605 case Instruction::FPTrunc: 606 case Instruction::FPExt: 607 case Instruction::PtrToInt: 608 case Instruction::IntToPtr: 609 case Instruction::AddrSpaceCast: 610 case Instruction::BitCast: 611 case Instruction::Select: 612 case Instruction::Freeze: 613 case Instruction::ExtractElement: 614 case Instruction::InsertElement: 615 case Instruction::ShuffleVector: 616 case Instruction::InsertValue: 617 exp = createExpr(I); 618 break; 619 case Instruction::GetElementPtr: 620 exp = createGEPExpr(cast<GetElementPtrInst>(I)); 621 break; 622 case Instruction::ExtractValue: 623 exp = createExtractvalueExpr(cast<ExtractValueInst>(I)); 624 break; 625 case Instruction::PHI: 626 valueNumbering[V] = nextValueNumber; 627 NumberingPhi[nextValueNumber] = cast<PHINode>(V); 628 return nextValueNumber++; 629 default: 630 valueNumbering[V] = nextValueNumber; 631 return nextValueNumber++; 632 } 633 634 uint32_t e = assignExpNewValueNum(exp).first; 635 valueNumbering[V] = e; 636 return e; 637 } 638 639 /// Returns the value number of the specified value. Fails if 640 /// the value has not yet been numbered. 641 uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const { 642 DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V); 643 if (Verify) { 644 assert(VI != valueNumbering.end() && "Value not numbered?"); 645 return VI->second; 646 } 647 return (VI != valueNumbering.end()) ? VI->second : 0; 648 } 649 650 /// Returns the value number of the given comparison, 651 /// assigning it a new number if it did not have one before. Useful when 652 /// we deduced the result of a comparison, but don't immediately have an 653 /// instruction realizing that comparison to hand. 654 uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode, 655 CmpInst::Predicate Predicate, 656 Value *LHS, Value *RHS) { 657 Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS); 658 return assignExpNewValueNum(exp).first; 659 } 660 661 /// Remove all entries from the ValueTable. 662 void GVNPass::ValueTable::clear() { 663 valueNumbering.clear(); 664 expressionNumbering.clear(); 665 NumberingPhi.clear(); 666 PhiTranslateTable.clear(); 667 nextValueNumber = 1; 668 Expressions.clear(); 669 ExprIdx.clear(); 670 nextExprNumber = 0; 671 } 672 673 /// Remove a value from the value numbering. 674 void GVNPass::ValueTable::erase(Value *V) { 675 uint32_t Num = valueNumbering.lookup(V); 676 valueNumbering.erase(V); 677 // If V is PHINode, V <--> value number is an one-to-one mapping. 678 if (isa<PHINode>(V)) 679 NumberingPhi.erase(Num); 680 } 681 682 /// verifyRemoved - Verify that the value is removed from all internal data 683 /// structures. 684 void GVNPass::ValueTable::verifyRemoved(const Value *V) const { 685 for (DenseMap<Value*, uint32_t>::const_iterator 686 I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) { 687 assert(I->first != V && "Inst still occurs in value numbering map!"); 688 } 689 } 690 691 //===----------------------------------------------------------------------===// 692 // GVN Pass 693 //===----------------------------------------------------------------------===// 694 695 bool GVNPass::isPREEnabled() const { 696 return Options.AllowPRE.value_or(GVNEnablePRE); 697 } 698 699 bool GVNPass::isLoadPREEnabled() const { 700 return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE); 701 } 702 703 bool GVNPass::isLoadInLoopPREEnabled() const { 704 return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE); 705 } 706 707 bool GVNPass::isLoadPRESplitBackedgeEnabled() const { 708 return Options.AllowLoadPRESplitBackedge.value_or( 709 GVNEnableSplitBackedgeInLoadPRE); 710 } 711 712 bool GVNPass::isMemDepEnabled() const { 713 return Options.AllowMemDep.value_or(GVNEnableMemDep); 714 } 715 716 PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) { 717 // FIXME: The order of evaluation of these 'getResult' calls is very 718 // significant! Re-ordering these variables will cause GVN when run alone to 719 // be less effective! We should fix memdep and basic-aa to not exhibit this 720 // behavior, but until then don't change the order here. 721 auto &AC = AM.getResult<AssumptionAnalysis>(F); 722 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 723 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 724 auto &AA = AM.getResult<AAManager>(F); 725 auto *MemDep = 726 isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr; 727 auto *LI = AM.getCachedResult<LoopAnalysis>(F); 728 auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F); 729 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 730 bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE, 731 MSSA ? &MSSA->getMSSA() : nullptr); 732 if (!Changed) 733 return PreservedAnalyses::all(); 734 PreservedAnalyses PA; 735 PA.preserve<DominatorTreeAnalysis>(); 736 PA.preserve<TargetLibraryAnalysis>(); 737 if (MSSA) 738 PA.preserve<MemorySSAAnalysis>(); 739 if (LI) 740 PA.preserve<LoopAnalysis>(); 741 return PA; 742 } 743 744 void GVNPass::printPipeline( 745 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 746 static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline( 747 OS, MapClassName2PassName); 748 749 OS << "<"; 750 if (Options.AllowPRE != None) 751 OS << (Options.AllowPRE.value() ? "" : "no-") << "pre;"; 752 if (Options.AllowLoadPRE != None) 753 OS << (Options.AllowLoadPRE.value() ? "" : "no-") << "load-pre;"; 754 if (Options.AllowLoadPRESplitBackedge != None) 755 OS << (Options.AllowLoadPRESplitBackedge.value() ? "" : "no-") 756 << "split-backedge-load-pre;"; 757 if (Options.AllowMemDep != None) 758 OS << (Options.AllowMemDep.value() ? "" : "no-") << "memdep"; 759 OS << ">"; 760 } 761 762 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 763 LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const { 764 errs() << "{\n"; 765 for (auto &I : d) { 766 errs() << I.first << "\n"; 767 I.second->dump(); 768 } 769 errs() << "}\n"; 770 } 771 #endif 772 773 enum class AvailabilityState : char { 774 /// We know the block *is not* fully available. This is a fixpoint. 775 Unavailable = 0, 776 /// We know the block *is* fully available. This is a fixpoint. 777 Available = 1, 778 /// We do not know whether the block is fully available or not, 779 /// but we are currently speculating that it will be. 780 /// If it would have turned out that the block was, in fact, not fully 781 /// available, this would have been cleaned up into an Unavailable. 782 SpeculativelyAvailable = 2, 783 }; 784 785 /// Return true if we can prove that the value 786 /// we're analyzing is fully available in the specified block. As we go, keep 787 /// track of which blocks we know are fully alive in FullyAvailableBlocks. This 788 /// map is actually a tri-state map with the following values: 789 /// 0) we know the block *is not* fully available. 790 /// 1) we know the block *is* fully available. 791 /// 2) we do not know whether the block is fully available or not, but we are 792 /// currently speculating that it will be. 793 static bool IsValueFullyAvailableInBlock( 794 BasicBlock *BB, 795 DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) { 796 SmallVector<BasicBlock *, 32> Worklist; 797 Optional<BasicBlock *> UnavailableBB; 798 799 // The number of times we didn't find an entry for a block in a map and 800 // optimistically inserted an entry marking block as speculatively available. 801 unsigned NumNewNewSpeculativelyAvailableBBs = 0; 802 803 #ifndef NDEBUG 804 SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs; 805 SmallVector<BasicBlock *, 32> AvailableBBs; 806 #endif 807 808 Worklist.emplace_back(BB); 809 while (!Worklist.empty()) { 810 BasicBlock *CurrBB = Worklist.pop_back_val(); // LoadFO - depth-first! 811 // Optimistically assume that the block is Speculatively Available and check 812 // to see if we already know about this block in one lookup. 813 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV = 814 FullyAvailableBlocks.try_emplace( 815 CurrBB, AvailabilityState::SpeculativelyAvailable); 816 AvailabilityState &State = IV.first->second; 817 818 // Did the entry already exist for this block? 819 if (!IV.second) { 820 if (State == AvailabilityState::Unavailable) { 821 UnavailableBB = CurrBB; 822 break; // Backpropagate unavailability info. 823 } 824 825 #ifndef NDEBUG 826 AvailableBBs.emplace_back(CurrBB); 827 #endif 828 continue; // Don't recurse further, but continue processing worklist. 829 } 830 831 // No entry found for block. 832 ++NumNewNewSpeculativelyAvailableBBs; 833 bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations; 834 835 // If we have exhausted our budget, mark this block as unavailable. 836 // Also, if this block has no predecessors, the value isn't live-in here. 837 if (OutOfBudget || pred_empty(CurrBB)) { 838 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget; 839 State = AvailabilityState::Unavailable; 840 UnavailableBB = CurrBB; 841 break; // Backpropagate unavailability info. 842 } 843 844 // Tentatively consider this block as speculatively available. 845 #ifndef NDEBUG 846 NewSpeculativelyAvailableBBs.insert(CurrBB); 847 #endif 848 // And further recurse into block's predecessors, in depth-first order! 849 Worklist.append(pred_begin(CurrBB), pred_end(CurrBB)); 850 } 851 852 #if LLVM_ENABLE_STATS 853 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax( 854 NumNewNewSpeculativelyAvailableBBs); 855 #endif 856 857 // If the block isn't marked as fixpoint yet 858 // (the Unavailable and Available states are fixpoints) 859 auto MarkAsFixpointAndEnqueueSuccessors = 860 [&](BasicBlock *BB, AvailabilityState FixpointState) { 861 auto It = FullyAvailableBlocks.find(BB); 862 if (It == FullyAvailableBlocks.end()) 863 return; // Never queried this block, leave as-is. 864 switch (AvailabilityState &State = It->second) { 865 case AvailabilityState::Unavailable: 866 case AvailabilityState::Available: 867 return; // Don't backpropagate further, continue processing worklist. 868 case AvailabilityState::SpeculativelyAvailable: // Fix it! 869 State = FixpointState; 870 #ifndef NDEBUG 871 assert(NewSpeculativelyAvailableBBs.erase(BB) && 872 "Found a speculatively available successor leftover?"); 873 #endif 874 // Queue successors for further processing. 875 Worklist.append(succ_begin(BB), succ_end(BB)); 876 return; 877 } 878 }; 879 880 if (UnavailableBB) { 881 // Okay, we have encountered an unavailable block. 882 // Mark speculatively available blocks reachable from UnavailableBB as 883 // unavailable as well. Paths are terminated when they reach blocks not in 884 // FullyAvailableBlocks or they are not marked as speculatively available. 885 Worklist.clear(); 886 Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB)); 887 while (!Worklist.empty()) 888 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(), 889 AvailabilityState::Unavailable); 890 } 891 892 #ifndef NDEBUG 893 Worklist.clear(); 894 for (BasicBlock *AvailableBB : AvailableBBs) 895 Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB)); 896 while (!Worklist.empty()) 897 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(), 898 AvailabilityState::Available); 899 900 assert(NewSpeculativelyAvailableBBs.empty() && 901 "Must have fixed all the new speculatively available blocks."); 902 #endif 903 904 return !UnavailableBB; 905 } 906 907 /// Given a set of loads specified by ValuesPerBlock, 908 /// construct SSA form, allowing us to eliminate Load. This returns the value 909 /// that should be used at Load's definition site. 910 static Value * 911 ConstructSSAForLoadSet(LoadInst *Load, 912 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, 913 GVNPass &gvn) { 914 // Check for the fully redundant, dominating load case. In this case, we can 915 // just use the dominating value directly. 916 if (ValuesPerBlock.size() == 1 && 917 gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB, 918 Load->getParent())) { 919 assert(!ValuesPerBlock[0].AV.isUndefValue() && 920 "Dead BB dominate this block"); 921 return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn); 922 } 923 924 // Otherwise, we have to construct SSA form. 925 SmallVector<PHINode*, 8> NewPHIs; 926 SSAUpdater SSAUpdate(&NewPHIs); 927 SSAUpdate.Initialize(Load->getType(), Load->getName()); 928 929 for (const AvailableValueInBlock &AV : ValuesPerBlock) { 930 BasicBlock *BB = AV.BB; 931 932 if (AV.AV.isUndefValue()) 933 continue; 934 935 if (SSAUpdate.HasValueForBlock(BB)) 936 continue; 937 938 // If the value is the load that we will be eliminating, and the block it's 939 // available in is the block that the load is in, then don't add it as 940 // SSAUpdater will resolve the value to the relevant phi which may let it 941 // avoid phi construction entirely if there's actually only one value. 942 if (BB == Load->getParent() && 943 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) || 944 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load))) 945 continue; 946 947 SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn)); 948 } 949 950 // Perform PHI construction. 951 return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent()); 952 } 953 954 static LoadInst *findDominatingLoad(Value *Ptr, Type *LoadTy, SelectInst *Sel, 955 DominatorTree &DT) { 956 for (Value *U : Ptr->users()) { 957 auto *LI = dyn_cast<LoadInst>(U); 958 if (LI && LI->getType() == LoadTy && LI->getParent() == Sel->getParent() && 959 DT.dominates(LI, Sel)) 960 return LI; 961 } 962 return nullptr; 963 } 964 965 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load, 966 Instruction *InsertPt, 967 GVNPass &gvn) const { 968 Value *Res; 969 Type *LoadTy = Load->getType(); 970 const DataLayout &DL = Load->getModule()->getDataLayout(); 971 if (isSimpleValue()) { 972 Res = getSimpleValue(); 973 if (Res->getType() != LoadTy) { 974 Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL); 975 976 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset 977 << " " << *getSimpleValue() << '\n' 978 << *Res << '\n' 979 << "\n\n\n"); 980 } 981 } else if (isCoercedLoadValue()) { 982 LoadInst *CoercedLoad = getCoercedLoadValue(); 983 if (CoercedLoad->getType() == LoadTy && Offset == 0) { 984 Res = CoercedLoad; 985 } else { 986 Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL); 987 // We would like to use gvn.markInstructionForDeletion here, but we can't 988 // because the load is already memoized into the leader map table that GVN 989 // tracks. It is potentially possible to remove the load from the table, 990 // but then there all of the operations based on it would need to be 991 // rehashed. Just leave the dead load around. 992 gvn.getMemDep().removeInstruction(CoercedLoad); 993 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset 994 << " " << *getCoercedLoadValue() << '\n' 995 << *Res << '\n' 996 << "\n\n\n"); 997 } 998 } else if (isMemIntrinValue()) { 999 Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, 1000 InsertPt, DL); 1001 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset 1002 << " " << *getMemIntrinValue() << '\n' 1003 << *Res << '\n' 1004 << "\n\n\n"); 1005 } else if (isSelectValue()) { 1006 // Introduce a new value select for a load from an eligible pointer select. 1007 SelectInst *Sel = getSelectValue(); 1008 LoadInst *L1 = findDominatingLoad(Sel->getOperand(1), LoadTy, Sel, 1009 gvn.getDominatorTree()); 1010 LoadInst *L2 = findDominatingLoad(Sel->getOperand(2), LoadTy, Sel, 1011 gvn.getDominatorTree()); 1012 assert(L1 && L2 && 1013 "must be able to obtain dominating loads for both value operands of " 1014 "the select"); 1015 Res = SelectInst::Create(Sel->getCondition(), L1, L2, "", Sel); 1016 } else { 1017 llvm_unreachable("Should not materialize value from dead block"); 1018 } 1019 assert(Res && "failed to materialize?"); 1020 return Res; 1021 } 1022 1023 static bool isLifetimeStart(const Instruction *Inst) { 1024 if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) 1025 return II->getIntrinsicID() == Intrinsic::lifetime_start; 1026 return false; 1027 } 1028 1029 /// Assuming To can be reached from both From and Between, does Between lie on 1030 /// every path from From to To? 1031 static bool liesBetween(const Instruction *From, Instruction *Between, 1032 const Instruction *To, DominatorTree *DT) { 1033 if (From->getParent() == Between->getParent()) 1034 return DT->dominates(From, Between); 1035 SmallSet<BasicBlock *, 1> Exclusion; 1036 Exclusion.insert(Between->getParent()); 1037 return !isPotentiallyReachable(From, To, &Exclusion, DT); 1038 } 1039 1040 /// Try to locate the three instruction involved in a missed 1041 /// load-elimination case that is due to an intervening store. 1042 static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, 1043 DominatorTree *DT, 1044 OptimizationRemarkEmitter *ORE) { 1045 using namespace ore; 1046 1047 User *OtherAccess = nullptr; 1048 1049 OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load); 1050 R << "load of type " << NV("Type", Load->getType()) << " not eliminated" 1051 << setExtraArgs(); 1052 1053 for (auto *U : Load->getPointerOperand()->users()) { 1054 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) && 1055 cast<Instruction>(U)->getFunction() == Load->getFunction() && 1056 DT->dominates(cast<Instruction>(U), Load)) { 1057 // Use the most immediately dominating value 1058 if (OtherAccess) { 1059 if (DT->dominates(cast<Instruction>(OtherAccess), cast<Instruction>(U))) 1060 OtherAccess = U; 1061 else 1062 assert(U == OtherAccess || DT->dominates(cast<Instruction>(U), 1063 cast<Instruction>(OtherAccess))); 1064 } else 1065 OtherAccess = U; 1066 } 1067 } 1068 1069 if (!OtherAccess) { 1070 // There is no dominating use, check if we can find a closest non-dominating 1071 // use that lies between any other potentially available use and Load. 1072 for (auto *U : Load->getPointerOperand()->users()) { 1073 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) && 1074 cast<Instruction>(U)->getFunction() == Load->getFunction() && 1075 isPotentiallyReachable(cast<Instruction>(U), Load, nullptr, DT)) { 1076 if (OtherAccess) { 1077 if (liesBetween(cast<Instruction>(OtherAccess), cast<Instruction>(U), 1078 Load, DT)) { 1079 OtherAccess = U; 1080 } else if (!liesBetween(cast<Instruction>(U), 1081 cast<Instruction>(OtherAccess), Load, DT)) { 1082 // These uses are both partially available at Load were it not for 1083 // the clobber, but neither lies strictly after the other. 1084 OtherAccess = nullptr; 1085 break; 1086 } // else: keep current OtherAccess since it lies between U and Load 1087 } else { 1088 OtherAccess = U; 1089 } 1090 } 1091 } 1092 } 1093 1094 if (OtherAccess) 1095 R << " in favor of " << NV("OtherAccess", OtherAccess); 1096 1097 R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst()); 1098 1099 ORE->emit(R); 1100 } 1101 1102 /// Check if a load from pointer-select \p Address in \p DepBB can be converted 1103 /// to a value select. The following conditions need to be satisfied: 1104 /// 1. The pointer select (\p Address) must be defined in \p DepBB. 1105 /// 2. Both value operands of the pointer select must be loaded in the same 1106 /// basic block, before the pointer select. 1107 /// 3. There must be no instructions between the found loads and \p End that may 1108 /// clobber the loads. 1109 static Optional<AvailableValue> 1110 tryToConvertLoadOfPtrSelect(BasicBlock *DepBB, BasicBlock::iterator End, 1111 Value *Address, Type *LoadTy, DominatorTree &DT, 1112 AAResults *AA) { 1113 1114 auto *Sel = dyn_cast_or_null<SelectInst>(Address); 1115 if (!Sel || DepBB != Sel->getParent()) 1116 return None; 1117 1118 LoadInst *L1 = findDominatingLoad(Sel->getOperand(1), LoadTy, Sel, DT); 1119 LoadInst *L2 = findDominatingLoad(Sel->getOperand(2), LoadTy, Sel, DT); 1120 if (!L1 || !L2) 1121 return None; 1122 1123 // Ensure there are no accesses that may modify the locations referenced by 1124 // either L1 or L2 between L1, L2 and the specified End iterator. 1125 Instruction *EarlierLoad = L1->comesBefore(L2) ? L1 : L2; 1126 MemoryLocation L1Loc = MemoryLocation::get(L1); 1127 MemoryLocation L2Loc = MemoryLocation::get(L2); 1128 if (any_of(make_range(EarlierLoad->getIterator(), End), [&](Instruction &I) { 1129 return isModSet(AA->getModRefInfo(&I, L1Loc)) || 1130 isModSet(AA->getModRefInfo(&I, L2Loc)); 1131 })) 1132 return None; 1133 1134 return AvailableValue::getSelect(Sel); 1135 } 1136 1137 bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo, 1138 Value *Address, AvailableValue &Res) { 1139 if (!DepInfo.isDef() && !DepInfo.isClobber()) { 1140 assert(isa<SelectInst>(Address)); 1141 if (auto R = tryToConvertLoadOfPtrSelect( 1142 Load->getParent(), Load->getIterator(), Address, Load->getType(), 1143 getDominatorTree(), getAliasAnalysis())) { 1144 Res = *R; 1145 return true; 1146 } 1147 return false; 1148 } 1149 1150 assert((DepInfo.isDef() || DepInfo.isClobber()) && 1151 "expected a local dependence"); 1152 assert(Load->isUnordered() && "rules below are incorrect for ordered access"); 1153 1154 const DataLayout &DL = Load->getModule()->getDataLayout(); 1155 1156 Instruction *DepInst = DepInfo.getInst(); 1157 if (DepInfo.isClobber()) { 1158 // If the dependence is to a store that writes to a superset of the bits 1159 // read by the load, we can extract the bits we need for the load from the 1160 // stored value. 1161 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { 1162 // Can't forward from non-atomic to atomic without violating memory model. 1163 if (Address && Load->isAtomic() <= DepSI->isAtomic()) { 1164 int Offset = 1165 analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL); 1166 if (Offset != -1) { 1167 Res = AvailableValue::get(DepSI->getValueOperand(), Offset); 1168 return true; 1169 } 1170 } 1171 } 1172 1173 // Check to see if we have something like this: 1174 // load i32* P 1175 // load i8* (P+1) 1176 // if we have this, replace the later with an extraction from the former. 1177 if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) { 1178 // If this is a clobber and L is the first instruction in its block, then 1179 // we have the first instruction in the entry block. 1180 // Can't forward from non-atomic to atomic without violating memory model. 1181 if (DepLoad != Load && Address && 1182 Load->isAtomic() <= DepLoad->isAtomic()) { 1183 Type *LoadType = Load->getType(); 1184 int Offset = -1; 1185 1186 // If MD reported clobber, check it was nested. 1187 if (DepInfo.isClobber() && 1188 canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) { 1189 const auto ClobberOff = MD->getClobberOffset(DepLoad); 1190 // GVN has no deal with a negative offset. 1191 Offset = (ClobberOff == None || *ClobberOff < 0) ? -1 : *ClobberOff; 1192 } 1193 if (Offset == -1) 1194 Offset = 1195 analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL); 1196 if (Offset != -1) { 1197 Res = AvailableValue::getLoad(DepLoad, Offset); 1198 return true; 1199 } 1200 } 1201 } 1202 1203 // If the clobbering value is a memset/memcpy/memmove, see if we can 1204 // forward a value on from it. 1205 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) { 1206 if (Address && !Load->isAtomic()) { 1207 int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address, 1208 DepMI, DL); 1209 if (Offset != -1) { 1210 Res = AvailableValue::getMI(DepMI, Offset); 1211 return true; 1212 } 1213 } 1214 } 1215 1216 // Nothing known about this clobber, have to be conservative 1217 LLVM_DEBUG( 1218 // fast print dep, using operator<< on instruction is too slow. 1219 dbgs() << "GVN: load "; Load->printAsOperand(dbgs()); 1220 dbgs() << " is clobbered by " << *DepInst << '\n';); 1221 if (ORE->allowExtraAnalysis(DEBUG_TYPE)) 1222 reportMayClobberedLoad(Load, DepInfo, DT, ORE); 1223 1224 return false; 1225 } 1226 assert(DepInfo.isDef() && "follows from above"); 1227 1228 // Loading the alloca -> undef. 1229 // Loading immediately after lifetime begin -> undef. 1230 if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst)) { 1231 Res = AvailableValue::get(UndefValue::get(Load->getType())); 1232 return true; 1233 } 1234 1235 if (Constant *InitVal = 1236 getInitialValueOfAllocation(DepInst, TLI, Load->getType())) { 1237 Res = AvailableValue::get(InitVal); 1238 return true; 1239 } 1240 1241 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) { 1242 // Reject loads and stores that are to the same address but are of 1243 // different types if we have to. If the stored value is convertable to 1244 // the loaded value, we can reuse it. 1245 if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(), 1246 DL)) 1247 return false; 1248 1249 // Can't forward from non-atomic to atomic without violating memory model. 1250 if (S->isAtomic() < Load->isAtomic()) 1251 return false; 1252 1253 Res = AvailableValue::get(S->getValueOperand()); 1254 return true; 1255 } 1256 1257 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) { 1258 // If the types mismatch and we can't handle it, reject reuse of the load. 1259 // If the stored value is larger or equal to the loaded value, we can reuse 1260 // it. 1261 if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL)) 1262 return false; 1263 1264 // Can't forward from non-atomic to atomic without violating memory model. 1265 if (LD->isAtomic() < Load->isAtomic()) 1266 return false; 1267 1268 Res = AvailableValue::getLoad(LD); 1269 return true; 1270 } 1271 1272 // Unknown def - must be conservative 1273 LLVM_DEBUG( 1274 // fast print dep, using operator<< on instruction is too slow. 1275 dbgs() << "GVN: load "; Load->printAsOperand(dbgs()); 1276 dbgs() << " has unknown def " << *DepInst << '\n';); 1277 return false; 1278 } 1279 1280 void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps, 1281 AvailValInBlkVect &ValuesPerBlock, 1282 UnavailBlkVect &UnavailableBlocks) { 1283 // Filter out useless results (non-locals, etc). Keep track of the blocks 1284 // where we have a value available in repl, also keep track of whether we see 1285 // dependencies that produce an unknown value for the load (such as a call 1286 // that could potentially clobber the load). 1287 unsigned NumDeps = Deps.size(); 1288 for (unsigned i = 0, e = NumDeps; i != e; ++i) { 1289 BasicBlock *DepBB = Deps[i].getBB(); 1290 MemDepResult DepInfo = Deps[i].getResult(); 1291 1292 if (DeadBlocks.count(DepBB)) { 1293 // Dead dependent mem-op disguise as a load evaluating the same value 1294 // as the load in question. 1295 ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB)); 1296 continue; 1297 } 1298 1299 // The address being loaded in this non-local block may not be the same as 1300 // the pointer operand of the load if PHI translation occurs. Make sure 1301 // to consider the right address. 1302 Value *Address = Deps[i].getAddress(); 1303 1304 if (!DepInfo.isDef() && !DepInfo.isClobber()) { 1305 if (auto R = tryToConvertLoadOfPtrSelect( 1306 DepBB, DepBB->end(), Address, Load->getType(), getDominatorTree(), 1307 getAliasAnalysis())) { 1308 ValuesPerBlock.push_back( 1309 AvailableValueInBlock::get(DepBB, std::move(*R))); 1310 continue; 1311 } 1312 UnavailableBlocks.push_back(DepBB); 1313 continue; 1314 } 1315 1316 AvailableValue AV; 1317 if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) { 1318 // subtlety: because we know this was a non-local dependency, we know 1319 // it's safe to materialize anywhere between the instruction within 1320 // DepInfo and the end of it's block. 1321 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, 1322 std::move(AV))); 1323 } else { 1324 UnavailableBlocks.push_back(DepBB); 1325 } 1326 } 1327 1328 assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() && 1329 "post condition violation"); 1330 } 1331 1332 void GVNPass::eliminatePartiallyRedundantLoad( 1333 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock, 1334 MapVector<BasicBlock *, Value *> &AvailableLoads) { 1335 for (const auto &AvailableLoad : AvailableLoads) { 1336 BasicBlock *UnavailableBlock = AvailableLoad.first; 1337 Value *LoadPtr = AvailableLoad.second; 1338 1339 auto *NewLoad = 1340 new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre", 1341 Load->isVolatile(), Load->getAlign(), Load->getOrdering(), 1342 Load->getSyncScopeID(), UnavailableBlock->getTerminator()); 1343 NewLoad->setDebugLoc(Load->getDebugLoc()); 1344 if (MSSAU) { 1345 auto *MSSA = MSSAU->getMemorySSA(); 1346 // Get the defining access of the original load or use the load if it is a 1347 // MemoryDef (e.g. because it is volatile). The inserted loads are 1348 // guaranteed to load from the same definition. 1349 auto *LoadAcc = MSSA->getMemoryAccess(Load); 1350 auto *DefiningAcc = 1351 isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess(); 1352 auto *NewAccess = MSSAU->createMemoryAccessInBB( 1353 NewLoad, DefiningAcc, NewLoad->getParent(), 1354 MemorySSA::BeforeTerminator); 1355 if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess)) 1356 MSSAU->insertDef(NewDef, /*RenameUses=*/true); 1357 else 1358 MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true); 1359 } 1360 1361 // Transfer the old load's AA tags to the new load. 1362 AAMDNodes Tags = Load->getAAMetadata(); 1363 if (Tags) 1364 NewLoad->setAAMetadata(Tags); 1365 1366 if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load)) 1367 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD); 1368 if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group)) 1369 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD); 1370 if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range)) 1371 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD); 1372 if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group)) 1373 if (LI && 1374 LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock)) 1375 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD); 1376 1377 // We do not propagate the old load's debug location, because the new 1378 // load now lives in a different BB, and we want to avoid a jumpy line 1379 // table. 1380 // FIXME: How do we retain source locations without causing poor debugging 1381 // behavior? 1382 1383 // Add the newly created load. 1384 ValuesPerBlock.push_back( 1385 AvailableValueInBlock::get(UnavailableBlock, NewLoad)); 1386 MD->invalidateCachedPointerInfo(LoadPtr); 1387 LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); 1388 } 1389 1390 // Perform PHI construction. 1391 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this); 1392 Load->replaceAllUsesWith(V); 1393 if (isa<PHINode>(V)) 1394 V->takeName(Load); 1395 if (Instruction *I = dyn_cast<Instruction>(V)) 1396 I->setDebugLoc(Load->getDebugLoc()); 1397 if (V->getType()->isPtrOrPtrVectorTy()) 1398 MD->invalidateCachedPointerInfo(V); 1399 markInstructionForDeletion(Load); 1400 ORE->emit([&]() { 1401 return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load) 1402 << "load eliminated by PRE"; 1403 }); 1404 } 1405 1406 bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock, 1407 UnavailBlkVect &UnavailableBlocks) { 1408 // Okay, we have *some* definitions of the value. This means that the value 1409 // is available in some of our (transitive) predecessors. Lets think about 1410 // doing PRE of this load. This will involve inserting a new load into the 1411 // predecessor when it's not available. We could do this in general, but 1412 // prefer to not increase code size. As such, we only do this when we know 1413 // that we only have to insert *one* load (which means we're basically moving 1414 // the load, not inserting a new one). 1415 1416 SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(), 1417 UnavailableBlocks.end()); 1418 1419 // Let's find the first basic block with more than one predecessor. Walk 1420 // backwards through predecessors if needed. 1421 BasicBlock *LoadBB = Load->getParent(); 1422 BasicBlock *TmpBB = LoadBB; 1423 1424 // Check that there is no implicit control flow instructions above our load in 1425 // its block. If there is an instruction that doesn't always pass the 1426 // execution to the following instruction, then moving through it may become 1427 // invalid. For example: 1428 // 1429 // int arr[LEN]; 1430 // int index = ???; 1431 // ... 1432 // guard(0 <= index && index < LEN); 1433 // use(arr[index]); 1434 // 1435 // It is illegal to move the array access to any point above the guard, 1436 // because if the index is out of bounds we should deoptimize rather than 1437 // access the array. 1438 // Check that there is no guard in this block above our instruction. 1439 bool MustEnsureSafetyOfSpeculativeExecution = 1440 ICF->isDominatedByICFIFromSameBlock(Load); 1441 1442 while (TmpBB->getSinglePredecessor()) { 1443 TmpBB = TmpBB->getSinglePredecessor(); 1444 if (TmpBB == LoadBB) // Infinite (unreachable) loop. 1445 return false; 1446 if (Blockers.count(TmpBB)) 1447 return false; 1448 1449 // If any of these blocks has more than one successor (i.e. if the edge we 1450 // just traversed was critical), then there are other paths through this 1451 // block along which the load may not be anticipated. Hoisting the load 1452 // above this block would be adding the load to execution paths along 1453 // which it was not previously executed. 1454 if (TmpBB->getTerminator()->getNumSuccessors() != 1) 1455 return false; 1456 1457 // Check that there is no implicit control flow in a block above. 1458 MustEnsureSafetyOfSpeculativeExecution = 1459 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB); 1460 } 1461 1462 assert(TmpBB); 1463 LoadBB = TmpBB; 1464 1465 // Check to see how many predecessors have the loaded value fully 1466 // available. 1467 MapVector<BasicBlock *, Value *> PredLoads; 1468 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks; 1469 for (const AvailableValueInBlock &AV : ValuesPerBlock) 1470 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available; 1471 for (BasicBlock *UnavailableBB : UnavailableBlocks) 1472 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable; 1473 1474 SmallVector<BasicBlock *, 4> CriticalEdgePred; 1475 for (BasicBlock *Pred : predecessors(LoadBB)) { 1476 // If any predecessor block is an EH pad that does not allow non-PHI 1477 // instructions before the terminator, we can't PRE the load. 1478 if (Pred->getTerminator()->isEHPad()) { 1479 LLVM_DEBUG( 1480 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '" 1481 << Pred->getName() << "': " << *Load << '\n'); 1482 return false; 1483 } 1484 1485 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) { 1486 continue; 1487 } 1488 1489 if (Pred->getTerminator()->getNumSuccessors() != 1) { 1490 if (isa<IndirectBrInst>(Pred->getTerminator())) { 1491 LLVM_DEBUG( 1492 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '" 1493 << Pred->getName() << "': " << *Load << '\n'); 1494 return false; 1495 } 1496 1497 if (LoadBB->isEHPad()) { 1498 LLVM_DEBUG( 1499 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '" 1500 << Pred->getName() << "': " << *Load << '\n'); 1501 return false; 1502 } 1503 1504 // Do not split backedge as it will break the canonical loop form. 1505 if (!isLoadPRESplitBackedgeEnabled()) 1506 if (DT->dominates(LoadBB, Pred)) { 1507 LLVM_DEBUG( 1508 dbgs() 1509 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '" 1510 << Pred->getName() << "': " << *Load << '\n'); 1511 return false; 1512 } 1513 1514 CriticalEdgePred.push_back(Pred); 1515 } else { 1516 // Only add the predecessors that will not be split for now. 1517 PredLoads[Pred] = nullptr; 1518 } 1519 } 1520 1521 // Decide whether PRE is profitable for this load. 1522 unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size(); 1523 assert(NumUnavailablePreds != 0 && 1524 "Fully available value should already be eliminated!"); 1525 1526 // If this load is unavailable in multiple predecessors, reject it. 1527 // FIXME: If we could restructure the CFG, we could make a common pred with 1528 // all the preds that don't have an available Load and insert a new load into 1529 // that one block. 1530 if (NumUnavailablePreds != 1) 1531 return false; 1532 1533 // Now we know where we will insert load. We must ensure that it is safe 1534 // to speculatively execute the load at that points. 1535 if (MustEnsureSafetyOfSpeculativeExecution) { 1536 if (CriticalEdgePred.size()) 1537 if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), DT)) 1538 return false; 1539 for (auto &PL : PredLoads) 1540 if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), DT)) 1541 return false; 1542 } 1543 1544 // Split critical edges, and update the unavailable predecessors accordingly. 1545 for (BasicBlock *OrigPred : CriticalEdgePred) { 1546 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB); 1547 assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!"); 1548 PredLoads[NewPred] = nullptr; 1549 LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->" 1550 << LoadBB->getName() << '\n'); 1551 } 1552 1553 // Check if the load can safely be moved to all the unavailable predecessors. 1554 bool CanDoPRE = true; 1555 const DataLayout &DL = Load->getModule()->getDataLayout(); 1556 SmallVector<Instruction*, 8> NewInsts; 1557 for (auto &PredLoad : PredLoads) { 1558 BasicBlock *UnavailablePred = PredLoad.first; 1559 1560 // Do PHI translation to get its value in the predecessor if necessary. The 1561 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred. 1562 // We do the translation for each edge we skipped by going from Load's block 1563 // to LoadBB, otherwise we might miss pieces needing translation. 1564 1565 // If all preds have a single successor, then we know it is safe to insert 1566 // the load on the pred (?!?), so we can insert code to materialize the 1567 // pointer if it is not available. 1568 Value *LoadPtr = Load->getPointerOperand(); 1569 BasicBlock *Cur = Load->getParent(); 1570 while (Cur != LoadBB) { 1571 PHITransAddr Address(LoadPtr, DL, AC); 1572 LoadPtr = Address.PHITranslateWithInsertion( 1573 Cur, Cur->getSinglePredecessor(), *DT, NewInsts); 1574 if (!LoadPtr) { 1575 CanDoPRE = false; 1576 break; 1577 } 1578 Cur = Cur->getSinglePredecessor(); 1579 } 1580 1581 if (LoadPtr) { 1582 PHITransAddr Address(LoadPtr, DL, AC); 1583 LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT, 1584 NewInsts); 1585 } 1586 // If we couldn't find or insert a computation of this phi translated value, 1587 // we fail PRE. 1588 if (!LoadPtr) { 1589 LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " 1590 << *Load->getPointerOperand() << "\n"); 1591 CanDoPRE = false; 1592 break; 1593 } 1594 1595 PredLoad.second = LoadPtr; 1596 } 1597 1598 if (!CanDoPRE) { 1599 while (!NewInsts.empty()) { 1600 // Erase instructions generated by the failed PHI translation before 1601 // trying to number them. PHI translation might insert instructions 1602 // in basic blocks other than the current one, and we delete them 1603 // directly, as markInstructionForDeletion only allows removing from the 1604 // current basic block. 1605 NewInsts.pop_back_val()->eraseFromParent(); 1606 } 1607 // HINT: Don't revert the edge-splitting as following transformation may 1608 // also need to split these critical edges. 1609 return !CriticalEdgePred.empty(); 1610 } 1611 1612 // Okay, we can eliminate this load by inserting a reload in the predecessor 1613 // and using PHI construction to get the value in the other predecessors, do 1614 // it. 1615 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n'); 1616 LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size() 1617 << " INSTS: " << *NewInsts.back() 1618 << '\n'); 1619 1620 // Assign value numbers to the new instructions. 1621 for (Instruction *I : NewInsts) { 1622 // Instructions that have been inserted in predecessor(s) to materialize 1623 // the load address do not retain their original debug locations. Doing 1624 // so could lead to confusing (but correct) source attributions. 1625 I->updateLocationAfterHoist(); 1626 1627 // FIXME: We really _ought_ to insert these value numbers into their 1628 // parent's availability map. However, in doing so, we risk getting into 1629 // ordering issues. If a block hasn't been processed yet, we would be 1630 // marking a value as AVAIL-IN, which isn't what we intend. 1631 VN.lookupOrAdd(I); 1632 } 1633 1634 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads); 1635 ++NumPRELoad; 1636 return true; 1637 } 1638 1639 bool GVNPass::performLoopLoadPRE(LoadInst *Load, 1640 AvailValInBlkVect &ValuesPerBlock, 1641 UnavailBlkVect &UnavailableBlocks) { 1642 if (!LI) 1643 return false; 1644 1645 const Loop *L = LI->getLoopFor(Load->getParent()); 1646 // TODO: Generalize to other loop blocks that dominate the latch. 1647 if (!L || L->getHeader() != Load->getParent()) 1648 return false; 1649 1650 BasicBlock *Preheader = L->getLoopPreheader(); 1651 BasicBlock *Latch = L->getLoopLatch(); 1652 if (!Preheader || !Latch) 1653 return false; 1654 1655 Value *LoadPtr = Load->getPointerOperand(); 1656 // Must be available in preheader. 1657 if (!L->isLoopInvariant(LoadPtr)) 1658 return false; 1659 1660 // We plan to hoist the load to preheader without introducing a new fault. 1661 // In order to do it, we need to prove that we cannot side-exit the loop 1662 // once loop header is first entered before execution of the load. 1663 if (ICF->isDominatedByICFIFromSameBlock(Load)) 1664 return false; 1665 1666 BasicBlock *LoopBlock = nullptr; 1667 for (auto *Blocker : UnavailableBlocks) { 1668 // Blockers from outside the loop are handled in preheader. 1669 if (!L->contains(Blocker)) 1670 continue; 1671 1672 // Only allow one loop block. Loop header is not less frequently executed 1673 // than each loop block, and likely it is much more frequently executed. But 1674 // in case of multiple loop blocks, we need extra information (such as block 1675 // frequency info) to understand whether it is profitable to PRE into 1676 // multiple loop blocks. 1677 if (LoopBlock) 1678 return false; 1679 1680 // Do not sink into inner loops. This may be non-profitable. 1681 if (L != LI->getLoopFor(Blocker)) 1682 return false; 1683 1684 // Blocks that dominate the latch execute on every single iteration, maybe 1685 // except the last one. So PREing into these blocks doesn't make much sense 1686 // in most cases. But the blocks that do not necessarily execute on each 1687 // iteration are sometimes much colder than the header, and this is when 1688 // PRE is potentially profitable. 1689 if (DT->dominates(Blocker, Latch)) 1690 return false; 1691 1692 // Make sure that the terminator itself doesn't clobber. 1693 if (Blocker->getTerminator()->mayWriteToMemory()) 1694 return false; 1695 1696 LoopBlock = Blocker; 1697 } 1698 1699 if (!LoopBlock) 1700 return false; 1701 1702 // Make sure the memory at this pointer cannot be freed, therefore we can 1703 // safely reload from it after clobber. 1704 if (LoadPtr->canBeFreed()) 1705 return false; 1706 1707 // TODO: Support critical edge splitting if blocker has more than 1 successor. 1708 MapVector<BasicBlock *, Value *> AvailableLoads; 1709 AvailableLoads[LoopBlock] = LoadPtr; 1710 AvailableLoads[Preheader] = LoadPtr; 1711 1712 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n'); 1713 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads); 1714 ++NumPRELoopLoad; 1715 return true; 1716 } 1717 1718 static void reportLoadElim(LoadInst *Load, Value *AvailableValue, 1719 OptimizationRemarkEmitter *ORE) { 1720 using namespace ore; 1721 1722 ORE->emit([&]() { 1723 return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load) 1724 << "load of type " << NV("Type", Load->getType()) << " eliminated" 1725 << setExtraArgs() << " in favor of " 1726 << NV("InfavorOfValue", AvailableValue); 1727 }); 1728 } 1729 1730 /// Attempt to eliminate a load whose dependencies are 1731 /// non-local by performing PHI construction. 1732 bool GVNPass::processNonLocalLoad(LoadInst *Load) { 1733 // non-local speculations are not allowed under asan. 1734 if (Load->getParent()->getParent()->hasFnAttribute( 1735 Attribute::SanitizeAddress) || 1736 Load->getParent()->getParent()->hasFnAttribute( 1737 Attribute::SanitizeHWAddress)) 1738 return false; 1739 1740 // Step 1: Find the non-local dependencies of the load. 1741 LoadDepVect Deps; 1742 MD->getNonLocalPointerDependency(Load, Deps); 1743 1744 // If we had to process more than one hundred blocks to find the 1745 // dependencies, this load isn't worth worrying about. Optimizing 1746 // it will be too expensive. 1747 unsigned NumDeps = Deps.size(); 1748 if (NumDeps > MaxNumDeps) 1749 return false; 1750 1751 // If we had a phi translation failure, we'll have a single entry which is a 1752 // clobber in the current block. Reject this early. 1753 if (NumDeps == 1 && 1754 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { 1755 LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs()); 1756 dbgs() << " has unknown dependencies\n";); 1757 return false; 1758 } 1759 1760 bool Changed = false; 1761 // If this load follows a GEP, see if we can PRE the indices before analyzing. 1762 if (GetElementPtrInst *GEP = 1763 dyn_cast<GetElementPtrInst>(Load->getOperand(0))) { 1764 for (Use &U : GEP->indices()) 1765 if (Instruction *I = dyn_cast<Instruction>(U.get())) 1766 Changed |= performScalarPRE(I); 1767 } 1768 1769 // Step 2: Analyze the availability of the load 1770 AvailValInBlkVect ValuesPerBlock; 1771 UnavailBlkVect UnavailableBlocks; 1772 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks); 1773 1774 // If we have no predecessors that produce a known value for this load, exit 1775 // early. 1776 if (ValuesPerBlock.empty()) 1777 return Changed; 1778 1779 // Step 3: Eliminate fully redundancy. 1780 // 1781 // If all of the instructions we depend on produce a known value for this 1782 // load, then it is fully redundant and we can use PHI insertion to compute 1783 // its value. Insert PHIs and remove the fully redundant value now. 1784 if (UnavailableBlocks.empty()) { 1785 LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n'); 1786 1787 // Perform PHI construction. 1788 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this); 1789 Load->replaceAllUsesWith(V); 1790 1791 if (isa<PHINode>(V)) 1792 V->takeName(Load); 1793 if (Instruction *I = dyn_cast<Instruction>(V)) 1794 // If instruction I has debug info, then we should not update it. 1795 // Also, if I has a null DebugLoc, then it is still potentially incorrect 1796 // to propagate Load's DebugLoc because Load may not post-dominate I. 1797 if (Load->getDebugLoc() && Load->getParent() == I->getParent()) 1798 I->setDebugLoc(Load->getDebugLoc()); 1799 if (V->getType()->isPtrOrPtrVectorTy()) 1800 MD->invalidateCachedPointerInfo(V); 1801 markInstructionForDeletion(Load); 1802 ++NumGVNLoad; 1803 reportLoadElim(Load, V, ORE); 1804 return true; 1805 } 1806 1807 // Step 4: Eliminate partial redundancy. 1808 if (!isPREEnabled() || !isLoadPREEnabled()) 1809 return Changed; 1810 if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent())) 1811 return Changed; 1812 1813 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) || 1814 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks)) 1815 return true; 1816 1817 return Changed; 1818 } 1819 1820 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) { 1821 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ) 1822 return true; 1823 1824 // Floating point comparisons can be equal, but not equivalent. Cases: 1825 // NaNs for unordered operators 1826 // +0.0 vs 0.0 for all operators 1827 if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ || 1828 (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ && 1829 Cmp->getFastMathFlags().noNaNs())) { 1830 Value *LHS = Cmp->getOperand(0); 1831 Value *RHS = Cmp->getOperand(1); 1832 // If we can prove either side non-zero, then equality must imply 1833 // equivalence. 1834 // FIXME: We should do this optimization if 'no signed zeros' is 1835 // applicable via an instruction-level fast-math-flag or some other 1836 // indicator that relaxed FP semantics are being used. 1837 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero()) 1838 return true; 1839 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero()) 1840 return true;; 1841 // TODO: Handle vector floating point constants 1842 } 1843 return false; 1844 } 1845 1846 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) { 1847 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE) 1848 return true; 1849 1850 // Floating point comparisons can be equal, but not equivelent. Cases: 1851 // NaNs for unordered operators 1852 // +0.0 vs 0.0 for all operators 1853 if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE && 1854 Cmp->getFastMathFlags().noNaNs()) || 1855 Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) { 1856 Value *LHS = Cmp->getOperand(0); 1857 Value *RHS = Cmp->getOperand(1); 1858 // If we can prove either side non-zero, then equality must imply 1859 // equivalence. 1860 // FIXME: We should do this optimization if 'no signed zeros' is 1861 // applicable via an instruction-level fast-math-flag or some other 1862 // indicator that relaxed FP semantics are being used. 1863 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero()) 1864 return true; 1865 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero()) 1866 return true;; 1867 // TODO: Handle vector floating point constants 1868 } 1869 return false; 1870 } 1871 1872 1873 static bool hasUsersIn(Value *V, BasicBlock *BB) { 1874 for (User *U : V->users()) 1875 if (isa<Instruction>(U) && 1876 cast<Instruction>(U)->getParent() == BB) 1877 return true; 1878 return false; 1879 } 1880 1881 bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) { 1882 Value *V = IntrinsicI->getArgOperand(0); 1883 1884 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) { 1885 if (Cond->isZero()) { 1886 Type *Int8Ty = Type::getInt8Ty(V->getContext()); 1887 // Insert a new store to null instruction before the load to indicate that 1888 // this code is not reachable. FIXME: We could insert unreachable 1889 // instruction directly because we can modify the CFG. 1890 auto *NewS = new StoreInst(PoisonValue::get(Int8Ty), 1891 Constant::getNullValue(Int8Ty->getPointerTo()), 1892 IntrinsicI); 1893 if (MSSAU) { 1894 const MemoryUseOrDef *FirstNonDom = nullptr; 1895 const auto *AL = 1896 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent()); 1897 1898 // If there are accesses in the current basic block, find the first one 1899 // that does not come before NewS. The new memory access is inserted 1900 // after the found access or before the terminator if no such access is 1901 // found. 1902 if (AL) { 1903 for (auto &Acc : *AL) { 1904 if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc)) 1905 if (!Current->getMemoryInst()->comesBefore(NewS)) { 1906 FirstNonDom = Current; 1907 break; 1908 } 1909 } 1910 } 1911 1912 // This added store is to null, so it will never executed and we can 1913 // just use the LiveOnEntry def as defining access. 1914 auto *NewDef = 1915 FirstNonDom ? MSSAU->createMemoryAccessBefore( 1916 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(), 1917 const_cast<MemoryUseOrDef *>(FirstNonDom)) 1918 : MSSAU->createMemoryAccessInBB( 1919 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(), 1920 NewS->getParent(), MemorySSA::BeforeTerminator); 1921 1922 MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false); 1923 } 1924 } 1925 if (isAssumeWithEmptyBundle(*IntrinsicI)) 1926 markInstructionForDeletion(IntrinsicI); 1927 return false; 1928 } else if (isa<Constant>(V)) { 1929 // If it's not false, and constant, it must evaluate to true. This means our 1930 // assume is assume(true), and thus, pointless, and we don't want to do 1931 // anything more here. 1932 return false; 1933 } 1934 1935 Constant *True = ConstantInt::getTrue(V->getContext()); 1936 bool Changed = false; 1937 1938 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) { 1939 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor); 1940 1941 // This property is only true in dominated successors, propagateEquality 1942 // will check dominance for us. 1943 Changed |= propagateEquality(V, True, Edge, false); 1944 } 1945 1946 // We can replace assume value with true, which covers cases like this: 1947 // call void @llvm.assume(i1 %cmp) 1948 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true 1949 ReplaceOperandsWithMap[V] = True; 1950 1951 // Similarly, after assume(!NotV) we know that NotV == false. 1952 Value *NotV; 1953 if (match(V, m_Not(m_Value(NotV)))) 1954 ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext()); 1955 1956 // If we find an equality fact, canonicalize all dominated uses in this block 1957 // to one of the two values. We heuristically choice the "oldest" of the 1958 // two where age is determined by value number. (Note that propagateEquality 1959 // above handles the cross block case.) 1960 // 1961 // Key case to cover are: 1962 // 1) 1963 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen 1964 // call void @llvm.assume(i1 %cmp) 1965 // ret float %0 ; will change it to ret float 3.000000e+00 1966 // 2) 1967 // %load = load float, float* %addr 1968 // %cmp = fcmp oeq float %load, %0 1969 // call void @llvm.assume(i1 %cmp) 1970 // ret float %load ; will change it to ret float %0 1971 if (auto *CmpI = dyn_cast<CmpInst>(V)) { 1972 if (impliesEquivalanceIfTrue(CmpI)) { 1973 Value *CmpLHS = CmpI->getOperand(0); 1974 Value *CmpRHS = CmpI->getOperand(1); 1975 // Heuristically pick the better replacement -- the choice of heuristic 1976 // isn't terribly important here, but the fact we canonicalize on some 1977 // replacement is for exposing other simplifications. 1978 // TODO: pull this out as a helper function and reuse w/existing 1979 // (slightly different) logic. 1980 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS)) 1981 std::swap(CmpLHS, CmpRHS); 1982 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS)) 1983 std::swap(CmpLHS, CmpRHS); 1984 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) || 1985 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) { 1986 // Move the 'oldest' value to the right-hand side, using the value 1987 // number as a proxy for age. 1988 uint32_t LVN = VN.lookupOrAdd(CmpLHS); 1989 uint32_t RVN = VN.lookupOrAdd(CmpRHS); 1990 if (LVN < RVN) 1991 std::swap(CmpLHS, CmpRHS); 1992 } 1993 1994 // Handle degenerate case where we either haven't pruned a dead path or a 1995 // removed a trivial assume yet. 1996 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS)) 1997 return Changed; 1998 1999 LLVM_DEBUG(dbgs() << "Replacing dominated uses of " 2000 << *CmpLHS << " with " 2001 << *CmpRHS << " in block " 2002 << IntrinsicI->getParent()->getName() << "\n"); 2003 2004 2005 // Setup the replacement map - this handles uses within the same block 2006 if (hasUsersIn(CmpLHS, IntrinsicI->getParent())) 2007 ReplaceOperandsWithMap[CmpLHS] = CmpRHS; 2008 2009 // NOTE: The non-block local cases are handled by the call to 2010 // propagateEquality above; this block is just about handling the block 2011 // local cases. TODO: There's a bunch of logic in propagateEqualiy which 2012 // isn't duplicated for the block local case, can we share it somehow? 2013 } 2014 } 2015 return Changed; 2016 } 2017 2018 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { 2019 patchReplacementInstruction(I, Repl); 2020 I->replaceAllUsesWith(Repl); 2021 } 2022 2023 /// Attempt to eliminate a load, first by eliminating it 2024 /// locally, and then attempting non-local elimination if that fails. 2025 bool GVNPass::processLoad(LoadInst *L) { 2026 if (!MD) 2027 return false; 2028 2029 // This code hasn't been audited for ordered or volatile memory access 2030 if (!L->isUnordered()) 2031 return false; 2032 2033 if (L->use_empty()) { 2034 markInstructionForDeletion(L); 2035 return true; 2036 } 2037 2038 // ... to a pointer that has been loaded from before... 2039 MemDepResult Dep = MD->getDependency(L); 2040 2041 // If it is defined in another block, try harder. 2042 if (Dep.isNonLocal()) 2043 return processNonLocalLoad(L); 2044 2045 Value *Address = L->getPointerOperand(); 2046 // Only handle the local case below 2047 if (!Dep.isDef() && !Dep.isClobber() && !isa<SelectInst>(Address)) { 2048 // This might be a NonFuncLocal or an Unknown 2049 LLVM_DEBUG( 2050 // fast print dep, using operator<< on instruction is too slow. 2051 dbgs() << "GVN: load "; L->printAsOperand(dbgs()); 2052 dbgs() << " has unknown dependence\n";); 2053 return false; 2054 } 2055 2056 AvailableValue AV; 2057 if (AnalyzeLoadAvailability(L, Dep, Address, AV)) { 2058 Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this); 2059 2060 // Replace the load! 2061 patchAndReplaceAllUsesWith(L, AvailableValue); 2062 markInstructionForDeletion(L); 2063 if (MSSAU) 2064 MSSAU->removeMemoryAccess(L); 2065 ++NumGVNLoad; 2066 reportLoadElim(L, AvailableValue, ORE); 2067 // Tell MDA to reexamine the reused pointer since we might have more 2068 // information after forwarding it. 2069 if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy()) 2070 MD->invalidateCachedPointerInfo(AvailableValue); 2071 return true; 2072 } 2073 2074 return false; 2075 } 2076 2077 /// Return a pair the first field showing the value number of \p Exp and the 2078 /// second field showing whether it is a value number newly created. 2079 std::pair<uint32_t, bool> 2080 GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) { 2081 uint32_t &e = expressionNumbering[Exp]; 2082 bool CreateNewValNum = !e; 2083 if (CreateNewValNum) { 2084 Expressions.push_back(Exp); 2085 if (ExprIdx.size() < nextValueNumber + 1) 2086 ExprIdx.resize(nextValueNumber * 2); 2087 e = nextValueNumber; 2088 ExprIdx[nextValueNumber++] = nextExprNumber++; 2089 } 2090 return {e, CreateNewValNum}; 2091 } 2092 2093 /// Return whether all the values related with the same \p num are 2094 /// defined in \p BB. 2095 bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB, 2096 GVNPass &Gvn) { 2097 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num]; 2098 while (Vals && Vals->BB == BB) 2099 Vals = Vals->Next; 2100 return !Vals; 2101 } 2102 2103 /// Wrap phiTranslateImpl to provide caching functionality. 2104 uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred, 2105 const BasicBlock *PhiBlock, 2106 uint32_t Num, GVNPass &Gvn) { 2107 auto FindRes = PhiTranslateTable.find({Num, Pred}); 2108 if (FindRes != PhiTranslateTable.end()) 2109 return FindRes->second; 2110 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn); 2111 PhiTranslateTable.insert({{Num, Pred}, NewNum}); 2112 return NewNum; 2113 } 2114 2115 // Return true if the value number \p Num and NewNum have equal value. 2116 // Return false if the result is unknown. 2117 bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum, 2118 const BasicBlock *Pred, 2119 const BasicBlock *PhiBlock, 2120 GVNPass &Gvn) { 2121 CallInst *Call = nullptr; 2122 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num]; 2123 while (Vals) { 2124 Call = dyn_cast<CallInst>(Vals->Val); 2125 if (Call && Call->getParent() == PhiBlock) 2126 break; 2127 Vals = Vals->Next; 2128 } 2129 2130 if (AA->doesNotAccessMemory(Call)) 2131 return true; 2132 2133 if (!MD || !AA->onlyReadsMemory(Call)) 2134 return false; 2135 2136 MemDepResult local_dep = MD->getDependency(Call); 2137 if (!local_dep.isNonLocal()) 2138 return false; 2139 2140 const MemoryDependenceResults::NonLocalDepInfo &deps = 2141 MD->getNonLocalCallDependency(Call); 2142 2143 // Check to see if the Call has no function local clobber. 2144 for (const NonLocalDepEntry &D : deps) { 2145 if (D.getResult().isNonFuncLocal()) 2146 return true; 2147 } 2148 return false; 2149 } 2150 2151 /// Translate value number \p Num using phis, so that it has the values of 2152 /// the phis in BB. 2153 uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred, 2154 const BasicBlock *PhiBlock, 2155 uint32_t Num, GVNPass &Gvn) { 2156 if (PHINode *PN = NumberingPhi[Num]) { 2157 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) { 2158 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred) 2159 if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false)) 2160 return TransVal; 2161 } 2162 return Num; 2163 } 2164 2165 // If there is any value related with Num is defined in a BB other than 2166 // PhiBlock, it cannot depend on a phi in PhiBlock without going through 2167 // a backedge. We can do an early exit in that case to save compile time. 2168 if (!areAllValsInBB(Num, PhiBlock, Gvn)) 2169 return Num; 2170 2171 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0) 2172 return Num; 2173 Expression Exp = Expressions[ExprIdx[Num]]; 2174 2175 for (unsigned i = 0; i < Exp.varargs.size(); i++) { 2176 // For InsertValue and ExtractValue, some varargs are index numbers 2177 // instead of value numbers. Those index numbers should not be 2178 // translated. 2179 if ((i > 1 && Exp.opcode == Instruction::InsertValue) || 2180 (i > 0 && Exp.opcode == Instruction::ExtractValue) || 2181 (i > 1 && Exp.opcode == Instruction::ShuffleVector)) 2182 continue; 2183 Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn); 2184 } 2185 2186 if (Exp.commutative) { 2187 assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!"); 2188 if (Exp.varargs[0] > Exp.varargs[1]) { 2189 std::swap(Exp.varargs[0], Exp.varargs[1]); 2190 uint32_t Opcode = Exp.opcode >> 8; 2191 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) 2192 Exp.opcode = (Opcode << 8) | 2193 CmpInst::getSwappedPredicate( 2194 static_cast<CmpInst::Predicate>(Exp.opcode & 255)); 2195 } 2196 } 2197 2198 if (uint32_t NewNum = expressionNumbering[Exp]) { 2199 if (Exp.opcode == Instruction::Call && NewNum != Num) 2200 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num; 2201 return NewNum; 2202 } 2203 return Num; 2204 } 2205 2206 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed 2207 /// again. 2208 void GVNPass::ValueTable::eraseTranslateCacheEntry( 2209 uint32_t Num, const BasicBlock &CurrBlock) { 2210 for (const BasicBlock *Pred : predecessors(&CurrBlock)) 2211 PhiTranslateTable.erase({Num, Pred}); 2212 } 2213 2214 // In order to find a leader for a given value number at a 2215 // specific basic block, we first obtain the list of all Values for that number, 2216 // and then scan the list to find one whose block dominates the block in 2217 // question. This is fast because dominator tree queries consist of only 2218 // a few comparisons of DFS numbers. 2219 Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) { 2220 LeaderTableEntry Vals = LeaderTable[num]; 2221 if (!Vals.Val) return nullptr; 2222 2223 Value *Val = nullptr; 2224 if (DT->dominates(Vals.BB, BB)) { 2225 Val = Vals.Val; 2226 if (isa<Constant>(Val)) return Val; 2227 } 2228 2229 LeaderTableEntry* Next = Vals.Next; 2230 while (Next) { 2231 if (DT->dominates(Next->BB, BB)) { 2232 if (isa<Constant>(Next->Val)) return Next->Val; 2233 if (!Val) Val = Next->Val; 2234 } 2235 2236 Next = Next->Next; 2237 } 2238 2239 return Val; 2240 } 2241 2242 /// There is an edge from 'Src' to 'Dst'. Return 2243 /// true if every path from the entry block to 'Dst' passes via this edge. In 2244 /// particular 'Dst' must not be reachable via another edge from 'Src'. 2245 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, 2246 DominatorTree *DT) { 2247 // While in theory it is interesting to consider the case in which Dst has 2248 // more than one predecessor, because Dst might be part of a loop which is 2249 // only reachable from Src, in practice it is pointless since at the time 2250 // GVN runs all such loops have preheaders, which means that Dst will have 2251 // been changed to have only one predecessor, namely Src. 2252 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor(); 2253 assert((!Pred || Pred == E.getStart()) && 2254 "No edge between these basic blocks!"); 2255 return Pred != nullptr; 2256 } 2257 2258 void GVNPass::assignBlockRPONumber(Function &F) { 2259 BlockRPONumber.clear(); 2260 uint32_t NextBlockNumber = 1; 2261 ReversePostOrderTraversal<Function *> RPOT(&F); 2262 for (BasicBlock *BB : RPOT) 2263 BlockRPONumber[BB] = NextBlockNumber++; 2264 InvalidBlockRPONumbers = false; 2265 } 2266 2267 bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const { 2268 bool Changed = false; 2269 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) { 2270 Value *Operand = Instr->getOperand(OpNum); 2271 auto it = ReplaceOperandsWithMap.find(Operand); 2272 if (it != ReplaceOperandsWithMap.end()) { 2273 LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " 2274 << *it->second << " in instruction " << *Instr << '\n'); 2275 Instr->setOperand(OpNum, it->second); 2276 Changed = true; 2277 } 2278 } 2279 return Changed; 2280 } 2281 2282 /// The given values are known to be equal in every block 2283 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with 2284 /// 'RHS' everywhere in the scope. Returns whether a change was made. 2285 /// If DominatesByEdge is false, then it means that we will propagate the RHS 2286 /// value starting from the end of Root.Start. 2287 bool GVNPass::propagateEquality(Value *LHS, Value *RHS, 2288 const BasicBlockEdge &Root, 2289 bool DominatesByEdge) { 2290 SmallVector<std::pair<Value*, Value*>, 4> Worklist; 2291 Worklist.push_back(std::make_pair(LHS, RHS)); 2292 bool Changed = false; 2293 // For speed, compute a conservative fast approximation to 2294 // DT->dominates(Root, Root.getEnd()); 2295 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT); 2296 2297 while (!Worklist.empty()) { 2298 std::pair<Value*, Value*> Item = Worklist.pop_back_val(); 2299 LHS = Item.first; RHS = Item.second; 2300 2301 if (LHS == RHS) 2302 continue; 2303 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!"); 2304 2305 // Don't try to propagate equalities between constants. 2306 if (isa<Constant>(LHS) && isa<Constant>(RHS)) 2307 continue; 2308 2309 // Prefer a constant on the right-hand side, or an Argument if no constants. 2310 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS))) 2311 std::swap(LHS, RHS); 2312 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!"); 2313 2314 // If there is no obvious reason to prefer the left-hand side over the 2315 // right-hand side, ensure the longest lived term is on the right-hand side, 2316 // so the shortest lived term will be replaced by the longest lived. 2317 // This tends to expose more simplifications. 2318 uint32_t LVN = VN.lookupOrAdd(LHS); 2319 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) || 2320 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) { 2321 // Move the 'oldest' value to the right-hand side, using the value number 2322 // as a proxy for age. 2323 uint32_t RVN = VN.lookupOrAdd(RHS); 2324 if (LVN < RVN) { 2325 std::swap(LHS, RHS); 2326 LVN = RVN; 2327 } 2328 } 2329 2330 // If value numbering later sees that an instruction in the scope is equal 2331 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve 2332 // the invariant that instructions only occur in the leader table for their 2333 // own value number (this is used by removeFromLeaderTable), do not do this 2334 // if RHS is an instruction (if an instruction in the scope is morphed into 2335 // LHS then it will be turned into RHS by the next GVN iteration anyway, so 2336 // using the leader table is about compiling faster, not optimizing better). 2337 // The leader table only tracks basic blocks, not edges. Only add to if we 2338 // have the simple case where the edge dominates the end. 2339 if (RootDominatesEnd && !isa<Instruction>(RHS)) 2340 addToLeaderTable(LVN, RHS, Root.getEnd()); 2341 2342 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As 2343 // LHS always has at least one use that is not dominated by Root, this will 2344 // never do anything if LHS has only one use. 2345 if (!LHS->hasOneUse()) { 2346 unsigned NumReplacements = 2347 DominatesByEdge 2348 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root) 2349 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart()); 2350 2351 Changed |= NumReplacements > 0; 2352 NumGVNEqProp += NumReplacements; 2353 // Cached information for anything that uses LHS will be invalid. 2354 if (MD) 2355 MD->invalidateCachedPointerInfo(LHS); 2356 } 2357 2358 // Now try to deduce additional equalities from this one. For example, if 2359 // the known equality was "(A != B)" == "false" then it follows that A and B 2360 // are equal in the scope. Only boolean equalities with an explicit true or 2361 // false RHS are currently supported. 2362 if (!RHS->getType()->isIntegerTy(1)) 2363 // Not a boolean equality - bail out. 2364 continue; 2365 ConstantInt *CI = dyn_cast<ConstantInt>(RHS); 2366 if (!CI) 2367 // RHS neither 'true' nor 'false' - bail out. 2368 continue; 2369 // Whether RHS equals 'true'. Otherwise it equals 'false'. 2370 bool isKnownTrue = CI->isMinusOne(); 2371 bool isKnownFalse = !isKnownTrue; 2372 2373 // If "A && B" is known true then both A and B are known true. If "A || B" 2374 // is known false then both A and B are known false. 2375 Value *A, *B; 2376 if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) || 2377 (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) { 2378 Worklist.push_back(std::make_pair(A, RHS)); 2379 Worklist.push_back(std::make_pair(B, RHS)); 2380 continue; 2381 } 2382 2383 // If we are propagating an equality like "(A == B)" == "true" then also 2384 // propagate the equality A == B. When propagating a comparison such as 2385 // "(A >= B)" == "true", replace all instances of "A < B" with "false". 2386 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) { 2387 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); 2388 2389 // If "A == B" is known true, or "A != B" is known false, then replace 2390 // A with B everywhere in the scope. For floating point operations, we 2391 // have to be careful since equality does not always imply equivalance. 2392 if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) || 2393 (isKnownFalse && impliesEquivalanceIfFalse(Cmp))) 2394 Worklist.push_back(std::make_pair(Op0, Op1)); 2395 2396 // If "A >= B" is known true, replace "A < B" with false everywhere. 2397 CmpInst::Predicate NotPred = Cmp->getInversePredicate(); 2398 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); 2399 // Since we don't have the instruction "A < B" immediately to hand, work 2400 // out the value number that it would have and use that to find an 2401 // appropriate instruction (if any). 2402 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2403 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1); 2404 // If the number we were assigned was brand new then there is no point in 2405 // looking for an instruction realizing it: there cannot be one! 2406 if (Num < NextNum) { 2407 Value *NotCmp = findLeader(Root.getEnd(), Num); 2408 if (NotCmp && isa<Instruction>(NotCmp)) { 2409 unsigned NumReplacements = 2410 DominatesByEdge 2411 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root) 2412 : replaceDominatedUsesWith(NotCmp, NotVal, *DT, 2413 Root.getStart()); 2414 Changed |= NumReplacements > 0; 2415 NumGVNEqProp += NumReplacements; 2416 // Cached information for anything that uses NotCmp will be invalid. 2417 if (MD) 2418 MD->invalidateCachedPointerInfo(NotCmp); 2419 } 2420 } 2421 // Ensure that any instruction in scope that gets the "A < B" value number 2422 // is replaced with false. 2423 // The leader table only tracks basic blocks, not edges. Only add to if we 2424 // have the simple case where the edge dominates the end. 2425 if (RootDominatesEnd) 2426 addToLeaderTable(Num, NotVal, Root.getEnd()); 2427 2428 continue; 2429 } 2430 } 2431 2432 return Changed; 2433 } 2434 2435 /// When calculating availability, handle an instruction 2436 /// by inserting it into the appropriate sets 2437 bool GVNPass::processInstruction(Instruction *I) { 2438 // Ignore dbg info intrinsics. 2439 if (isa<DbgInfoIntrinsic>(I)) 2440 return false; 2441 2442 // If the instruction can be easily simplified then do so now in preference 2443 // to value numbering it. Value numbering often exposes redundancies, for 2444 // example if it determines that %y is equal to %x then the instruction 2445 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. 2446 const DataLayout &DL = I->getModule()->getDataLayout(); 2447 if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) { 2448 bool Changed = false; 2449 if (!I->use_empty()) { 2450 // Simplification can cause a special instruction to become not special. 2451 // For example, devirtualization to a willreturn function. 2452 ICF->removeUsersOf(I); 2453 I->replaceAllUsesWith(V); 2454 Changed = true; 2455 } 2456 if (isInstructionTriviallyDead(I, TLI)) { 2457 markInstructionForDeletion(I); 2458 Changed = true; 2459 } 2460 if (Changed) { 2461 if (MD && V->getType()->isPtrOrPtrVectorTy()) 2462 MD->invalidateCachedPointerInfo(V); 2463 ++NumGVNSimpl; 2464 return true; 2465 } 2466 } 2467 2468 if (auto *Assume = dyn_cast<AssumeInst>(I)) 2469 return processAssumeIntrinsic(Assume); 2470 2471 if (LoadInst *Load = dyn_cast<LoadInst>(I)) { 2472 if (processLoad(Load)) 2473 return true; 2474 2475 unsigned Num = VN.lookupOrAdd(Load); 2476 addToLeaderTable(Num, Load, Load->getParent()); 2477 return false; 2478 } 2479 2480 // For conditional branches, we can perform simple conditional propagation on 2481 // the condition value itself. 2482 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 2483 if (!BI->isConditional()) 2484 return false; 2485 2486 if (isa<Constant>(BI->getCondition())) 2487 return processFoldableCondBr(BI); 2488 2489 Value *BranchCond = BI->getCondition(); 2490 BasicBlock *TrueSucc = BI->getSuccessor(0); 2491 BasicBlock *FalseSucc = BI->getSuccessor(1); 2492 // Avoid multiple edges early. 2493 if (TrueSucc == FalseSucc) 2494 return false; 2495 2496 BasicBlock *Parent = BI->getParent(); 2497 bool Changed = false; 2498 2499 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext()); 2500 BasicBlockEdge TrueE(Parent, TrueSucc); 2501 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true); 2502 2503 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext()); 2504 BasicBlockEdge FalseE(Parent, FalseSucc); 2505 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true); 2506 2507 return Changed; 2508 } 2509 2510 // For switches, propagate the case values into the case destinations. 2511 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { 2512 Value *SwitchCond = SI->getCondition(); 2513 BasicBlock *Parent = SI->getParent(); 2514 bool Changed = false; 2515 2516 // Remember how many outgoing edges there are to every successor. 2517 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges; 2518 for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) 2519 ++SwitchEdges[SI->getSuccessor(i)]; 2520 2521 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); 2522 i != e; ++i) { 2523 BasicBlock *Dst = i->getCaseSuccessor(); 2524 // If there is only a single edge, propagate the case value into it. 2525 if (SwitchEdges.lookup(Dst) == 1) { 2526 BasicBlockEdge E(Parent, Dst); 2527 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true); 2528 } 2529 } 2530 return Changed; 2531 } 2532 2533 // Instructions with void type don't return a value, so there's 2534 // no point in trying to find redundancies in them. 2535 if (I->getType()->isVoidTy()) 2536 return false; 2537 2538 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2539 unsigned Num = VN.lookupOrAdd(I); 2540 2541 // Allocations are always uniquely numbered, so we can save time and memory 2542 // by fast failing them. 2543 if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) { 2544 addToLeaderTable(Num, I, I->getParent()); 2545 return false; 2546 } 2547 2548 // If the number we were assigned was a brand new VN, then we don't 2549 // need to do a lookup to see if the number already exists 2550 // somewhere in the domtree: it can't! 2551 if (Num >= NextNum) { 2552 addToLeaderTable(Num, I, I->getParent()); 2553 return false; 2554 } 2555 2556 // Perform fast-path value-number based elimination of values inherited from 2557 // dominators. 2558 Value *Repl = findLeader(I->getParent(), Num); 2559 if (!Repl) { 2560 // Failure, just remember this instance for future use. 2561 addToLeaderTable(Num, I, I->getParent()); 2562 return false; 2563 } else if (Repl == I) { 2564 // If I was the result of a shortcut PRE, it might already be in the table 2565 // and the best replacement for itself. Nothing to do. 2566 return false; 2567 } 2568 2569 // Remove it! 2570 patchAndReplaceAllUsesWith(I, Repl); 2571 if (MD && Repl->getType()->isPtrOrPtrVectorTy()) 2572 MD->invalidateCachedPointerInfo(Repl); 2573 markInstructionForDeletion(I); 2574 return true; 2575 } 2576 2577 /// runOnFunction - This is the main transformation entry point for a function. 2578 bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT, 2579 const TargetLibraryInfo &RunTLI, AAResults &RunAA, 2580 MemoryDependenceResults *RunMD, LoopInfo *LI, 2581 OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) { 2582 AC = &RunAC; 2583 DT = &RunDT; 2584 VN.setDomTree(DT); 2585 TLI = &RunTLI; 2586 VN.setAliasAnalysis(&RunAA); 2587 MD = RunMD; 2588 ImplicitControlFlowTracking ImplicitCFT; 2589 ICF = &ImplicitCFT; 2590 this->LI = LI; 2591 VN.setMemDep(MD); 2592 ORE = RunORE; 2593 InvalidBlockRPONumbers = true; 2594 MemorySSAUpdater Updater(MSSA); 2595 MSSAU = MSSA ? &Updater : nullptr; 2596 2597 bool Changed = false; 2598 bool ShouldContinue = true; 2599 2600 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); 2601 // Merge unconditional branches, allowing PRE to catch more 2602 // optimization opportunities. 2603 for (BasicBlock &BB : llvm::make_early_inc_range(F)) { 2604 bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD); 2605 if (removedBlock) 2606 ++NumGVNBlocks; 2607 2608 Changed |= removedBlock; 2609 } 2610 2611 unsigned Iteration = 0; 2612 while (ShouldContinue) { 2613 LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); 2614 (void) Iteration; 2615 ShouldContinue = iterateOnFunction(F); 2616 Changed |= ShouldContinue; 2617 ++Iteration; 2618 } 2619 2620 if (isPREEnabled()) { 2621 // Fabricate val-num for dead-code in order to suppress assertion in 2622 // performPRE(). 2623 assignValNumForDeadCode(); 2624 bool PREChanged = true; 2625 while (PREChanged) { 2626 PREChanged = performPRE(F); 2627 Changed |= PREChanged; 2628 } 2629 } 2630 2631 // FIXME: Should perform GVN again after PRE does something. PRE can move 2632 // computations into blocks where they become fully redundant. Note that 2633 // we can't do this until PRE's critical edge splitting updates memdep. 2634 // Actually, when this happens, we should just fully integrate PRE into GVN. 2635 2636 cleanupGlobalSets(); 2637 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each 2638 // iteration. 2639 DeadBlocks.clear(); 2640 2641 if (MSSA && VerifyMemorySSA) 2642 MSSA->verifyMemorySSA(); 2643 2644 return Changed; 2645 } 2646 2647 bool GVNPass::processBlock(BasicBlock *BB) { 2648 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function 2649 // (and incrementing BI before processing an instruction). 2650 assert(InstrsToErase.empty() && 2651 "We expect InstrsToErase to be empty across iterations"); 2652 if (DeadBlocks.count(BB)) 2653 return false; 2654 2655 // Clearing map before every BB because it can be used only for single BB. 2656 ReplaceOperandsWithMap.clear(); 2657 bool ChangedFunction = false; 2658 2659 // Since we may not have visited the input blocks of the phis, we can't 2660 // use our normal hash approach for phis. Instead, simply look for 2661 // obvious duplicates. The first pass of GVN will tend to create 2662 // identical phis, and the second or later passes can eliminate them. 2663 ChangedFunction |= EliminateDuplicatePHINodes(BB); 2664 2665 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 2666 BI != BE;) { 2667 if (!ReplaceOperandsWithMap.empty()) 2668 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI); 2669 ChangedFunction |= processInstruction(&*BI); 2670 2671 if (InstrsToErase.empty()) { 2672 ++BI; 2673 continue; 2674 } 2675 2676 // If we need some instructions deleted, do it now. 2677 NumGVNInstr += InstrsToErase.size(); 2678 2679 // Avoid iterator invalidation. 2680 bool AtStart = BI == BB->begin(); 2681 if (!AtStart) 2682 --BI; 2683 2684 for (auto *I : InstrsToErase) { 2685 assert(I->getParent() == BB && "Removing instruction from wrong block?"); 2686 LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n'); 2687 salvageKnowledge(I, AC); 2688 salvageDebugInfo(*I); 2689 if (MD) MD->removeInstruction(I); 2690 if (MSSAU) 2691 MSSAU->removeMemoryAccess(I); 2692 LLVM_DEBUG(verifyRemoved(I)); 2693 ICF->removeInstruction(I); 2694 I->eraseFromParent(); 2695 } 2696 InstrsToErase.clear(); 2697 2698 if (AtStart) 2699 BI = BB->begin(); 2700 else 2701 ++BI; 2702 } 2703 2704 return ChangedFunction; 2705 } 2706 2707 // Instantiate an expression in a predecessor that lacked it. 2708 bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, 2709 BasicBlock *Curr, unsigned int ValNo) { 2710 // Because we are going top-down through the block, all value numbers 2711 // will be available in the predecessor by the time we need them. Any 2712 // that weren't originally present will have been instantiated earlier 2713 // in this loop. 2714 bool success = true; 2715 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { 2716 Value *Op = Instr->getOperand(i); 2717 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 2718 continue; 2719 // This could be a newly inserted instruction, in which case, we won't 2720 // find a value number, and should give up before we hurt ourselves. 2721 // FIXME: Rewrite the infrastructure to let it easier to value number 2722 // and process newly inserted instructions. 2723 if (!VN.exists(Op)) { 2724 success = false; 2725 break; 2726 } 2727 uint32_t TValNo = 2728 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this); 2729 if (Value *V = findLeader(Pred, TValNo)) { 2730 Instr->setOperand(i, V); 2731 } else { 2732 success = false; 2733 break; 2734 } 2735 } 2736 2737 // Fail out if we encounter an operand that is not available in 2738 // the PRE predecessor. This is typically because of loads which 2739 // are not value numbered precisely. 2740 if (!success) 2741 return false; 2742 2743 Instr->insertBefore(Pred->getTerminator()); 2744 Instr->setName(Instr->getName() + ".pre"); 2745 Instr->setDebugLoc(Instr->getDebugLoc()); 2746 2747 ICF->insertInstructionTo(Instr, Pred); 2748 2749 unsigned Num = VN.lookupOrAdd(Instr); 2750 VN.add(Instr, Num); 2751 2752 // Update the availability map to include the new instruction. 2753 addToLeaderTable(Num, Instr, Pred); 2754 return true; 2755 } 2756 2757 bool GVNPass::performScalarPRE(Instruction *CurInst) { 2758 if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() || 2759 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() || 2760 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 2761 isa<DbgInfoIntrinsic>(CurInst)) 2762 return false; 2763 2764 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from 2765 // sinking the compare again, and it would force the code generator to 2766 // move the i1 from processor flags or predicate registers into a general 2767 // purpose register. 2768 if (isa<CmpInst>(CurInst)) 2769 return false; 2770 2771 // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from 2772 // sinking the addressing mode computation back to its uses. Extending the 2773 // GEP's live range increases the register pressure, and therefore it can 2774 // introduce unnecessary spills. 2775 // 2776 // This doesn't prevent Load PRE. PHI translation will make the GEP available 2777 // to the load by moving it to the predecessor block if necessary. 2778 if (isa<GetElementPtrInst>(CurInst)) 2779 return false; 2780 2781 if (auto *CallB = dyn_cast<CallBase>(CurInst)) { 2782 // We don't currently value number ANY inline asm calls. 2783 if (CallB->isInlineAsm()) 2784 return false; 2785 // Don't do PRE on convergent calls. 2786 if (CallB->isConvergent()) 2787 return false; 2788 } 2789 2790 uint32_t ValNo = VN.lookup(CurInst); 2791 2792 // Look for the predecessors for PRE opportunities. We're 2793 // only trying to solve the basic diamond case, where 2794 // a value is computed in the successor and one predecessor, 2795 // but not the other. We also explicitly disallow cases 2796 // where the successor is its own predecessor, because they're 2797 // more complicated to get right. 2798 unsigned NumWith = 0; 2799 unsigned NumWithout = 0; 2800 BasicBlock *PREPred = nullptr; 2801 BasicBlock *CurrentBlock = CurInst->getParent(); 2802 2803 // Update the RPO numbers for this function. 2804 if (InvalidBlockRPONumbers) 2805 assignBlockRPONumber(*CurrentBlock->getParent()); 2806 2807 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap; 2808 for (BasicBlock *P : predecessors(CurrentBlock)) { 2809 // We're not interested in PRE where blocks with predecessors that are 2810 // not reachable. 2811 if (!DT->isReachableFromEntry(P)) { 2812 NumWithout = 2; 2813 break; 2814 } 2815 // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and 2816 // when CurInst has operand defined in CurrentBlock (so it may be defined 2817 // by phi in the loop header). 2818 assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) && 2819 "Invalid BlockRPONumber map."); 2820 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] && 2821 llvm::any_of(CurInst->operands(), [&](const Use &U) { 2822 if (auto *Inst = dyn_cast<Instruction>(U.get())) 2823 return Inst->getParent() == CurrentBlock; 2824 return false; 2825 })) { 2826 NumWithout = 2; 2827 break; 2828 } 2829 2830 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this); 2831 Value *predV = findLeader(P, TValNo); 2832 if (!predV) { 2833 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P)); 2834 PREPred = P; 2835 ++NumWithout; 2836 } else if (predV == CurInst) { 2837 /* CurInst dominates this predecessor. */ 2838 NumWithout = 2; 2839 break; 2840 } else { 2841 predMap.push_back(std::make_pair(predV, P)); 2842 ++NumWith; 2843 } 2844 } 2845 2846 // Don't do PRE when it might increase code size, i.e. when 2847 // we would need to insert instructions in more than one pred. 2848 if (NumWithout > 1 || NumWith == 0) 2849 return false; 2850 2851 // We may have a case where all predecessors have the instruction, 2852 // and we just need to insert a phi node. Otherwise, perform 2853 // insertion. 2854 Instruction *PREInstr = nullptr; 2855 2856 if (NumWithout != 0) { 2857 if (!isSafeToSpeculativelyExecute(CurInst)) { 2858 // It is only valid to insert a new instruction if the current instruction 2859 // is always executed. An instruction with implicit control flow could 2860 // prevent us from doing it. If we cannot speculate the execution, then 2861 // PRE should be prohibited. 2862 if (ICF->isDominatedByICFIFromSameBlock(CurInst)) 2863 return false; 2864 } 2865 2866 // Don't do PRE across indirect branch. 2867 if (isa<IndirectBrInst>(PREPred->getTerminator())) 2868 return false; 2869 2870 // We can't do PRE safely on a critical edge, so instead we schedule 2871 // the edge to be split and perform the PRE the next time we iterate 2872 // on the function. 2873 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); 2874 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 2875 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 2876 return false; 2877 } 2878 // We need to insert somewhere, so let's give it a shot 2879 PREInstr = CurInst->clone(); 2880 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) { 2881 // If we failed insertion, make sure we remove the instruction. 2882 LLVM_DEBUG(verifyRemoved(PREInstr)); 2883 PREInstr->deleteValue(); 2884 return false; 2885 } 2886 } 2887 2888 // Either we should have filled in the PRE instruction, or we should 2889 // not have needed insertions. 2890 assert(PREInstr != nullptr || NumWithout == 0); 2891 2892 ++NumGVNPRE; 2893 2894 // Create a PHI to make the value available in this block. 2895 PHINode *Phi = 2896 PHINode::Create(CurInst->getType(), predMap.size(), 2897 CurInst->getName() + ".pre-phi", &CurrentBlock->front()); 2898 for (unsigned i = 0, e = predMap.size(); i != e; ++i) { 2899 if (Value *V = predMap[i].first) { 2900 // If we use an existing value in this phi, we have to patch the original 2901 // value because the phi will be used to replace a later value. 2902 patchReplacementInstruction(CurInst, V); 2903 Phi->addIncoming(V, predMap[i].second); 2904 } else 2905 Phi->addIncoming(PREInstr, PREPred); 2906 } 2907 2908 VN.add(Phi, ValNo); 2909 // After creating a new PHI for ValNo, the phi translate result for ValNo will 2910 // be changed, so erase the related stale entries in phi translate cache. 2911 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock); 2912 addToLeaderTable(ValNo, Phi, CurrentBlock); 2913 Phi->setDebugLoc(CurInst->getDebugLoc()); 2914 CurInst->replaceAllUsesWith(Phi); 2915 if (MD && Phi->getType()->isPtrOrPtrVectorTy()) 2916 MD->invalidateCachedPointerInfo(Phi); 2917 VN.erase(CurInst); 2918 removeFromLeaderTable(ValNo, CurInst, CurrentBlock); 2919 2920 LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); 2921 if (MD) 2922 MD->removeInstruction(CurInst); 2923 if (MSSAU) 2924 MSSAU->removeMemoryAccess(CurInst); 2925 LLVM_DEBUG(verifyRemoved(CurInst)); 2926 // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes 2927 // some assertion failures. 2928 ICF->removeInstruction(CurInst); 2929 CurInst->eraseFromParent(); 2930 ++NumGVNInstr; 2931 2932 return true; 2933 } 2934 2935 /// Perform a purely local form of PRE that looks for diamond 2936 /// control flow patterns and attempts to perform simple PRE at the join point. 2937 bool GVNPass::performPRE(Function &F) { 2938 bool Changed = false; 2939 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) { 2940 // Nothing to PRE in the entry block. 2941 if (CurrentBlock == &F.getEntryBlock()) 2942 continue; 2943 2944 // Don't perform PRE on an EH pad. 2945 if (CurrentBlock->isEHPad()) 2946 continue; 2947 2948 for (BasicBlock::iterator BI = CurrentBlock->begin(), 2949 BE = CurrentBlock->end(); 2950 BI != BE;) { 2951 Instruction *CurInst = &*BI++; 2952 Changed |= performScalarPRE(CurInst); 2953 } 2954 } 2955 2956 if (splitCriticalEdges()) 2957 Changed = true; 2958 2959 return Changed; 2960 } 2961 2962 /// Split the critical edge connecting the given two blocks, and return 2963 /// the block inserted to the critical edge. 2964 BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) { 2965 // GVN does not require loop-simplify, do not try to preserve it if it is not 2966 // possible. 2967 BasicBlock *BB = SplitCriticalEdge( 2968 Pred, Succ, 2969 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify()); 2970 if (BB) { 2971 if (MD) 2972 MD->invalidateCachedPredecessors(); 2973 InvalidBlockRPONumbers = true; 2974 } 2975 return BB; 2976 } 2977 2978 /// Split critical edges found during the previous 2979 /// iteration that may enable further optimization. 2980 bool GVNPass::splitCriticalEdges() { 2981 if (toSplit.empty()) 2982 return false; 2983 2984 bool Changed = false; 2985 do { 2986 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val(); 2987 Changed |= SplitCriticalEdge(Edge.first, Edge.second, 2988 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) != 2989 nullptr; 2990 } while (!toSplit.empty()); 2991 if (Changed) { 2992 if (MD) 2993 MD->invalidateCachedPredecessors(); 2994 InvalidBlockRPONumbers = true; 2995 } 2996 return Changed; 2997 } 2998 2999 /// Executes one iteration of GVN 3000 bool GVNPass::iterateOnFunction(Function &F) { 3001 cleanupGlobalSets(); 3002 3003 // Top-down walk of the dominator tree 3004 bool Changed = false; 3005 // Needed for value numbering with phi construction to work. 3006 // RPOT walks the graph in its constructor and will not be invalidated during 3007 // processBlock. 3008 ReversePostOrderTraversal<Function *> RPOT(&F); 3009 3010 for (BasicBlock *BB : RPOT) 3011 Changed |= processBlock(BB); 3012 3013 return Changed; 3014 } 3015 3016 void GVNPass::cleanupGlobalSets() { 3017 VN.clear(); 3018 LeaderTable.clear(); 3019 BlockRPONumber.clear(); 3020 TableAllocator.Reset(); 3021 ICF->clear(); 3022 InvalidBlockRPONumbers = true; 3023 } 3024 3025 /// Verify that the specified instruction does not occur in our 3026 /// internal data structures. 3027 void GVNPass::verifyRemoved(const Instruction *Inst) const { 3028 VN.verifyRemoved(Inst); 3029 3030 // Walk through the value number scope to make sure the instruction isn't 3031 // ferreted away in it. 3032 for (const auto &I : LeaderTable) { 3033 const LeaderTableEntry *Node = &I.second; 3034 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 3035 3036 while (Node->Next) { 3037 Node = Node->Next; 3038 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 3039 } 3040 } 3041 } 3042 3043 /// BB is declared dead, which implied other blocks become dead as well. This 3044 /// function is to add all these blocks to "DeadBlocks". For the dead blocks' 3045 /// live successors, update their phi nodes by replacing the operands 3046 /// corresponding to dead blocks with UndefVal. 3047 void GVNPass::addDeadBlock(BasicBlock *BB) { 3048 SmallVector<BasicBlock *, 4> NewDead; 3049 SmallSetVector<BasicBlock *, 4> DF; 3050 3051 NewDead.push_back(BB); 3052 while (!NewDead.empty()) { 3053 BasicBlock *D = NewDead.pop_back_val(); 3054 if (DeadBlocks.count(D)) 3055 continue; 3056 3057 // All blocks dominated by D are dead. 3058 SmallVector<BasicBlock *, 8> Dom; 3059 DT->getDescendants(D, Dom); 3060 DeadBlocks.insert(Dom.begin(), Dom.end()); 3061 3062 // Figure out the dominance-frontier(D). 3063 for (BasicBlock *B : Dom) { 3064 for (BasicBlock *S : successors(B)) { 3065 if (DeadBlocks.count(S)) 3066 continue; 3067 3068 bool AllPredDead = true; 3069 for (BasicBlock *P : predecessors(S)) 3070 if (!DeadBlocks.count(P)) { 3071 AllPredDead = false; 3072 break; 3073 } 3074 3075 if (!AllPredDead) { 3076 // S could be proved dead later on. That is why we don't update phi 3077 // operands at this moment. 3078 DF.insert(S); 3079 } else { 3080 // While S is not dominated by D, it is dead by now. This could take 3081 // place if S already have a dead predecessor before D is declared 3082 // dead. 3083 NewDead.push_back(S); 3084 } 3085 } 3086 } 3087 } 3088 3089 // For the dead blocks' live successors, update their phi nodes by replacing 3090 // the operands corresponding to dead blocks with UndefVal. 3091 for (BasicBlock *B : DF) { 3092 if (DeadBlocks.count(B)) 3093 continue; 3094 3095 // First, split the critical edges. This might also create additional blocks 3096 // to preserve LoopSimplify form and adjust edges accordingly. 3097 SmallVector<BasicBlock *, 4> Preds(predecessors(B)); 3098 for (BasicBlock *P : Preds) { 3099 if (!DeadBlocks.count(P)) 3100 continue; 3101 3102 if (llvm::is_contained(successors(P), B) && 3103 isCriticalEdge(P->getTerminator(), B)) { 3104 if (BasicBlock *S = splitCriticalEdges(P, B)) 3105 DeadBlocks.insert(P = S); 3106 } 3107 } 3108 3109 // Now poison the incoming values from the dead predecessors. 3110 for (BasicBlock *P : predecessors(B)) { 3111 if (!DeadBlocks.count(P)) 3112 continue; 3113 for (PHINode &Phi : B->phis()) { 3114 Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType())); 3115 if (MD) 3116 MD->invalidateCachedPointerInfo(&Phi); 3117 } 3118 } 3119 } 3120 } 3121 3122 // If the given branch is recognized as a foldable branch (i.e. conditional 3123 // branch with constant condition), it will perform following analyses and 3124 // transformation. 3125 // 1) If the dead out-coming edge is a critical-edge, split it. Let 3126 // R be the target of the dead out-coming edge. 3127 // 1) Identify the set of dead blocks implied by the branch's dead outcoming 3128 // edge. The result of this step will be {X| X is dominated by R} 3129 // 2) Identify those blocks which haves at least one dead predecessor. The 3130 // result of this step will be dominance-frontier(R). 3131 // 3) Update the PHIs in DF(R) by replacing the operands corresponding to 3132 // dead blocks with "UndefVal" in an hope these PHIs will optimized away. 3133 // 3134 // Return true iff *NEW* dead code are found. 3135 bool GVNPass::processFoldableCondBr(BranchInst *BI) { 3136 if (!BI || BI->isUnconditional()) 3137 return false; 3138 3139 // If a branch has two identical successors, we cannot declare either dead. 3140 if (BI->getSuccessor(0) == BI->getSuccessor(1)) 3141 return false; 3142 3143 ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition()); 3144 if (!Cond) 3145 return false; 3146 3147 BasicBlock *DeadRoot = 3148 Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0); 3149 if (DeadBlocks.count(DeadRoot)) 3150 return false; 3151 3152 if (!DeadRoot->getSinglePredecessor()) 3153 DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot); 3154 3155 addDeadBlock(DeadRoot); 3156 return true; 3157 } 3158 3159 // performPRE() will trigger assert if it comes across an instruction without 3160 // associated val-num. As it normally has far more live instructions than dead 3161 // instructions, it makes more sense just to "fabricate" a val-number for the 3162 // dead code than checking if instruction involved is dead or not. 3163 void GVNPass::assignValNumForDeadCode() { 3164 for (BasicBlock *BB : DeadBlocks) { 3165 for (Instruction &Inst : *BB) { 3166 unsigned ValNum = VN.lookupOrAdd(&Inst); 3167 addToLeaderTable(ValNum, &Inst, BB); 3168 } 3169 } 3170 } 3171 3172 class llvm::gvn::GVNLegacyPass : public FunctionPass { 3173 public: 3174 static char ID; // Pass identification, replacement for typeid 3175 3176 explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep) 3177 : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) { 3178 initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry()); 3179 } 3180 3181 bool runOnFunction(Function &F) override { 3182 if (skipFunction(F)) 3183 return false; 3184 3185 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>(); 3186 3187 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>(); 3188 return Impl.runImpl( 3189 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 3190 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 3191 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 3192 getAnalysis<AAResultsWrapperPass>().getAAResults(), 3193 Impl.isMemDepEnabled() 3194 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep() 3195 : nullptr, 3196 LIWP ? &LIWP->getLoopInfo() : nullptr, 3197 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), 3198 MSSAWP ? &MSSAWP->getMSSA() : nullptr); 3199 } 3200 3201 void getAnalysisUsage(AnalysisUsage &AU) const override { 3202 AU.addRequired<AssumptionCacheTracker>(); 3203 AU.addRequired<DominatorTreeWrapperPass>(); 3204 AU.addRequired<TargetLibraryInfoWrapperPass>(); 3205 AU.addRequired<LoopInfoWrapperPass>(); 3206 if (Impl.isMemDepEnabled()) 3207 AU.addRequired<MemoryDependenceWrapperPass>(); 3208 AU.addRequired<AAResultsWrapperPass>(); 3209 AU.addPreserved<DominatorTreeWrapperPass>(); 3210 AU.addPreserved<GlobalsAAWrapperPass>(); 3211 AU.addPreserved<TargetLibraryInfoWrapperPass>(); 3212 AU.addPreserved<LoopInfoWrapperPass>(); 3213 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 3214 AU.addPreserved<MemorySSAWrapperPass>(); 3215 } 3216 3217 private: 3218 GVNPass Impl; 3219 }; 3220 3221 char GVNLegacyPass::ID = 0; 3222 3223 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false) 3224 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 3225 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) 3226 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 3227 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 3228 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 3229 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 3230 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 3231 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false) 3232 3233 // The public interface to this file... 3234 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) { 3235 return new GVNLegacyPass(NoMemDepAnalysis); 3236 } 3237