//===- NewGVN.cpp - Global Value Numbering Pass --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements LLVM's new Global Value Numbering pass.
/// GVN partitions values computed by a function into congruence classes.
/// Values ending up in the same congruence class are guaranteed to be the same
/// for every execution of the program. In that respect, congruency is a
/// compile-time approximation of equivalence of values at runtime.
/// The algorithm implemented here uses a sparse formulation and is based
/// on the ideas described in the paper:
/// "A Sparse Algorithm for Predicated Global Value Numbering" by
/// Karthik Gargi.
///
/// A brief overview of the algorithm: The algorithm is essentially the same as
/// the standard RPO value numbering algorithm (a good reference is the paper
/// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
/// The RPO algorithm proceeds, on every iteration, to process every reachable
/// block and every instruction in that block. This is because the standard RPO
/// algorithm does not track what things have the same value number, it only
/// tracks what the value number of a given operation is (the mapping is
/// operation -> value number). Thus, when the value number of an operation
/// changes, it must reprocess everything to ensure all uses of a value number
/// get updated properly. In contrast, the sparse algorithm we use *also*
/// tracks what operations have a given value number (IE it also tracks the
/// reverse mapping from value number -> operations with that value number), so
/// that it only needs to reprocess the instructions that are affected when
/// something's value number changes. The vast majority of complexity and code
/// in this file is devoted to tracking what value numbers could change for what
/// instructions when various things happen. The rest of the algorithm is
/// devoted to performing symbolic evaluation, forward propagation, and
/// simplification of operations based on the value numbers deduced so far.
///
/// In order to make the GVN mostly-complete, we use a technique derived from
/// "Detection of Redundant Expressions: A Complete and Polynomial-time
/// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
/// based GVN algorithms is related to their inability to detect equivalence
/// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
/// We resolve this issue by generating the equivalent "phi of ops" form for
/// each op of phis we see, in a way that only takes polynomial time to resolve.
///
/// We also do not perform elimination by using any published algorithm. All
/// published algorithms are O(Instructions). Instead, we use a technique that
/// is O(number of operations with the same value number), enabling us to skip
/// trying to eliminate things that have unique value numbers.
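///
/// As a purely illustrative sketch (the value and block names below are
/// hypothetical, not taken from this file): given
///   %p = phi i32 [ %a, %left ], [ %c, %right ]
///   %q = phi i32 [ %b, %left ], [ %d, %right ]
///   %sum = add i32 %p, %q
/// the phi-of-ops technique considers the equivalent form
///   phi i32 [ %a + %b, %left ], [ %c + %d, %right ]
/// so that %sum can be value numbered together with adds that already exist in
/// the predecessors, something a plain op-of-phis view cannot detect.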
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NewGVN.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::GVNExpression;
using namespace llvm::VNCoercion;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "newgvn"

STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
STATISTIC(NumGVNPhisAllSame, "Number of PHIs whose arguments are all the same");
STATISTIC(NumGVNMaxIterations,
          "Maximum Number of iterations it took to converge GVN");
STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
STATISTIC(NumGVNAvoidedSortedLeaderChanges,
          "Number of avoided sorted leader changes");
STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
STATISTIC(NumGVNPHIOfOpsEliminations,
          "Number of things eliminated using PHI of ops");
DEBUG_COUNTER(VNCounter, "newgvn-vn",
              "Controls which instructions are value numbered");
DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
              "Controls which instructions we create phi of ops for");
// Currently, store defining access refinement is too slow due to BasicAA being
// egregiously slow. This flag lets us keep it working while we work on this
// issue.
static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
                                           cl::init(false), cl::Hidden);

/// Currently, the generation of "phi of ops" can result in correctness
/// issues.
static cl::opt<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(true),
                                    cl::Hidden);

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

// Anchor methods.
namespace llvm {
namespace GVNExpression {

Expression::~Expression() = default;
BasicExpression::~BasicExpression() = default;
CallExpression::~CallExpression() = default;
LoadExpression::~LoadExpression() = default;
StoreExpression::~StoreExpression() = default;
AggregateValueExpression::~AggregateValueExpression() = default;
PHIExpression::~PHIExpression() = default;

} // end namespace GVNExpression
} // end namespace llvm

namespace {

// Tarjan's SCC finding algorithm with Nuutila's improvements.
// SCCIterator is actually fairly complex for the simple thing we want.
// It also wants to hand us SCC's that are unrelated to the phi node we ask
// about, and have us process them there or risk redoing work.
// Graph traits over a filter iterator also doesn't work that well here.
// This SCC finder is specialized to walk use-def chains, and only follows
// instructions, not generic values (arguments, etc).
struct TarjanSCC {
  TarjanSCC() : Components(1) {}

  void Start(const Instruction *Start) {
    if (Root.lookup(Start) == 0)
      FindSCC(Start);
  }

  const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
    unsigned ComponentID = ValueToComponent.lookup(V);

    assert(ComponentID > 0 &&
           "Asking for a component for a value we never processed");
    return Components[ComponentID];
  }

private:
  void FindSCC(const Instruction *I) {
    Root[I] = ++DFSNum;
    // Store the DFS Number we had before it possibly gets incremented.
    unsigned int OurDFS = DFSNum;
    for (auto &Op : I->operands()) {
      if (auto *InstOp = dyn_cast<Instruction>(Op)) {
        if (Root.lookup(Op) == 0)
          FindSCC(InstOp);
        if (!InComponent.count(Op))
          Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
      }
    }
    // See if we really were the root of a component, by seeing if we still
    // have our DFSNumber. If we do, we are the root of the component, and we
    // have completed a component. If we do not, we are not the root of a
    // component, and belong on the component stack.
    if (Root.lookup(I) == OurDFS) {
      unsigned ComponentID = Components.size();
      Components.resize(Components.size() + 1);
      auto &Component = Components.back();
      Component.insert(I);
      LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
      InComponent.insert(I);
      ValueToComponent[I] = ComponentID;
      // Pop a component off the stack and label it.
      while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
        auto *Member = Stack.back();
        LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
        Component.insert(Member);
        InComponent.insert(Member);
        ValueToComponent[Member] = ComponentID;
        Stack.pop_back();
      }
    } else {
      // Part of a component, push to stack.
      Stack.push_back(I);
    }
  }

  unsigned int DFSNum = 1;
  SmallPtrSet<const Value *, 8> InComponent;
  DenseMap<const Value *, unsigned int> Root;
  SmallVector<const Value *, 8> Stack;

  // Store the components as a vector of ptr sets, because we need the topo
  // order of SCC's, but not individual member order.
  SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;

  DenseMap<const Value *, unsigned> ValueToComponent;
};

// Congruence classes represent the set of expressions/instructions
// that are all the same *during some scope in the function*.
// That is, because of the way we perform equality propagation, and
// because of memory value numbering, it is not correct to assume
// you can willy-nilly replace any member with any other at any
// point in the function.
//
// For any Value in the Member set, it is valid to replace any dominated member
// with that Value.
//
// Every congruence class has a leader, and the leader is used to symbolize
// instructions in a canonical way (IE every operand of an instruction that is
// a member of the same congruence class will always be replaced with the
// leader during symbolization). To simplify symbolization, we keep the leader
// as a constant if the class can be proved to be a constant value. Otherwise,
// the leader is the member of the value set with the smallest DFS number.
// Each congruence class also has a defining expression, though the expression
// may be null. If it exists, it can be used for forward propagation and
// reassociation of values.

// For memory, we also track a representative MemoryAccess, and a set of memory
// members for MemoryPhis (which have no real instructions). Note that for
// memory, it seems tempting to try to split the memory members into a
// MemoryCongruenceClass or something. Unfortunately, this does not work
// easily. The value numbering of a given memory expression depends on the
// leader of the memory congruence class, and the leader of a memory congruence
// class depends on the value numbering of a given memory expression. This
// leads to wasted propagation, and in some cases, missed optimization. For
// example: If we had value numbered two stores together before, but now do
// not, we move them to a new value congruence class. This in turn will move
// at least one of the memorydefs to a new memory congruence class, which in
// turn affects the value numbering of the stores we just value numbered
// (because the memory congruence class is part of the value number).
So while theoretically 290 // possible to split them up, it turns out to be *incredibly* complicated to get 291 // it to work right, because of the interdependency. While structurally 292 // slightly messier, it is algorithmically much simpler and faster to do what we 293 // do here, and track them both at once in the same class. 294 // Note: The default iterators for this class iterate over values 295 class CongruenceClass { 296 public: 297 using MemberType = Value; 298 using MemberSet = SmallPtrSet<MemberType *, 4>; 299 using MemoryMemberType = MemoryPhi; 300 using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>; 301 302 explicit CongruenceClass(unsigned ID) : ID(ID) {} 303 CongruenceClass(unsigned ID, Value *Leader, const Expression *E) 304 : ID(ID), RepLeader(Leader), DefiningExpr(E) {} 305 306 unsigned getID() const { return ID; } 307 308 // True if this class has no members left. This is mainly used for assertion 309 // purposes, and for skipping empty classes. 310 bool isDead() const { 311 // If it's both dead from a value perspective, and dead from a memory 312 // perspective, it's really dead. 313 return empty() && memory_empty(); 314 } 315 316 // Leader functions 317 Value *getLeader() const { return RepLeader; } 318 void setLeader(Value *Leader) { RepLeader = Leader; } 319 const std::pair<Value *, unsigned int> &getNextLeader() const { 320 return NextLeader; 321 } 322 void resetNextLeader() { NextLeader = {nullptr, ~0}; } 323 void addPossibleNextLeader(std::pair<Value *, unsigned int> LeaderPair) { 324 if (LeaderPair.second < NextLeader.second) 325 NextLeader = LeaderPair; 326 } 327 328 Value *getStoredValue() const { return RepStoredValue; } 329 void setStoredValue(Value *Leader) { RepStoredValue = Leader; } 330 const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; } 331 void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; } 332 333 // Forward propagation info 334 const Expression *getDefiningExpr() const { return DefiningExpr; } 335 336 // Value member set 337 bool empty() const { return Members.empty(); } 338 unsigned size() const { return Members.size(); } 339 MemberSet::const_iterator begin() const { return Members.begin(); } 340 MemberSet::const_iterator end() const { return Members.end(); } 341 void insert(MemberType *M) { Members.insert(M); } 342 void erase(MemberType *M) { Members.erase(M); } 343 void swap(MemberSet &Other) { Members.swap(Other); } 344 345 // Memory member set 346 bool memory_empty() const { return MemoryMembers.empty(); } 347 unsigned memory_size() const { return MemoryMembers.size(); } 348 MemoryMemberSet::const_iterator memory_begin() const { 349 return MemoryMembers.begin(); 350 } 351 MemoryMemberSet::const_iterator memory_end() const { 352 return MemoryMembers.end(); 353 } 354 iterator_range<MemoryMemberSet::const_iterator> memory() const { 355 return make_range(memory_begin(), memory_end()); 356 } 357 358 void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(M); } 359 void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(M); } 360 361 // Store count 362 unsigned getStoreCount() const { return StoreCount; } 363 void incStoreCount() { ++StoreCount; } 364 void decStoreCount() { 365 assert(StoreCount != 0 && "Store count went negative"); 366 --StoreCount; 367 } 368 369 // True if this class has no memory members. 370 bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); } 371 372 // Return true if two congruence classes are equivalent to each other. 
This 373 // means that every field but the ID number and the dead field are equivalent. 374 bool isEquivalentTo(const CongruenceClass *Other) const { 375 if (!Other) 376 return false; 377 if (this == Other) 378 return true; 379 380 if (std::tie(StoreCount, RepLeader, RepStoredValue, RepMemoryAccess) != 381 std::tie(Other->StoreCount, Other->RepLeader, Other->RepStoredValue, 382 Other->RepMemoryAccess)) 383 return false; 384 if (DefiningExpr != Other->DefiningExpr) 385 if (!DefiningExpr || !Other->DefiningExpr || 386 *DefiningExpr != *Other->DefiningExpr) 387 return false; 388 389 if (Members.size() != Other->Members.size()) 390 return false; 391 392 return all_of(Members, 393 [&](const Value *V) { return Other->Members.count(V); }); 394 } 395 396 private: 397 unsigned ID; 398 399 // Representative leader. 400 Value *RepLeader = nullptr; 401 402 // The most dominating leader after our current leader, because the member set 403 // is not sorted and is expensive to keep sorted all the time. 404 std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U}; 405 406 // If this is represented by a store, the value of the store. 407 Value *RepStoredValue = nullptr; 408 409 // If this class contains MemoryDefs or MemoryPhis, this is the leading memory 410 // access. 411 const MemoryAccess *RepMemoryAccess = nullptr; 412 413 // Defining Expression. 414 const Expression *DefiningExpr = nullptr; 415 416 // Actual members of this class. 417 MemberSet Members; 418 419 // This is the set of MemoryPhis that exist in the class. MemoryDefs and 420 // MemoryUses have real instructions representing them, so we only need to 421 // track MemoryPhis here. 422 MemoryMemberSet MemoryMembers; 423 424 // Number of stores in this congruence class. 425 // This is used so we can detect store equivalence changes properly. 426 int StoreCount = 0; 427 }; 428 429 } // end anonymous namespace 430 431 namespace llvm { 432 433 struct ExactEqualsExpression { 434 const Expression &E; 435 436 explicit ExactEqualsExpression(const Expression &E) : E(E) {} 437 438 hash_code getComputedHash() const { return E.getComputedHash(); } 439 440 bool operator==(const Expression &Other) const { 441 return E.exactlyEquals(Other); 442 } 443 }; 444 445 template <> struct DenseMapInfo<const Expression *> { 446 static const Expression *getEmptyKey() { 447 auto Val = static_cast<uintptr_t>(-1); 448 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable; 449 return reinterpret_cast<const Expression *>(Val); 450 } 451 452 static const Expression *getTombstoneKey() { 453 auto Val = static_cast<uintptr_t>(~1U); 454 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable; 455 return reinterpret_cast<const Expression *>(Val); 456 } 457 458 static unsigned getHashValue(const Expression *E) { 459 return E->getComputedHash(); 460 } 461 462 static unsigned getHashValue(const ExactEqualsExpression &E) { 463 return E.getComputedHash(); 464 } 465 466 static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) { 467 if (RHS == getTombstoneKey() || RHS == getEmptyKey()) 468 return false; 469 return LHS == *RHS; 470 } 471 472 static bool isEqual(const Expression *LHS, const Expression *RHS) { 473 if (LHS == RHS) 474 return true; 475 if (LHS == getTombstoneKey() || RHS == getTombstoneKey() || 476 LHS == getEmptyKey() || RHS == getEmptyKey()) 477 return false; 478 // Compare hashes before equality. 
    // This is *not* what the hashtable does, since it is computing it modulo
    // the number of buckets, whereas we are using the full hash keyspace.
    // Since the hashes are precomputed, this check is *much* faster than
    // equality.
    if (LHS->getComputedHash() != RHS->getComputedHash())
      return false;
    return *LHS == *RHS;
  }
};

} // end namespace llvm

namespace {

class NewGVN {
  Function &F;
  DominatorTree *DT = nullptr;
  const TargetLibraryInfo *TLI = nullptr;
  AliasAnalysis *AA = nullptr;
  MemorySSA *MSSA = nullptr;
  MemorySSAWalker *MSSAWalker = nullptr;
  AssumptionCache *AC = nullptr;
  const DataLayout &DL;
  std::unique_ptr<PredicateInfo> PredInfo;

  // These are the only two things the create* functions should have
  // side-effects on due to allocating memory.
  mutable BumpPtrAllocator ExpressionAllocator;
  mutable ArrayRecycler<Value *> ArgRecycler;
  mutable TarjanSCC SCCFinder;
  const SimplifyQuery SQ;

  // Number of function arguments, used by ranking.
  unsigned int NumFuncArgs = 0;

  // RPO ordering of basic blocks.
  DenseMap<const DomTreeNode *, unsigned> RPOOrdering;

  // Congruence class info.

  // This class is called INITIAL in the paper. It is the class everything
  // starts out in, and represents any value. Being an optimistic analysis,
  // anything in the TOP class has the value TOP, which is indeterminate and
  // equivalent to everything.
  CongruenceClass *TOPClass = nullptr;
  std::vector<CongruenceClass *> CongruenceClasses;
  unsigned NextCongruenceNum = 0;

  // Value Mappings.
  DenseMap<Value *, CongruenceClass *> ValueToClass;
  DenseMap<Value *, const Expression *> ValueToExpression;

  // Value PHI handling, used to make equivalence between phi(op, op) and
  // op(phi, phi).
  // These mappings just store various data that would normally be part of the
  // IR.
  SmallPtrSet<const Instruction *, 8> PHINodeUses;

  DenseMap<const Value *, bool> OpSafeForPHIOfOps;

  // Map a temporary instruction we created to a parent block.
  DenseMap<const Value *, BasicBlock *> TempToBlock;

  // Map between the already in-program instructions and the temporary phis we
  // created that they are known equivalent to.
  DenseMap<const Value *, PHINode *> RealToTemp;

  // In order to know when we should re-process instructions that have
  // phi-of-ops, we track the set of expressions that they needed as
  // leaders. When we discover new leaders for those expressions, we process
  // the associated phi-of-op instructions again in case they have changed.
  // The other way they may change is if they had leaders, and those leaders
  // disappear. However, at the point they have leaders, there are uses of the
  // relevant operands in the created phi node, and so they will get
  // reprocessed through the normal user marking we perform.
  mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
  DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
      ExpressionToPhiOfOps;

  // Map from temporary operation to MemoryAccess.
  DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;

  // Set of all temporary instructions we created.
  // Note: This will include instructions that were just created during value
  // numbering. The way to test if something is using them is to check
  // RealToTemp.
564 DenseSet<Instruction *> AllTempInstructions; 565 566 // This is the set of instructions to revisit on a reachability change. At 567 // the end of the main iteration loop it will contain at least all the phi of 568 // ops instructions that will be changed to phis, as well as regular phis. 569 // During the iteration loop, it may contain other things, such as phi of ops 570 // instructions that used edge reachability to reach a result, and so need to 571 // be revisited when the edge changes, independent of whether the phi they 572 // depended on changes. 573 DenseMap<BasicBlock *, SparseBitVector<>> RevisitOnReachabilityChange; 574 575 // Mapping from predicate info we used to the instructions we used it with. 576 // In order to correctly ensure propagation, we must keep track of what 577 // comparisons we used, so that when the values of the comparisons change, we 578 // propagate the information to the places we used the comparison. 579 mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>> 580 PredicateToUsers; 581 582 // the same reasoning as PredicateToUsers. When we skip MemoryAccesses for 583 // stores, we no longer can rely solely on the def-use chains of MemorySSA. 584 mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>> 585 MemoryToUsers; 586 587 // A table storing which memorydefs/phis represent a memory state provably 588 // equivalent to another memory state. 589 // We could use the congruence class machinery, but the MemoryAccess's are 590 // abstract memory states, so they can only ever be equivalent to each other, 591 // and not to constants, etc. 592 DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass; 593 594 // We could, if we wanted, build MemoryPhiExpressions and 595 // MemoryVariableExpressions, etc, and value number them the same way we value 596 // number phi expressions. For the moment, this seems like overkill. They 597 // can only exist in one of three states: they can be TOP (equal to 598 // everything), Equivalent to something else, or unique. Because we do not 599 // create expressions for them, we need to simulate leader change not just 600 // when they change class, but when they change state. Note: We can do the 601 // same thing for phis, and avoid having phi expressions if we wanted, We 602 // should eventually unify in one direction or the other, so this is a little 603 // bit of an experiment in which turns out easier to maintain. 604 enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique }; 605 DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState; 606 607 enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle }; 608 mutable DenseMap<const Instruction *, InstCycleState> InstCycleState; 609 610 // Expression to class mapping. 611 using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>; 612 ExpressionClassMap ExpressionToClass; 613 614 // We have a single expression that represents currently DeadExpressions. 615 // For dead expressions we can prove will stay dead, we mark them with 616 // DFS number zero. However, it's possible in the case of phi nodes 617 // for us to assume/prove all arguments are dead during fixpointing. 618 // We use DeadExpression for that case. 619 DeadExpression *SingletonDeadExpression = nullptr; 620 621 // Which values have changed as a result of leader changes. 622 SmallPtrSet<Value *, 8> LeaderChanges; 623 624 // Reachability info. 
625 using BlockEdge = BasicBlockEdge; 626 DenseSet<BlockEdge> ReachableEdges; 627 SmallPtrSet<const BasicBlock *, 8> ReachableBlocks; 628 629 // This is a bitvector because, on larger functions, we may have 630 // thousands of touched instructions at once (entire blocks, 631 // instructions with hundreds of uses, etc). Even with optimization 632 // for when we mark whole blocks as touched, when this was a 633 // SmallPtrSet or DenseSet, for some functions, we spent >20% of all 634 // the time in GVN just managing this list. The bitvector, on the 635 // other hand, efficiently supports test/set/clear of both 636 // individual and ranges, as well as "find next element" This 637 // enables us to use it as a worklist with essentially 0 cost. 638 BitVector TouchedInstructions; 639 640 DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange; 641 642 #ifndef NDEBUG 643 // Debugging for how many times each block and instruction got processed. 644 DenseMap<const Value *, unsigned> ProcessedCount; 645 #endif 646 647 // DFS info. 648 // This contains a mapping from Instructions to DFS numbers. 649 // The numbering starts at 1. An instruction with DFS number zero 650 // means that the instruction is dead. 651 DenseMap<const Value *, unsigned> InstrDFS; 652 653 // This contains the mapping DFS numbers to instructions. 654 SmallVector<Value *, 32> DFSToInstr; 655 656 // Deletion info. 657 SmallPtrSet<Instruction *, 8> InstructionsToErase; 658 659 public: 660 NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC, 661 TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA, 662 const DataLayout &DL) 663 : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), AC(AC), DL(DL), 664 PredInfo(std::make_unique<PredicateInfo>(F, *DT, *AC)), 665 SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false, 666 /*CanUseUndef=*/false) {} 667 668 bool runGVN(); 669 670 private: 671 // Expression handling. 672 const Expression *createExpression(Instruction *) const; 673 const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *, 674 Instruction *) const; 675 676 // Our canonical form for phi arguments is a pair of incoming value, incoming 677 // basic block. 678 using ValPair = std::pair<Value *, BasicBlock *>; 679 680 PHIExpression *createPHIExpression(ArrayRef<ValPair>, const Instruction *, 681 BasicBlock *, bool &HasBackEdge, 682 bool &OriginalOpsConstant) const; 683 const DeadExpression *createDeadExpression() const; 684 const VariableExpression *createVariableExpression(Value *) const; 685 const ConstantExpression *createConstantExpression(Constant *) const; 686 const Expression *createVariableOrConstant(Value *V) const; 687 const UnknownExpression *createUnknownExpression(Instruction *) const; 688 const StoreExpression *createStoreExpression(StoreInst *, 689 const MemoryAccess *) const; 690 LoadExpression *createLoadExpression(Type *, Value *, LoadInst *, 691 const MemoryAccess *) const; 692 const CallExpression *createCallExpression(CallInst *, 693 const MemoryAccess *) const; 694 const AggregateValueExpression * 695 createAggregateValueExpression(Instruction *) const; 696 bool setBasicExpressionInfo(Instruction *, BasicExpression *) const; 697 698 // Congruence class handling. 
699 CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) { 700 auto *result = new CongruenceClass(NextCongruenceNum++, Leader, E); 701 CongruenceClasses.emplace_back(result); 702 return result; 703 } 704 705 CongruenceClass *createMemoryClass(MemoryAccess *MA) { 706 auto *CC = createCongruenceClass(nullptr, nullptr); 707 CC->setMemoryLeader(MA); 708 return CC; 709 } 710 711 CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) { 712 auto *CC = getMemoryClass(MA); 713 if (CC->getMemoryLeader() != MA) 714 CC = createMemoryClass(MA); 715 return CC; 716 } 717 718 CongruenceClass *createSingletonCongruenceClass(Value *Member) { 719 CongruenceClass *CClass = createCongruenceClass(Member, nullptr); 720 CClass->insert(Member); 721 ValueToClass[Member] = CClass; 722 return CClass; 723 } 724 725 void initializeCongruenceClasses(Function &F); 726 const Expression *makePossiblePHIOfOps(Instruction *, 727 SmallPtrSetImpl<Value *> &); 728 Value *findLeaderForInst(Instruction *ValueOp, 729 SmallPtrSetImpl<Value *> &Visited, 730 MemoryAccess *MemAccess, Instruction *OrigInst, 731 BasicBlock *PredBB); 732 bool OpIsSafeForPHIOfOpsHelper(Value *V, const BasicBlock *PHIBlock, 733 SmallPtrSetImpl<const Value *> &Visited, 734 SmallVectorImpl<Instruction *> &Worklist); 735 bool OpIsSafeForPHIOfOps(Value *Op, const BasicBlock *PHIBlock, 736 SmallPtrSetImpl<const Value *> &); 737 void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue); 738 void removePhiOfOps(Instruction *I, PHINode *PHITemp); 739 740 // Value number an Instruction or MemoryPhi. 741 void valueNumberMemoryPhi(MemoryPhi *); 742 void valueNumberInstruction(Instruction *); 743 744 // Symbolic evaluation. 745 const Expression *checkSimplificationResults(Expression *, Instruction *, 746 Value *) const; 747 const Expression *performSymbolicEvaluation(Value *, 748 SmallPtrSetImpl<Value *> &) const; 749 const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *, 750 Instruction *, 751 MemoryAccess *) const; 752 const Expression *performSymbolicLoadEvaluation(Instruction *) const; 753 const Expression *performSymbolicStoreEvaluation(Instruction *) const; 754 const Expression *performSymbolicCallEvaluation(Instruction *) const; 755 void sortPHIOps(MutableArrayRef<ValPair> Ops) const; 756 const Expression *performSymbolicPHIEvaluation(ArrayRef<ValPair>, 757 Instruction *I, 758 BasicBlock *PHIBlock) const; 759 const Expression *performSymbolicAggrValueEvaluation(Instruction *) const; 760 const Expression *performSymbolicCmpEvaluation(Instruction *) const; 761 const Expression *performSymbolicPredicateInfoEvaluation(Instruction *) const; 762 763 // Congruence finding. 
764 bool someEquivalentDominates(const Instruction *, const Instruction *) const; 765 Value *lookupOperandLeader(Value *) const; 766 CongruenceClass *getClassForExpression(const Expression *E) const; 767 void performCongruenceFinding(Instruction *, const Expression *); 768 void moveValueToNewCongruenceClass(Instruction *, const Expression *, 769 CongruenceClass *, CongruenceClass *); 770 void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *, 771 CongruenceClass *, CongruenceClass *); 772 Value *getNextValueLeader(CongruenceClass *) const; 773 const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const; 774 bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To); 775 CongruenceClass *getMemoryClass(const MemoryAccess *MA) const; 776 const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const; 777 bool isMemoryAccessTOP(const MemoryAccess *) const; 778 779 // Ranking 780 unsigned int getRank(const Value *) const; 781 bool shouldSwapOperands(const Value *, const Value *) const; 782 783 // Reachability handling. 784 void updateReachableEdge(BasicBlock *, BasicBlock *); 785 void processOutgoingEdges(Instruction *, BasicBlock *); 786 Value *findConditionEquivalence(Value *) const; 787 788 // Elimination. 789 struct ValueDFS; 790 void convertClassToDFSOrdered(const CongruenceClass &, 791 SmallVectorImpl<ValueDFS> &, 792 DenseMap<const Value *, unsigned int> &, 793 SmallPtrSetImpl<Instruction *> &) const; 794 void convertClassToLoadsAndStores(const CongruenceClass &, 795 SmallVectorImpl<ValueDFS> &) const; 796 797 bool eliminateInstructions(Function &); 798 void replaceInstruction(Instruction *, Value *); 799 void markInstructionForDeletion(Instruction *); 800 void deleteInstructionsInBlock(BasicBlock *); 801 Value *findPHIOfOpsLeader(const Expression *, const Instruction *, 802 const BasicBlock *) const; 803 804 // Various instruction touch utilities 805 template <typename Map, typename KeyType> 806 void touchAndErase(Map &, const KeyType &); 807 void markUsersTouched(Value *); 808 void markMemoryUsersTouched(const MemoryAccess *); 809 void markMemoryDefTouched(const MemoryAccess *); 810 void markPredicateUsersTouched(Instruction *); 811 void markValueLeaderChangeTouched(CongruenceClass *CC); 812 void markMemoryLeaderChangeTouched(CongruenceClass *CC); 813 void markPhiOfOpsChanged(const Expression *E); 814 void addPredicateUsers(const PredicateBase *, Instruction *) const; 815 void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const; 816 void addAdditionalUsers(Value *To, Value *User) const; 817 818 // Main loop of value numbering 819 void iterateTouchedInstructions(); 820 821 // Utilities. 
822 void cleanupTables(); 823 std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned); 824 void updateProcessedCount(const Value *V); 825 void verifyMemoryCongruency() const; 826 void verifyIterationSettled(Function &F); 827 void verifyStoreExpressions() const; 828 bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &, 829 const MemoryAccess *, const MemoryAccess *) const; 830 BasicBlock *getBlockForValue(Value *V) const; 831 void deleteExpression(const Expression *E) const; 832 MemoryUseOrDef *getMemoryAccess(const Instruction *) const; 833 MemoryPhi *getMemoryAccess(const BasicBlock *) const; 834 template <class T, class Range> T *getMinDFSOfRange(const Range &) const; 835 836 unsigned InstrToDFSNum(const Value *V) const { 837 assert(isa<Instruction>(V) && "This should not be used for MemoryAccesses"); 838 return InstrDFS.lookup(V); 839 } 840 841 unsigned InstrToDFSNum(const MemoryAccess *MA) const { 842 return MemoryToDFSNum(MA); 843 } 844 845 Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; } 846 847 // Given a MemoryAccess, return the relevant instruction DFS number. Note: 848 // This deliberately takes a value so it can be used with Use's, which will 849 // auto-convert to Value's but not to MemoryAccess's. 850 unsigned MemoryToDFSNum(const Value *MA) const { 851 assert(isa<MemoryAccess>(MA) && 852 "This should not be used with instructions"); 853 return isa<MemoryUseOrDef>(MA) 854 ? InstrToDFSNum(cast<MemoryUseOrDef>(MA)->getMemoryInst()) 855 : InstrDFS.lookup(MA); 856 } 857 858 bool isCycleFree(const Instruction *) const; 859 bool isBackedge(BasicBlock *From, BasicBlock *To) const; 860 861 // Debug counter info. When verifying, we have to reset the value numbering 862 // debug counter to the same state it started in to get the same results. 863 int64_t StartingVNCounter = 0; 864 }; 865 866 } // end anonymous namespace 867 868 template <typename T> 869 static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) { 870 if (!isa<LoadExpression>(RHS) && !isa<StoreExpression>(RHS)) 871 return false; 872 return LHS.MemoryExpression::equals(RHS); 873 } 874 875 bool LoadExpression::equals(const Expression &Other) const { 876 return equalsLoadStoreHelper(*this, Other); 877 } 878 879 bool StoreExpression::equals(const Expression &Other) const { 880 if (!equalsLoadStoreHelper(*this, Other)) 881 return false; 882 // Make sure that store vs store includes the value operand. 883 if (const auto *S = dyn_cast<StoreExpression>(&Other)) 884 if (getStoredValue() != S->getStoredValue()) 885 return false; 886 return true; 887 } 888 889 // Determine if the edge From->To is a backedge 890 bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const { 891 return From == To || 892 RPOOrdering.lookup(DT->getNode(From)) >= 893 RPOOrdering.lookup(DT->getNode(To)); 894 } 895 896 #ifndef NDEBUG 897 static std::string getBlockName(const BasicBlock *B) { 898 return DOTGraphTraits<DOTFuncInfo *>::getSimpleNodeLabel(B, nullptr); 899 } 900 #endif 901 902 // Get a MemoryAccess for an instruction, fake or real. 903 MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const { 904 auto *Result = MSSA->getMemoryAccess(I); 905 return Result ? Result : TempToMemory.lookup(I); 906 } 907 908 // Get a MemoryPhi for a basic block. These are all real. 909 MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const { 910 return MSSA->getMemoryAccess(BB); 911 } 912 913 // Get the basic block from an instruction/memory value. 
914 BasicBlock *NewGVN::getBlockForValue(Value *V) const { 915 if (auto *I = dyn_cast<Instruction>(V)) { 916 auto *Parent = I->getParent(); 917 if (Parent) 918 return Parent; 919 Parent = TempToBlock.lookup(V); 920 assert(Parent && "Every fake instruction should have a block"); 921 return Parent; 922 } 923 924 auto *MP = dyn_cast<MemoryPhi>(V); 925 assert(MP && "Should have been an instruction or a MemoryPhi"); 926 return MP->getBlock(); 927 } 928 929 // Delete a definitely dead expression, so it can be reused by the expression 930 // allocator. Some of these are not in creation functions, so we have to accept 931 // const versions. 932 void NewGVN::deleteExpression(const Expression *E) const { 933 assert(isa<BasicExpression>(E)); 934 auto *BE = cast<BasicExpression>(E); 935 const_cast<BasicExpression *>(BE)->deallocateOperands(ArgRecycler); 936 ExpressionAllocator.Deallocate(E); 937 } 938 939 // If V is a predicateinfo copy, get the thing it is a copy of. 940 static Value *getCopyOf(const Value *V) { 941 if (auto *II = dyn_cast<IntrinsicInst>(V)) 942 if (II->getIntrinsicID() == Intrinsic::ssa_copy) 943 return II->getOperand(0); 944 return nullptr; 945 } 946 947 // Return true if V is really PN, even accounting for predicateinfo copies. 948 static bool isCopyOfPHI(const Value *V, const PHINode *PN) { 949 return V == PN || getCopyOf(V) == PN; 950 } 951 952 static bool isCopyOfAPHI(const Value *V) { 953 auto *CO = getCopyOf(V); 954 return CO && isa<PHINode>(CO); 955 } 956 957 // Sort PHI Operands into a canonical order. What we use here is an RPO 958 // order. The BlockInstRange numbers are generated in an RPO walk of the basic 959 // blocks. 960 void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const { 961 llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) { 962 return BlockInstRange.lookup(P1.second).first < 963 BlockInstRange.lookup(P2.second).first; 964 }); 965 } 966 967 // Return true if V is a value that will always be available (IE can 968 // be placed anywhere) in the function. We don't do globals here 969 // because they are often worse to put in place. 970 static bool alwaysAvailable(Value *V) { 971 return isa<Constant>(V) || isa<Argument>(V); 972 } 973 974 // Create a PHIExpression from an array of {incoming edge, value} pairs. I is 975 // the original instruction we are creating a PHIExpression for (but may not be 976 // a phi node). We require, as an invariant, that all the PHIOperands in the 977 // same block are sorted the same way. sortPHIOps will sort them into a 978 // canonical order. 979 PHIExpression *NewGVN::createPHIExpression(ArrayRef<ValPair> PHIOperands, 980 const Instruction *I, 981 BasicBlock *PHIBlock, 982 bool &HasBackedge, 983 bool &OriginalOpsConstant) const { 984 unsigned NumOps = PHIOperands.size(); 985 auto *E = new (ExpressionAllocator) PHIExpression(NumOps, PHIBlock); 986 987 E->allocateOperands(ArgRecycler, ExpressionAllocator); 988 E->setType(PHIOperands.begin()->first->getType()); 989 E->setOpcode(Instruction::PHI); 990 991 // Filter out unreachable phi operands. 992 auto Filtered = make_filter_range(PHIOperands, [&](const ValPair &P) { 993 auto *BB = P.second; 994 if (auto *PHIOp = dyn_cast<PHINode>(I)) 995 if (isCopyOfPHI(P.first, PHIOp)) 996 return false; 997 if (!ReachableEdges.count({BB, PHIBlock})) 998 return false; 999 // Things in TOPClass are equivalent to everything. 
1000 if (ValueToClass.lookup(P.first) == TOPClass) 1001 return false; 1002 OriginalOpsConstant = OriginalOpsConstant && isa<Constant>(P.first); 1003 HasBackedge = HasBackedge || isBackedge(BB, PHIBlock); 1004 return lookupOperandLeader(P.first) != I; 1005 }); 1006 std::transform(Filtered.begin(), Filtered.end(), op_inserter(E), 1007 [&](const ValPair &P) -> Value * { 1008 return lookupOperandLeader(P.first); 1009 }); 1010 return E; 1011 } 1012 1013 // Set basic expression info (Arguments, type, opcode) for Expression 1014 // E from Instruction I in block B. 1015 bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const { 1016 bool AllConstant = true; 1017 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) 1018 E->setType(GEP->getSourceElementType()); 1019 else 1020 E->setType(I->getType()); 1021 E->setOpcode(I->getOpcode()); 1022 E->allocateOperands(ArgRecycler, ExpressionAllocator); 1023 1024 // Transform the operand array into an operand leader array, and keep track of 1025 // whether all members are constant. 1026 std::transform(I->op_begin(), I->op_end(), op_inserter(E), [&](Value *O) { 1027 auto Operand = lookupOperandLeader(O); 1028 AllConstant = AllConstant && isa<Constant>(Operand); 1029 return Operand; 1030 }); 1031 1032 return AllConstant; 1033 } 1034 1035 const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T, 1036 Value *Arg1, Value *Arg2, 1037 Instruction *I) const { 1038 auto *E = new (ExpressionAllocator) BasicExpression(2); 1039 1040 E->setType(T); 1041 E->setOpcode(Opcode); 1042 E->allocateOperands(ArgRecycler, ExpressionAllocator); 1043 if (Instruction::isCommutative(Opcode)) { 1044 // Ensure that commutative instructions that only differ by a permutation 1045 // of their operands get the same value number by sorting the operand value 1046 // numbers. Since all commutative instructions have two operands it is more 1047 // efficient to sort by hand rather than using, say, std::sort. 1048 if (shouldSwapOperands(Arg1, Arg2)) 1049 std::swap(Arg1, Arg2); 1050 } 1051 E->op_push_back(lookupOperandLeader(Arg1)); 1052 E->op_push_back(lookupOperandLeader(Arg2)); 1053 1054 Value *V = SimplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ); 1055 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1056 return SimplifiedE; 1057 return E; 1058 } 1059 1060 // Take a Value returned by simplification of Expression E/Instruction 1061 // I, and see if it resulted in a simpler expression. If so, return 1062 // that expression. 1063 const Expression *NewGVN::checkSimplificationResults(Expression *E, 1064 Instruction *I, 1065 Value *V) const { 1066 if (!V) 1067 return nullptr; 1068 if (auto *C = dyn_cast<Constant>(V)) { 1069 if (I) 1070 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to " 1071 << " constant " << *C << "\n"); 1072 NumGVNOpsSimplified++; 1073 assert(isa<BasicExpression>(E) && 1074 "We should always have had a basic expression here"); 1075 deleteExpression(E); 1076 return createConstantExpression(C); 1077 } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) { 1078 if (I) 1079 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to " 1080 << " variable " << *V << "\n"); 1081 deleteExpression(E); 1082 return createVariableExpression(V); 1083 } 1084 1085 CongruenceClass *CC = ValueToClass.lookup(V); 1086 if (CC) { 1087 if (CC->getLeader() && CC->getLeader() != I) { 1088 // If we simplified to something else, we need to communicate 1089 // that we're users of the value we simplified to. 
1090 if (I != V) { 1091 // Don't add temporary instructions to the user lists. 1092 if (!AllTempInstructions.count(I)) 1093 addAdditionalUsers(V, I); 1094 } 1095 return createVariableOrConstant(CC->getLeader()); 1096 } 1097 if (CC->getDefiningExpr()) { 1098 // If we simplified to something else, we need to communicate 1099 // that we're users of the value we simplified to. 1100 if (I != V) { 1101 // Don't add temporary instructions to the user lists. 1102 if (!AllTempInstructions.count(I)) 1103 addAdditionalUsers(V, I); 1104 } 1105 1106 if (I) 1107 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to " 1108 << " expression " << *CC->getDefiningExpr() << "\n"); 1109 NumGVNOpsSimplified++; 1110 deleteExpression(E); 1111 return CC->getDefiningExpr(); 1112 } 1113 } 1114 1115 return nullptr; 1116 } 1117 1118 // Create a value expression from the instruction I, replacing operands with 1119 // their leaders. 1120 1121 const Expression *NewGVN::createExpression(Instruction *I) const { 1122 auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands()); 1123 1124 bool AllConstant = setBasicExpressionInfo(I, E); 1125 1126 if (I->isCommutative()) { 1127 // Ensure that commutative instructions that only differ by a permutation 1128 // of their operands get the same value number by sorting the operand value 1129 // numbers. Since all commutative instructions have two operands it is more 1130 // efficient to sort by hand rather than using, say, std::sort. 1131 assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!"); 1132 if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) 1133 E->swapOperands(0, 1); 1134 } 1135 // Perform simplification. 1136 if (auto *CI = dyn_cast<CmpInst>(I)) { 1137 // Sort the operand value numbers so x<y and y>x get the same value 1138 // number. 
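    // For example (illustrative, names hypothetical): "icmp slt %a, %b" and
    // "icmp sgt %b, %a" canonicalize to the same operand order with a swapped
    // predicate, so both end up with the same value number.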
1139 CmpInst::Predicate Predicate = CI->getPredicate(); 1140 if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) { 1141 E->swapOperands(0, 1); 1142 Predicate = CmpInst::getSwappedPredicate(Predicate); 1143 } 1144 E->setOpcode((CI->getOpcode() << 8) | Predicate); 1145 // TODO: 25% of our time is spent in SimplifyCmpInst with pointer operands 1146 assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() && 1147 "Wrong types on cmp instruction"); 1148 assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() && 1149 E->getOperand(1)->getType() == I->getOperand(1)->getType())); 1150 Value *V = 1151 SimplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), SQ); 1152 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1153 return SimplifiedE; 1154 } else if (isa<SelectInst>(I)) { 1155 if (isa<Constant>(E->getOperand(0)) || 1156 E->getOperand(1) == E->getOperand(2)) { 1157 assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() && 1158 E->getOperand(2)->getType() == I->getOperand(2)->getType()); 1159 Value *V = SimplifySelectInst(E->getOperand(0), E->getOperand(1), 1160 E->getOperand(2), SQ); 1161 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1162 return SimplifiedE; 1163 } 1164 } else if (I->isBinaryOp()) { 1165 Value *V = 1166 SimplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), SQ); 1167 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1168 return SimplifiedE; 1169 } else if (auto *CI = dyn_cast<CastInst>(I)) { 1170 Value *V = 1171 SimplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), SQ); 1172 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1173 return SimplifiedE; 1174 } else if (isa<GetElementPtrInst>(I)) { 1175 Value *V = SimplifyGEPInst( 1176 E->getType(), ArrayRef<Value *>(E->op_begin(), E->op_end()), SQ); 1177 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1178 return SimplifiedE; 1179 } else if (AllConstant) { 1180 // We don't bother trying to simplify unless all of the operands 1181 // were constant. 1182 // TODO: There are a lot of Simplify*'s we could call here, if we 1183 // wanted to. The original motivating case for this code was a 1184 // zext i1 false to i8, which we don't have an interface to 1185 // simplify (IE there is no SimplifyZExt). 
1186 1187 SmallVector<Constant *, 8> C; 1188 for (Value *Arg : E->operands()) 1189 C.emplace_back(cast<Constant>(Arg)); 1190 1191 if (Value *V = ConstantFoldInstOperands(I, C, DL, TLI)) 1192 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V)) 1193 return SimplifiedE; 1194 } 1195 return E; 1196 } 1197 1198 const AggregateValueExpression * 1199 NewGVN::createAggregateValueExpression(Instruction *I) const { 1200 if (auto *II = dyn_cast<InsertValueInst>(I)) { 1201 auto *E = new (ExpressionAllocator) 1202 AggregateValueExpression(I->getNumOperands(), II->getNumIndices()); 1203 setBasicExpressionInfo(I, E); 1204 E->allocateIntOperands(ExpressionAllocator); 1205 std::copy(II->idx_begin(), II->idx_end(), int_op_inserter(E)); 1206 return E; 1207 } else if (auto *EI = dyn_cast<ExtractValueInst>(I)) { 1208 auto *E = new (ExpressionAllocator) 1209 AggregateValueExpression(I->getNumOperands(), EI->getNumIndices()); 1210 setBasicExpressionInfo(EI, E); 1211 E->allocateIntOperands(ExpressionAllocator); 1212 std::copy(EI->idx_begin(), EI->idx_end(), int_op_inserter(E)); 1213 return E; 1214 } 1215 llvm_unreachable("Unhandled type of aggregate value operation"); 1216 } 1217 1218 const DeadExpression *NewGVN::createDeadExpression() const { 1219 // DeadExpression has no arguments and all DeadExpression's are the same, 1220 // so we only need one of them. 1221 return SingletonDeadExpression; 1222 } 1223 1224 const VariableExpression *NewGVN::createVariableExpression(Value *V) const { 1225 auto *E = new (ExpressionAllocator) VariableExpression(V); 1226 E->setOpcode(V->getValueID()); 1227 return E; 1228 } 1229 1230 const Expression *NewGVN::createVariableOrConstant(Value *V) const { 1231 if (auto *C = dyn_cast<Constant>(V)) 1232 return createConstantExpression(C); 1233 return createVariableExpression(V); 1234 } 1235 1236 const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const { 1237 auto *E = new (ExpressionAllocator) ConstantExpression(C); 1238 E->setOpcode(C->getValueID()); 1239 return E; 1240 } 1241 1242 const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const { 1243 auto *E = new (ExpressionAllocator) UnknownExpression(I); 1244 E->setOpcode(I->getOpcode()); 1245 return E; 1246 } 1247 1248 const CallExpression * 1249 NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const { 1250 // FIXME: Add operand bundles for calls. 1251 // FIXME: Allow commutative matching for intrinsics. 1252 auto *E = 1253 new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA); 1254 setBasicExpressionInfo(CI, E); 1255 return E; 1256 } 1257 1258 // Return true if some equivalent of instruction Inst dominates instruction U. 1259 bool NewGVN::someEquivalentDominates(const Instruction *Inst, 1260 const Instruction *U) const { 1261 auto *CC = ValueToClass.lookup(Inst); 1262 // This must be an instruction because we are only called from phi nodes 1263 // in the case that the value it needs to check against is an instruction. 1264 1265 // The most likely candidates for dominance are the leader and the next leader. 1266 // The leader or nextleader will dominate in all cases where there is an 1267 // equivalent that is higher up in the dom tree. 1268 // We can't *only* check them, however, because the 1269 // dominator tree could have an infinite number of non-dominating siblings 1270 // with instructions that are in the right congruence class. 
1271 // A 1272 // B C D E F G 1273 // | 1274 // H 1275 // Instruction U could be in H, with equivalents in every other sibling. 1276 // Depending on the rpo order picked, the leader could be the equivalent in 1277 // any of these siblings. 1278 if (!CC) 1279 return false; 1280 if (alwaysAvailable(CC->getLeader())) 1281 return true; 1282 if (DT->dominates(cast<Instruction>(CC->getLeader()), U)) 1283 return true; 1284 if (CC->getNextLeader().first && 1285 DT->dominates(cast<Instruction>(CC->getNextLeader().first), U)) 1286 return true; 1287 return llvm::any_of(*CC, [&](const Value *Member) { 1288 return Member != CC->getLeader() && 1289 DT->dominates(cast<Instruction>(Member), U); 1290 }); 1291 } 1292 1293 // See if we have a congruence class and leader for this operand, and if so, 1294 // return it. Otherwise, return the operand itself. 1295 Value *NewGVN::lookupOperandLeader(Value *V) const { 1296 CongruenceClass *CC = ValueToClass.lookup(V); 1297 if (CC) { 1298 // Everything in TOP is represented by undef, as it can be any value. 1299 // We do have to make sure we get the type right though, so we can't set the 1300 // RepLeader to undef. 1301 if (CC == TOPClass) 1302 return UndefValue::get(V->getType()); 1303 return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader(); 1304 } 1305 1306 return V; 1307 } 1308 1309 const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const { 1310 auto *CC = getMemoryClass(MA); 1311 assert(CC->getMemoryLeader() && 1312 "Every MemoryAccess should be mapped to a congruence class with a " 1313 "representative memory access"); 1314 return CC->getMemoryLeader(); 1315 } 1316 1317 // Return true if the MemoryAccess is really equivalent to everything. This is 1318 // equivalent to the lattice value "TOP" in most lattices. This is the initial 1319 // state of all MemoryAccesses. 1320 bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const { 1321 return getMemoryClass(MA) == TOPClass; 1322 } 1323 1324 LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp, 1325 LoadInst *LI, 1326 const MemoryAccess *MA) const { 1327 auto *E = 1328 new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA)); 1329 E->allocateOperands(ArgRecycler, ExpressionAllocator); 1330 E->setType(LoadType); 1331 1332 // Give store and loads same opcode so they value number together. 1333 E->setOpcode(0); 1334 E->op_push_back(PointerOp); 1335 1336 // TODO: Value number heap versions. We may be able to discover 1337 // things alias analysis can't on it's own (IE that a store and a 1338 // load have the same value, and thus, it isn't clobbering the load). 1339 return E; 1340 } 1341 1342 const StoreExpression * 1343 NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const { 1344 auto *StoredValueLeader = lookupOperandLeader(SI->getValueOperand()); 1345 auto *E = new (ExpressionAllocator) 1346 StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA); 1347 E->allocateOperands(ArgRecycler, ExpressionAllocator); 1348 E->setType(SI->getValueOperand()->getType()); 1349 1350 // Give store and loads same opcode so they value number together. 1351 E->setOpcode(0); 1352 E->op_push_back(lookupOperandLeader(SI->getPointerOperand())); 1353 1354 // TODO: Value number heap versions. We may be able to discover 1355 // things alias analysis can't on it's own (IE that a store and a 1356 // load have the same value, and thus, it isn't clobbering the load). 
1357   return E;
1358 }
1359
1360 const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const {
1361   // Unlike loads, we never try to eliminate stores, so we do not check whether
1362   // they are simple in order to avoid value numbering them.
1363   auto *SI = cast<StoreInst>(I);
1364   auto *StoreAccess = getMemoryAccess(SI);
1365   // Get the expression, if any, for the RHS of the MemoryDef.
1366   const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess();
1367   if (EnableStoreRefinement)
1368     StoreRHS = MSSAWalker->getClobberingMemoryAccess(StoreAccess);
1369   // If we bypassed the use-def chains, make sure we add a use.
1370   StoreRHS = lookupMemoryLeader(StoreRHS);
1371   if (StoreRHS != StoreAccess->getDefiningAccess())
1372     addMemoryUsers(StoreRHS, StoreAccess);
1373   // If we are defined by ourselves, use the live on entry def.
1374   if (StoreRHS == StoreAccess)
1375     StoreRHS = MSSA->getLiveOnEntryDef();
1376
1377   if (SI->isSimple()) {
1378     // See if we are defined by a previous store expression that already has a
1379     // value, and that value is the same as our current store's. FIXME: Right
1380     // now, we only do this for simple stores; we should expand to cover memcpys, etc.
1381     const auto *LastStore = createStoreExpression(SI, StoreRHS);
1382     const auto *LastCC = ExpressionToClass.lookup(LastStore);
1383     // We really want to check whether the expression we matched was a store. No
1384     // easy way to do that. However, we can check that the class we found has a
1385     // store, which, assuming the value numbering state is not corrupt, is
1386     // sufficient, because we must also be equivalent to that store's expression
1387     // for it to be in the same class as our store.
1388     if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue())
1389       return LastStore;
1390     // Also check if our value operand is defined by a load of the same memory
1391     // location, and the memory state is the same as it was then (otherwise, it
1392     // could have been overwritten later; see test32 in
1393     // transforms/DeadStoreElimination/simple.ll).
1394     if (auto *LI = dyn_cast<LoadInst>(LastStore->getStoredValue()))
1395       if ((lookupOperandLeader(LI->getPointerOperand()) ==
1396            LastStore->getOperand(0)) &&
1397           (lookupMemoryLeader(getMemoryAccess(LI)->getDefiningAccess()) ==
1398            StoreRHS))
1399         return LastStore;
1400     deleteExpression(LastStore);
1401   }
1402
1403   // If the store is not equivalent to anything, value number it as a store that
1404   // produces a unique memory state (instead of using its MemoryUse, we use
1405   // its MemoryDef).
1406   return createStoreExpression(SI, StoreAccess);
1407 }
1408
1409 // See if we can extract the value loaded from a pointer out of a dominating
1410 // load, store, or memory intrinsic.
1411 const Expression *
1412 NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
1413                                     LoadInst *LI, Instruction *DepInst,
1414                                     MemoryAccess *DefiningAccess) const {
1415   assert((!LI || LI->isSimple()) && "Not a simple load");
1416   if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) {
1417     // Can't forward from non-atomic to atomic without violating memory model.
1418     // Also don't need to coerce if they are the same type; we will just
1419     // propagate.
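    // When we do coerce (illustrative example, not from this file):
    //   store i32 %c, i32* %p          ; %c's leader is a constant
    //   %v = load i8, i8* %q           ; %q points into the bytes of %p
    // analyzeLoadFromClobberingStore below computes the byte offset of the
    // load within the store, and the load is value numbered to the matching
    // slice of the stored constant.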
1420 if (LI->isAtomic() > DepSI->isAtomic() || 1421 LoadType == DepSI->getValueOperand()->getType()) 1422 return nullptr; 1423 int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL); 1424 if (Offset >= 0) { 1425 if (auto *C = dyn_cast<Constant>( 1426 lookupOperandLeader(DepSI->getValueOperand()))) { 1427 LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI 1428 << " to constant " << *C << "\n"); 1429 return createConstantExpression( 1430 getConstantStoreValueForLoad(C, Offset, LoadType, DL)); 1431 } 1432 } 1433 } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) { 1434 // Can't forward from non-atomic to atomic without violating memory model. 1435 if (LI->isAtomic() > DepLI->isAtomic()) 1436 return nullptr; 1437 int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL); 1438 if (Offset >= 0) { 1439 // We can coerce a constant load into a load. 1440 if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI))) 1441 if (auto *PossibleConstant = 1442 getConstantLoadValueForLoad(C, Offset, LoadType, DL)) { 1443 LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI 1444 << " to constant " << *PossibleConstant << "\n"); 1445 return createConstantExpression(PossibleConstant); 1446 } 1447 } 1448 } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) { 1449 int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL); 1450 if (Offset >= 0) { 1451 if (auto *PossibleConstant = 1452 getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) { 1453 LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI 1454 << " to constant " << *PossibleConstant << "\n"); 1455 return createConstantExpression(PossibleConstant); 1456 } 1457 } 1458 } 1459 1460 // All of the below are only true if the loaded pointer is produced 1461 // by the dependent instruction. 1462 if (LoadPtr != lookupOperandLeader(DepInst) && 1463 !AA->isMustAlias(LoadPtr, DepInst)) 1464 return nullptr; 1465 // If this load really doesn't depend on anything, then we must be loading an 1466 // undef value. This can happen when loading for a fresh allocation with no 1467 // intervening stores, for example. Note that this is only true in the case 1468 // that the result of the allocation is pointer equal to the load ptr. 1469 if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) || 1470 isAlignedAllocLikeFn(DepInst, TLI)) { 1471 return createConstantExpression(UndefValue::get(LoadType)); 1472 } 1473 // If this load occurs either right after a lifetime begin, 1474 // then the loaded value is undefined. 1475 else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) { 1476 if (II->getIntrinsicID() == Intrinsic::lifetime_start) 1477 return createConstantExpression(UndefValue::get(LoadType)); 1478 } 1479 // If this load follows a calloc (which zero initializes memory), 1480 // then the loaded value is zero 1481 else if (isCallocLikeFn(DepInst, TLI)) { 1482 return createConstantExpression(Constant::getNullValue(LoadType)); 1483 } 1484 1485 return nullptr; 1486 } 1487 1488 const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const { 1489 auto *LI = cast<LoadInst>(I); 1490 1491 // We can eliminate in favor of non-simple loads, but we won't be able to 1492 // eliminate the loads themselves. 1493 if (!LI->isSimple()) 1494 return nullptr; 1495 1496 Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand()); 1497 // Load of undef is undef. 
1498   if (isa<UndefValue>(LoadAddressLeader))
1499     return createConstantExpression(UndefValue::get(LI->getType()));
1500   MemoryAccess *OriginalAccess = getMemoryAccess(I);
1501   MemoryAccess *DefiningAccess =
1502       MSSAWalker->getClobberingMemoryAccess(OriginalAccess);
1503
1504   if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
1505     if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
1506       Instruction *DefiningInst = MD->getMemoryInst();
1507       // If the defining instruction is not reachable, replace with undef.
1508       if (!ReachableBlocks.count(DefiningInst->getParent()))
1509         return createConstantExpression(UndefValue::get(LI->getType()));
1510       // This will handle stores and memory insts. We only do this if the
1511       // defining access has a different type, or it is a pointer produced by
1512       // certain memory operations that cause the memory to have a fixed value
1513       // (IE things like calloc).
1514       if (const auto *CoercionResult =
1515               performSymbolicLoadCoercion(LI->getType(), LoadAddressLeader, LI,
1516                                           DefiningInst, DefiningAccess))
1517         return CoercionResult;
1518     }
1519   }
1520
1521   const auto *LE = createLoadExpression(LI->getType(), LoadAddressLeader, LI,
1522                                         DefiningAccess);
1523   // If our MemoryLeader is not our defining access, add a use to the
1524   // MemoryLeader, so that we get reprocessed when it changes.
1525   if (LE->getMemoryLeader() != DefiningAccess)
1526     addMemoryUsers(LE->getMemoryLeader(), OriginalAccess);
1527   return LE;
1528 }
1529
1530 const Expression *
1531 NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const {
1532   auto *PI = PredInfo->getPredicateInfoFor(I);
1533   if (!PI)
1534     return nullptr;
1535
1536   LLVM_DEBUG(dbgs() << "Found predicate info from instruction!\n");
1537
1538   const Optional<PredicateConstraint> &Constraint = PI->getConstraint();
1539   if (!Constraint)
1540     return nullptr;
1541
1542   CmpInst::Predicate Predicate = Constraint->Predicate;
1543   Value *CmpOp0 = I->getOperand(0);
1544   Value *CmpOp1 = Constraint->OtherOp;
1545
1546   Value *FirstOp = lookupOperandLeader(CmpOp0);
1547   Value *SecondOp = lookupOperandLeader(CmpOp1);
1548   Value *AdditionallyUsedValue = CmpOp0;
1549
1550   // Sort the ops.
1551   if (shouldSwapOperands(FirstOp, SecondOp)) {
1552     std::swap(FirstOp, SecondOp);
1553     Predicate = CmpInst::getSwappedPredicate(Predicate);
1554     AdditionallyUsedValue = CmpOp1;
1555   }
1556
1557   if (Predicate == CmpInst::ICMP_EQ) {
1558     addPredicateUsers(PI, I);
1559     addAdditionalUsers(AdditionallyUsedValue, I);
1560     return createVariableOrConstant(FirstOp);
1561   }
1562
1563   // Handle the special case of floating point.
1564   if (Predicate == CmpInst::FCMP_OEQ && isa<ConstantFP>(FirstOp) &&
1565       !cast<ConstantFP>(FirstOp)->isZero()) {
1566     addPredicateUsers(PI, I);
1567     addAdditionalUsers(AdditionallyUsedValue, I);
1568     return createConstantExpression(cast<Constant>(FirstOp));
1569   }
1570
1571   return nullptr;
1572 }
1573
1574 // Evaluate read only and pure calls, and create an expression result.
1575 const Expression *NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
1576   auto *CI = cast<CallInst>(I);
1577   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1578     // Intrinsics with the returned attribute are copies of arguments.
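    // For example (illustrative): %c = call i8* @llvm.ssa.copy(i8* %p) simply
    // returns %p, so it is value numbered to %p's leader, or to whatever
    // predicate info lets us infer about %p on this path.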
1579 if (auto *ReturnedValue = II->getReturnedArgOperand()) { 1580 if (II->getIntrinsicID() == Intrinsic::ssa_copy) 1581 if (const auto *Result = performSymbolicPredicateInfoEvaluation(I)) 1582 return Result; 1583 return createVariableOrConstant(ReturnedValue); 1584 } 1585 } 1586 if (AA->doesNotAccessMemory(CI)) { 1587 return createCallExpression(CI, TOPClass->getMemoryLeader()); 1588 } else if (AA->onlyReadsMemory(CI)) { 1589 if (auto *MA = MSSA->getMemoryAccess(CI)) { 1590 auto *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(MA); 1591 return createCallExpression(CI, DefiningAccess); 1592 } else // MSSA determined that CI does not access memory. 1593 return createCallExpression(CI, TOPClass->getMemoryLeader()); 1594 } 1595 return nullptr; 1596 } 1597 1598 // Retrieve the memory class for a given MemoryAccess. 1599 CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const { 1600 auto *Result = MemoryAccessToClass.lookup(MA); 1601 assert(Result && "Should have found memory class"); 1602 return Result; 1603 } 1604 1605 // Update the MemoryAccess equivalence table to say that From is equal to To, 1606 // and return true if this is different from what already existed in the table. 1607 bool NewGVN::setMemoryClass(const MemoryAccess *From, 1608 CongruenceClass *NewClass) { 1609 assert(NewClass && 1610 "Every MemoryAccess should be getting mapped to a non-null class"); 1611 LLVM_DEBUG(dbgs() << "Setting " << *From); 1612 LLVM_DEBUG(dbgs() << " equivalent to congruence class "); 1613 LLVM_DEBUG(dbgs() << NewClass->getID() 1614 << " with current MemoryAccess leader "); 1615 LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n"); 1616 1617 auto LookupResult = MemoryAccessToClass.find(From); 1618 bool Changed = false; 1619 // If it's already in the table, see if the value changed. 1620 if (LookupResult != MemoryAccessToClass.end()) { 1621 auto *OldClass = LookupResult->second; 1622 if (OldClass != NewClass) { 1623 // If this is a phi, we have to handle memory member updates. 1624 if (auto *MP = dyn_cast<MemoryPhi>(From)) { 1625 OldClass->memory_erase(MP); 1626 NewClass->memory_insert(MP); 1627 // This may have killed the class if it had no non-memory members 1628 if (OldClass->getMemoryLeader() == From) { 1629 if (OldClass->definesNoMemory()) { 1630 OldClass->setMemoryLeader(nullptr); 1631 } else { 1632 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); 1633 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 1634 << OldClass->getID() << " to " 1635 << *OldClass->getMemoryLeader() 1636 << " due to removal of a memory member " << *From 1637 << "\n"); 1638 markMemoryLeaderChangeTouched(OldClass); 1639 } 1640 } 1641 } 1642 // It wasn't equivalent before, and now it is. 1643 LookupResult->second = NewClass; 1644 Changed = true; 1645 } 1646 } 1647 1648 return Changed; 1649 } 1650 1651 // Determine if a instruction is cycle-free. That means the values in the 1652 // instruction don't depend on any expressions that can change value as a result 1653 // of the instruction. For example, a non-cycle free instruction would be v = 1654 // phi(0, v+1). 1655 bool NewGVN::isCycleFree(const Instruction *I) const { 1656 // In order to compute cycle-freeness, we do SCC finding on the instruction, 1657 // and see what kind of SCC it ends up in. If it is a singleton, it is 1658 // cycle-free. If it is not in a singleton, it is only cycle free if the 1659 // other members are all phi nodes (as they do not compute anything, they are 1660 // copies). 
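  // For illustration (hypothetical IR, not from this file):
  //   loop:
  //     %v = phi i32 [ 0, %entry ], [ %v.next, %loop ]
  //     %v.next = add i32 %v, 1
  // The SCC {%v, %v.next} contains a non-phi member, so neither value is
  // cycle-free; a singleton SCC, or one whose members are all phis (which are
  // just copies), is cycle-free.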
1661   auto ICS = InstCycleState.lookup(I);
1662   if (ICS == ICS_Unknown) {
1663     SCCFinder.Start(I);
1664     auto &SCC = SCCFinder.getComponentFor(I);
1665     // It's cycle free if it's size 1 or the SCC is *only* phi nodes.
1666     if (SCC.size() == 1)
1667       InstCycleState.insert({I, ICS_CycleFree});
1668     else {
1669       bool AllPhis = llvm::all_of(SCC, [](const Value *V) {
1670         return isa<PHINode>(V) || isCopyOfAPHI(V);
1671       });
1672       ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
1673       for (auto *Member : SCC)
1674         if (auto *MemberPhi = dyn_cast<PHINode>(Member))
1675           InstCycleState.insert({MemberPhi, ICS});
1676     }
1677   }
1678   if (ICS == ICS_Cycle)
1679     return false;
1680   return true;
1681 }
1682
1683 // Evaluate PHI nodes symbolically and create an expression result.
1684 const Expression *
1685 NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
1686                                      Instruction *I,
1687                                      BasicBlock *PHIBlock) const {
1688   // True if one of the incoming phi edges is a backedge.
1689   bool HasBackedge = false;
1690   // OriginalOpsConstant tracks whether all of the *original* phi operands were
1691   // constants. This is shorthand for "this phi cannot cycle due to forward
1692   // propagation": any change in the value of such a phi is guaranteed not to
1693   // later change the value of the phi. IE it can't be v = phi(undef, v+1).
1694   bool OriginalOpsConstant = true;
1695   auto *E = cast<PHIExpression>(createPHIExpression(
1696       PHIOps, I, PHIBlock, HasBackedge, OriginalOpsConstant));
1697   // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1698   // See if all arguments are the same.
1699   // We track if any were undef because they need special handling.
1700   bool HasUndef = false;
1701   auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
1702     if (isa<UndefValue>(Arg)) {
1703       HasUndef = true;
1704       return false;
1705     }
1706     return true;
1707   });
1708   // If we are left with no operands, it's dead.
1709   if (Filtered.empty()) {
1710     // If it has undef at this point, it means there are no non-undef arguments,
1711     // and thus, the value of the phi node must be undef.
1712     if (HasUndef) {
1713       LLVM_DEBUG(
1714           dbgs() << "PHI Node " << *I
1715                  << " has no non-undef arguments, valuing it as undef\n");
1716       return createConstantExpression(UndefValue::get(I->getType()));
1717     }
1718
1719     LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
1720     deleteExpression(E);
1721     return createDeadExpression();
1722   }
1723   Value *AllSameValue = *(Filtered.begin());
1724   ++Filtered.begin();
1725   // Can't use std::equal here, sadly, because filter.begin moves.
1726   if (llvm::all_of(Filtered, [&](Value *Arg) { return Arg == AllSameValue; })) {
1727     // In LLVM's non-standard representation of phi nodes, it's possible to have
1728     // phi nodes with cycles (IE dependent on other phis that are themselves
1729     // dependent on the original phi node), especially in weird CFGs where some
1730     // arguments are unreachable, or uninitialized along certain paths. This can
1731     // cause infinite loops during evaluation. We work around this by not trying
1732     // to really evaluate them independently, but instead using a variable
1733     // expression to say if one is equivalent to the other.
1734     // We also special case undef, so that if we have an undef, we can't use the
1735     // common value unless it dominates the phi block.
1736     if (HasUndef) {
1737       // If we have undef and at least one other value, this is really a
1738       // multivalued phi, and we need to know if it's cycle free in order to
1739       // evaluate whether we can ignore the undef.
The other parts of this are 1740 // just shortcuts. If there is no backedge, or all operands are 1741 // constants, it also must be cycle free. 1742 if (HasBackedge && !OriginalOpsConstant && 1743 !isa<UndefValue>(AllSameValue) && !isCycleFree(I)) 1744 return E; 1745 1746 // Only have to check for instructions 1747 if (auto *AllSameInst = dyn_cast<Instruction>(AllSameValue)) 1748 if (!someEquivalentDominates(AllSameInst, I)) 1749 return E; 1750 } 1751 // Can't simplify to something that comes later in the iteration. 1752 // Otherwise, when and if it changes congruence class, we will never catch 1753 // up. We will always be a class behind it. 1754 if (isa<Instruction>(AllSameValue) && 1755 InstrToDFSNum(AllSameValue) > InstrToDFSNum(I)) 1756 return E; 1757 NumGVNPhisAllSame++; 1758 LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue 1759 << "\n"); 1760 deleteExpression(E); 1761 return createVariableOrConstant(AllSameValue); 1762 } 1763 return E; 1764 } 1765 1766 const Expression * 1767 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const { 1768 if (auto *EI = dyn_cast<ExtractValueInst>(I)) { 1769 auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand()); 1770 if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) 1771 // EI is an extract from one of our with.overflow intrinsics. Synthesize 1772 // a semantically equivalent expression instead of an extract value 1773 // expression. 1774 return createBinaryExpression(WO->getBinaryOp(), EI->getType(), 1775 WO->getLHS(), WO->getRHS(), I); 1776 } 1777 1778 return createAggregateValueExpression(I); 1779 } 1780 1781 const Expression *NewGVN::performSymbolicCmpEvaluation(Instruction *I) const { 1782 assert(isa<CmpInst>(I) && "Expected a cmp instruction."); 1783 1784 auto *CI = cast<CmpInst>(I); 1785 // See if our operands are equal to those of a previous predicate, and if so, 1786 // if it implies true or false. 1787 auto Op0 = lookupOperandLeader(CI->getOperand(0)); 1788 auto Op1 = lookupOperandLeader(CI->getOperand(1)); 1789 auto OurPredicate = CI->getPredicate(); 1790 if (shouldSwapOperands(Op0, Op1)) { 1791 std::swap(Op0, Op1); 1792 OurPredicate = CI->getSwappedPredicate(); 1793 } 1794 1795 // Avoid processing the same info twice. 1796 const PredicateBase *LastPredInfo = nullptr; 1797 // See if we know something about the comparison itself, like it is the target 1798 // of an assume. 1799 auto *CmpPI = PredInfo->getPredicateInfoFor(I); 1800 if (dyn_cast_or_null<PredicateAssume>(CmpPI)) 1801 return createConstantExpression(ConstantInt::getTrue(CI->getType())); 1802 1803 if (Op0 == Op1) { 1804 // This condition does not depend on predicates, no need to add users 1805 if (CI->isTrueWhenEqual()) 1806 return createConstantExpression(ConstantInt::getTrue(CI->getType())); 1807 else if (CI->isFalseWhenEqual()) 1808 return createConstantExpression(ConstantInt::getFalse(CI->getType())); 1809 } 1810 1811 // NOTE: Because we are comparing both operands here and below, and using 1812 // previous comparisons, we rely on fact that predicateinfo knows to mark 1813 // comparisons that use renamed operands as users of the earlier comparisons. 1814 // It is *not* enough to just mark predicateinfo renamed operands as users of 1815 // the earlier comparisons, because the *other* operand may have changed in a 1816 // previous iteration. 
1817 // Example: 1818 // icmp slt %a, %b 1819 // %b.0 = ssa.copy(%b) 1820 // false branch: 1821 // icmp slt %c, %b.0 1822 1823 // %c and %a may start out equal, and thus, the code below will say the second 1824 // %icmp is false. c may become equal to something else, and in that case the 1825 // %second icmp *must* be reexamined, but would not if only the renamed 1826 // %operands are considered users of the icmp. 1827 1828 // *Currently* we only check one level of comparisons back, and only mark one 1829 // level back as touched when changes happen. If you modify this code to look 1830 // back farther through comparisons, you *must* mark the appropriate 1831 // comparisons as users in PredicateInfo.cpp, or you will cause bugs. See if 1832 // we know something just from the operands themselves 1833 1834 // See if our operands have predicate info, so that we may be able to derive 1835 // something from a previous comparison. 1836 for (const auto &Op : CI->operands()) { 1837 auto *PI = PredInfo->getPredicateInfoFor(Op); 1838 if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI)) { 1839 if (PI == LastPredInfo) 1840 continue; 1841 LastPredInfo = PI; 1842 // In phi of ops cases, we may have predicate info that we are evaluating 1843 // in a different context. 1844 if (!DT->dominates(PBranch->To, getBlockForValue(I))) 1845 continue; 1846 // TODO: Along the false edge, we may know more things too, like 1847 // icmp of 1848 // same operands is false. 1849 // TODO: We only handle actual comparison conditions below, not 1850 // and/or. 1851 auto *BranchCond = dyn_cast<CmpInst>(PBranch->Condition); 1852 if (!BranchCond) 1853 continue; 1854 auto *BranchOp0 = lookupOperandLeader(BranchCond->getOperand(0)); 1855 auto *BranchOp1 = lookupOperandLeader(BranchCond->getOperand(1)); 1856 auto BranchPredicate = BranchCond->getPredicate(); 1857 if (shouldSwapOperands(BranchOp0, BranchOp1)) { 1858 std::swap(BranchOp0, BranchOp1); 1859 BranchPredicate = BranchCond->getSwappedPredicate(); 1860 } 1861 if (BranchOp0 == Op0 && BranchOp1 == Op1) { 1862 if (PBranch->TrueEdge) { 1863 // If we know the previous predicate is true and we are in the true 1864 // edge then we may be implied true or false. 1865 if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate, 1866 OurPredicate)) { 1867 addPredicateUsers(PI, I); 1868 return createConstantExpression( 1869 ConstantInt::getTrue(CI->getType())); 1870 } 1871 1872 if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate, 1873 OurPredicate)) { 1874 addPredicateUsers(PI, I); 1875 return createConstantExpression( 1876 ConstantInt::getFalse(CI->getType())); 1877 } 1878 } else { 1879 // Just handle the ne and eq cases, where if we have the same 1880 // operands, we may know something. 1881 if (BranchPredicate == OurPredicate) { 1882 addPredicateUsers(PI, I); 1883 // Same predicate, same ops,we know it was false, so this is false. 1884 return createConstantExpression( 1885 ConstantInt::getFalse(CI->getType())); 1886 } else if (BranchPredicate == 1887 CmpInst::getInversePredicate(OurPredicate)) { 1888 addPredicateUsers(PI, I); 1889 // Inverse predicate, we know the other was false, so this is true. 1890 return createConstantExpression( 1891 ConstantInt::getTrue(CI->getType())); 1892 } 1893 } 1894 } 1895 } 1896 } 1897 // Create expression will take care of simplifyCmpInst 1898 return createExpression(I); 1899 } 1900 1901 // Substitute and symbolize the value before value numbering. 
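// For example (illustrative): %a = add i32 %x, %y is symbolized as a
// BasicExpression over the leaders of %x and %y; if both leaders are
// constants, the expression constant-folds and %a is value numbered to the
// resulting ConstantExpression instead.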
1902 const Expression * 1903 NewGVN::performSymbolicEvaluation(Value *V, 1904 SmallPtrSetImpl<Value *> &Visited) const { 1905 const Expression *E = nullptr; 1906 if (auto *C = dyn_cast<Constant>(V)) 1907 E = createConstantExpression(C); 1908 else if (isa<Argument>(V) || isa<GlobalVariable>(V)) { 1909 E = createVariableExpression(V); 1910 } else { 1911 // TODO: memory intrinsics. 1912 // TODO: Some day, we should do the forward propagation and reassociation 1913 // parts of the algorithm. 1914 auto *I = cast<Instruction>(V); 1915 switch (I->getOpcode()) { 1916 case Instruction::ExtractValue: 1917 case Instruction::InsertValue: 1918 E = performSymbolicAggrValueEvaluation(I); 1919 break; 1920 case Instruction::PHI: { 1921 SmallVector<ValPair, 3> Ops; 1922 auto *PN = cast<PHINode>(I); 1923 for (unsigned i = 0; i < PN->getNumOperands(); ++i) 1924 Ops.push_back({PN->getIncomingValue(i), PN->getIncomingBlock(i)}); 1925 // Sort to ensure the invariant createPHIExpression requires is met. 1926 sortPHIOps(Ops); 1927 E = performSymbolicPHIEvaluation(Ops, I, getBlockForValue(I)); 1928 } break; 1929 case Instruction::Call: 1930 E = performSymbolicCallEvaluation(I); 1931 break; 1932 case Instruction::Store: 1933 E = performSymbolicStoreEvaluation(I); 1934 break; 1935 case Instruction::Load: 1936 E = performSymbolicLoadEvaluation(I); 1937 break; 1938 case Instruction::BitCast: 1939 case Instruction::AddrSpaceCast: 1940 E = createExpression(I); 1941 break; 1942 case Instruction::ICmp: 1943 case Instruction::FCmp: 1944 E = performSymbolicCmpEvaluation(I); 1945 break; 1946 case Instruction::FNeg: 1947 case Instruction::Add: 1948 case Instruction::FAdd: 1949 case Instruction::Sub: 1950 case Instruction::FSub: 1951 case Instruction::Mul: 1952 case Instruction::FMul: 1953 case Instruction::UDiv: 1954 case Instruction::SDiv: 1955 case Instruction::FDiv: 1956 case Instruction::URem: 1957 case Instruction::SRem: 1958 case Instruction::FRem: 1959 case Instruction::Shl: 1960 case Instruction::LShr: 1961 case Instruction::AShr: 1962 case Instruction::And: 1963 case Instruction::Or: 1964 case Instruction::Xor: 1965 case Instruction::Trunc: 1966 case Instruction::ZExt: 1967 case Instruction::SExt: 1968 case Instruction::FPToUI: 1969 case Instruction::FPToSI: 1970 case Instruction::UIToFP: 1971 case Instruction::SIToFP: 1972 case Instruction::FPTrunc: 1973 case Instruction::FPExt: 1974 case Instruction::PtrToInt: 1975 case Instruction::IntToPtr: 1976 case Instruction::Select: 1977 case Instruction::ExtractElement: 1978 case Instruction::InsertElement: 1979 case Instruction::GetElementPtr: 1980 E = createExpression(I); 1981 break; 1982 case Instruction::ShuffleVector: 1983 // FIXME: Add support for shufflevector to createExpression. 1984 return nullptr; 1985 default: 1986 return nullptr; 1987 } 1988 } 1989 return E; 1990 } 1991 1992 // Look up a container of values/instructions in a map, and touch all the 1993 // instructions in the container. Then erase value from the map. 
1994 template <typename Map, typename KeyType> 1995 void NewGVN::touchAndErase(Map &M, const KeyType &Key) { 1996 const auto Result = M.find_as(Key); 1997 if (Result != M.end()) { 1998 for (const typename Map::mapped_type::value_type Mapped : Result->second) 1999 TouchedInstructions.set(InstrToDFSNum(Mapped)); 2000 M.erase(Result); 2001 } 2002 } 2003 2004 void NewGVN::addAdditionalUsers(Value *To, Value *User) const { 2005 assert(User && To != User); 2006 if (isa<Instruction>(To)) 2007 AdditionalUsers[To].insert(User); 2008 } 2009 2010 void NewGVN::markUsersTouched(Value *V) { 2011 // Now mark the users as touched. 2012 for (auto *User : V->users()) { 2013 assert(isa<Instruction>(User) && "Use of value not within an instruction?"); 2014 TouchedInstructions.set(InstrToDFSNum(User)); 2015 } 2016 touchAndErase(AdditionalUsers, V); 2017 } 2018 2019 void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const { 2020 LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n"); 2021 MemoryToUsers[To].insert(U); 2022 } 2023 2024 void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) { 2025 TouchedInstructions.set(MemoryToDFSNum(MA)); 2026 } 2027 2028 void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) { 2029 if (isa<MemoryUse>(MA)) 2030 return; 2031 for (auto U : MA->users()) 2032 TouchedInstructions.set(MemoryToDFSNum(U)); 2033 touchAndErase(MemoryToUsers, MA); 2034 } 2035 2036 // Add I to the set of users of a given predicate. 2037 void NewGVN::addPredicateUsers(const PredicateBase *PB, Instruction *I) const { 2038 // Don't add temporary instructions to the user lists. 2039 if (AllTempInstructions.count(I)) 2040 return; 2041 2042 if (auto *PBranch = dyn_cast<PredicateBranch>(PB)) 2043 PredicateToUsers[PBranch->Condition].insert(I); 2044 else if (auto *PAssume = dyn_cast<PredicateAssume>(PB)) 2045 PredicateToUsers[PAssume->Condition].insert(I); 2046 } 2047 2048 // Touch all the predicates that depend on this instruction. 2049 void NewGVN::markPredicateUsersTouched(Instruction *I) { 2050 touchAndErase(PredicateToUsers, I); 2051 } 2052 2053 // Mark users affected by a memory leader change. 2054 void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) { 2055 for (auto M : CC->memory()) 2056 markMemoryDefTouched(M); 2057 } 2058 2059 // Touch the instructions that need to be updated after a congruence class has a 2060 // leader change, and mark changed values. 2061 void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) { 2062 for (auto M : *CC) { 2063 if (auto *I = dyn_cast<Instruction>(M)) 2064 TouchedInstructions.set(InstrToDFSNum(I)); 2065 LeaderChanges.insert(M); 2066 } 2067 } 2068 2069 // Give a range of things that have instruction DFS numbers, this will return 2070 // the member of the range with the smallest dfs number. 2071 template <class T, class Range> 2072 T *NewGVN::getMinDFSOfRange(const Range &R) const { 2073 std::pair<T *, unsigned> MinDFS = {nullptr, ~0U}; 2074 for (const auto X : R) { 2075 auto DFSNum = InstrToDFSNum(X); 2076 if (DFSNum < MinDFS.second) 2077 MinDFS = {X, DFSNum}; 2078 } 2079 return MinDFS.first; 2080 } 2081 2082 // This function returns the MemoryAccess that should be the next leader of 2083 // congruence class CC, under the assumption that the current leader is going to 2084 // disappear. 2085 const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const { 2086 // TODO: If this ends up to slow, we can maintain a next memory leader like we 2087 // do for regular leaders. 
2088 // Make sure there will be a leader to find. 2089 assert(!CC->definesNoMemory() && "Can't get next leader if there is none"); 2090 if (CC->getStoreCount() > 0) { 2091 if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first)) 2092 return getMemoryAccess(NL); 2093 // Find the store with the minimum DFS number. 2094 auto *V = getMinDFSOfRange<Value>(make_filter_range( 2095 *CC, [&](const Value *V) { return isa<StoreInst>(V); })); 2096 return getMemoryAccess(cast<StoreInst>(V)); 2097 } 2098 assert(CC->getStoreCount() == 0); 2099 2100 // Given our assertion, hitting this part must mean 2101 // !OldClass->memory_empty() 2102 if (CC->memory_size() == 1) 2103 return *CC->memory_begin(); 2104 return getMinDFSOfRange<const MemoryPhi>(CC->memory()); 2105 } 2106 2107 // This function returns the next value leader of a congruence class, under the 2108 // assumption that the current leader is going away. This should end up being 2109 // the next most dominating member. 2110 Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const { 2111 // We don't need to sort members if there is only 1, and we don't care about 2112 // sorting the TOP class because everything either gets out of it or is 2113 // unreachable. 2114 2115 if (CC->size() == 1 || CC == TOPClass) { 2116 return *(CC->begin()); 2117 } else if (CC->getNextLeader().first) { 2118 ++NumGVNAvoidedSortedLeaderChanges; 2119 return CC->getNextLeader().first; 2120 } else { 2121 ++NumGVNSortedLeaderChanges; 2122 // NOTE: If this ends up to slow, we can maintain a dual structure for 2123 // member testing/insertion, or keep things mostly sorted, and sort only 2124 // here, or use SparseBitVector or .... 2125 return getMinDFSOfRange<Value>(*CC); 2126 } 2127 } 2128 2129 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to 2130 // the memory members, etc for the move. 2131 // 2132 // The invariants of this function are: 2133 // 2134 // - I must be moving to NewClass from OldClass 2135 // - The StoreCount of OldClass and NewClass is expected to have been updated 2136 // for I already if it is a store. 2137 // - The OldClass memory leader has not been updated yet if I was the leader. 2138 void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I, 2139 MemoryAccess *InstMA, 2140 CongruenceClass *OldClass, 2141 CongruenceClass *NewClass) { 2142 // If the leader is I, and we had a representative MemoryAccess, it should 2143 // be the MemoryAccess of OldClass. 2144 assert((!InstMA || !OldClass->getMemoryLeader() || 2145 OldClass->getLeader() != I || 2146 MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) == 2147 MemoryAccessToClass.lookup(InstMA)) && 2148 "Representative MemoryAccess mismatch"); 2149 // First, see what happens to the new class 2150 if (!NewClass->getMemoryLeader()) { 2151 // Should be a new class, or a store becoming a leader of a new class. 
2152 assert(NewClass->size() == 1 || 2153 (isa<StoreInst>(I) && NewClass->getStoreCount() == 1)); 2154 NewClass->setMemoryLeader(InstMA); 2155 // Mark it touched if we didn't just create a singleton 2156 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 2157 << NewClass->getID() 2158 << " due to new memory instruction becoming leader\n"); 2159 markMemoryLeaderChangeTouched(NewClass); 2160 } 2161 setMemoryClass(InstMA, NewClass); 2162 // Now, fixup the old class if necessary 2163 if (OldClass->getMemoryLeader() == InstMA) { 2164 if (!OldClass->definesNoMemory()) { 2165 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); 2166 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 2167 << OldClass->getID() << " to " 2168 << *OldClass->getMemoryLeader() 2169 << " due to removal of old leader " << *InstMA << "\n"); 2170 markMemoryLeaderChangeTouched(OldClass); 2171 } else 2172 OldClass->setMemoryLeader(nullptr); 2173 } 2174 } 2175 2176 // Move a value, currently in OldClass, to be part of NewClass 2177 // Update OldClass and NewClass for the move (including changing leaders, etc). 2178 void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, 2179 CongruenceClass *OldClass, 2180 CongruenceClass *NewClass) { 2181 if (I == OldClass->getNextLeader().first) 2182 OldClass->resetNextLeader(); 2183 2184 OldClass->erase(I); 2185 NewClass->insert(I); 2186 2187 if (NewClass->getLeader() != I) 2188 NewClass->addPossibleNextLeader({I, InstrToDFSNum(I)}); 2189 // Handle our special casing of stores. 2190 if (auto *SI = dyn_cast<StoreInst>(I)) { 2191 OldClass->decStoreCount(); 2192 // Okay, so when do we want to make a store a leader of a class? 2193 // If we have a store defined by an earlier load, we want the earlier load 2194 // to lead the class. 2195 // If we have a store defined by something else, we want the store to lead 2196 // the class so everything else gets the "something else" as a value. 2197 // If we have a store as the single member of the class, we want the store 2198 // as the leader 2199 if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) { 2200 // If it's a store expression we are using, it means we are not equivalent 2201 // to something earlier. 2202 if (auto *SE = dyn_cast<StoreExpression>(E)) { 2203 NewClass->setStoredValue(SE->getStoredValue()); 2204 markValueLeaderChangeTouched(NewClass); 2205 // Shift the new class leader to be the store 2206 LLVM_DEBUG(dbgs() << "Changing leader of congruence class " 2207 << NewClass->getID() << " from " 2208 << *NewClass->getLeader() << " to " << *SI 2209 << " because store joined class\n"); 2210 // If we changed the leader, we have to mark it changed because we don't 2211 // know what it will do to symbolic evaluation. 2212 NewClass->setLeader(SI); 2213 } 2214 // We rely on the code below handling the MemoryAccess change. 2215 } 2216 NewClass->incStoreCount(); 2217 } 2218 // True if there is no memory instructions left in a class that had memory 2219 // instructions before. 2220 2221 // If it's not a memory use, set the MemoryAccess equivalence 2222 auto *InstMA = dyn_cast_or_null<MemoryDef>(getMemoryAccess(I)); 2223 if (InstMA) 2224 moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass); 2225 ValueToClass[I] = NewClass; 2226 // See if we destroyed the class or need to swap leaders. 
2227 if (OldClass->empty() && OldClass != TOPClass) { 2228 if (OldClass->getDefiningExpr()) { 2229 LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr() 2230 << " from table\n"); 2231 // We erase it as an exact expression to make sure we don't just erase an 2232 // equivalent one. 2233 auto Iter = ExpressionToClass.find_as( 2234 ExactEqualsExpression(*OldClass->getDefiningExpr())); 2235 if (Iter != ExpressionToClass.end()) 2236 ExpressionToClass.erase(Iter); 2237 #ifdef EXPENSIVE_CHECKS 2238 assert( 2239 (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) && 2240 "We erased the expression we just inserted, which should not happen"); 2241 #endif 2242 } 2243 } else if (OldClass->getLeader() == I) { 2244 // When the leader changes, the value numbering of 2245 // everything may change due to symbolization changes, so we need to 2246 // reprocess. 2247 LLVM_DEBUG(dbgs() << "Value class leader change for class " 2248 << OldClass->getID() << "\n"); 2249 ++NumGVNLeaderChanges; 2250 // Destroy the stored value if there are no more stores to represent it. 2251 // Note that this is basically clean up for the expression removal that 2252 // happens below. If we remove stores from a class, we may leave it as a 2253 // class of equivalent memory phis. 2254 if (OldClass->getStoreCount() == 0) { 2255 if (OldClass->getStoredValue()) 2256 OldClass->setStoredValue(nullptr); 2257 } 2258 OldClass->setLeader(getNextValueLeader(OldClass)); 2259 OldClass->resetNextLeader(); 2260 markValueLeaderChangeTouched(OldClass); 2261 } 2262 } 2263 2264 // For a given expression, mark the phi of ops instructions that could have 2265 // changed as a result. 2266 void NewGVN::markPhiOfOpsChanged(const Expression *E) { 2267 touchAndErase(ExpressionToPhiOfOps, E); 2268 } 2269 2270 // Perform congruence finding on a given value numbering expression. 2271 void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) { 2272 // This is guaranteed to return something, since it will at least find 2273 // TOP. 2274 2275 CongruenceClass *IClass = ValueToClass.lookup(I); 2276 assert(IClass && "Should have found a IClass"); 2277 // Dead classes should have been eliminated from the mapping. 2278 assert(!IClass->isDead() && "Found a dead class"); 2279 2280 CongruenceClass *EClass = nullptr; 2281 if (const auto *VE = dyn_cast<VariableExpression>(E)) { 2282 EClass = ValueToClass.lookup(VE->getVariableValue()); 2283 } else if (isa<DeadExpression>(E)) { 2284 EClass = TOPClass; 2285 } 2286 if (!EClass) { 2287 auto lookupResult = ExpressionToClass.insert({E, nullptr}); 2288 2289 // If it's not in the value table, create a new congruence class. 2290 if (lookupResult.second) { 2291 CongruenceClass *NewClass = createCongruenceClass(nullptr, E); 2292 auto place = lookupResult.first; 2293 place->second = NewClass; 2294 2295 // Constants and variables should always be made the leader. 2296 if (const auto *CE = dyn_cast<ConstantExpression>(E)) { 2297 NewClass->setLeader(CE->getConstantValue()); 2298 } else if (const auto *SE = dyn_cast<StoreExpression>(E)) { 2299 StoreInst *SI = SE->getStoreInst(); 2300 NewClass->setLeader(SI); 2301 NewClass->setStoredValue(SE->getStoredValue()); 2302 // The RepMemoryAccess field will be filled in properly by the 2303 // moveValueToNewCongruenceClass call. 
2304 } else { 2305 NewClass->setLeader(I); 2306 } 2307 assert(!isa<VariableExpression>(E) && 2308 "VariableExpression should have been handled already"); 2309 2310 EClass = NewClass; 2311 LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I 2312 << " using expression " << *E << " at " 2313 << NewClass->getID() << " and leader " 2314 << *(NewClass->getLeader())); 2315 if (NewClass->getStoredValue()) 2316 LLVM_DEBUG(dbgs() << " and stored value " 2317 << *(NewClass->getStoredValue())); 2318 LLVM_DEBUG(dbgs() << "\n"); 2319 } else { 2320 EClass = lookupResult.first->second; 2321 if (isa<ConstantExpression>(E)) 2322 assert((isa<Constant>(EClass->getLeader()) || 2323 (EClass->getStoredValue() && 2324 isa<Constant>(EClass->getStoredValue()))) && 2325 "Any class with a constant expression should have a " 2326 "constant leader"); 2327 2328 assert(EClass && "Somehow don't have an eclass"); 2329 2330 assert(!EClass->isDead() && "We accidentally looked up a dead class"); 2331 } 2332 } 2333 bool ClassChanged = IClass != EClass; 2334 bool LeaderChanged = LeaderChanges.erase(I); 2335 if (ClassChanged || LeaderChanged) { 2336 LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " 2337 << *E << "\n"); 2338 if (ClassChanged) { 2339 moveValueToNewCongruenceClass(I, E, IClass, EClass); 2340 markPhiOfOpsChanged(E); 2341 } 2342 2343 markUsersTouched(I); 2344 if (MemoryAccess *MA = getMemoryAccess(I)) 2345 markMemoryUsersTouched(MA); 2346 if (auto *CI = dyn_cast<CmpInst>(I)) 2347 markPredicateUsersTouched(CI); 2348 } 2349 // If we changed the class of the store, we want to ensure nothing finds the 2350 // old store expression. In particular, loads do not compare against stored 2351 // value, so they will find old store expressions (and associated class 2352 // mappings) if we leave them in the table. 2353 if (ClassChanged && isa<StoreInst>(I)) { 2354 auto *OldE = ValueToExpression.lookup(I); 2355 // It could just be that the old class died. We don't want to erase it if we 2356 // just moved classes. 2357 if (OldE && isa<StoreExpression>(OldE) && *E != *OldE) { 2358 // Erase this as an exact expression to ensure we don't erase expressions 2359 // equivalent to it. 2360 auto Iter = ExpressionToClass.find_as(ExactEqualsExpression(*OldE)); 2361 if (Iter != ExpressionToClass.end()) 2362 ExpressionToClass.erase(Iter); 2363 } 2364 } 2365 ValueToExpression[I] = E; 2366 } 2367 2368 // Process the fact that Edge (from, to) is reachable, including marking 2369 // any newly reachable blocks and instructions for processing. 2370 void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) { 2371 // Check if the Edge was reachable before. 2372 if (ReachableEdges.insert({From, To}).second) { 2373 // If this block wasn't reachable before, all instructions are touched. 2374 if (ReachableBlocks.insert(To).second) { 2375 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) 2376 << " marked reachable\n"); 2377 const auto &InstRange = BlockInstRange.lookup(To); 2378 TouchedInstructions.set(InstRange.first, InstRange.second); 2379 } else { 2380 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) 2381 << " was reachable, but new edge {" 2382 << getBlockName(From) << "," << getBlockName(To) 2383 << "} to it found\n"); 2384 2385 // We've made an edge reachable to an existing block, which may 2386 // impact predicates. Otherwise, only mark the phi nodes as touched, as 2387 // they are the only thing that depend on new edges. Anything using their 2388 // values will get propagated to if necessary. 
2389 if (MemoryAccess *MemPhi = getMemoryAccess(To)) 2390 TouchedInstructions.set(InstrToDFSNum(MemPhi)); 2391 2392 // FIXME: We should just add a union op on a Bitvector and 2393 // SparseBitVector. We can do it word by word faster than we are doing it 2394 // here. 2395 for (auto InstNum : RevisitOnReachabilityChange[To]) 2396 TouchedInstructions.set(InstNum); 2397 } 2398 } 2399 } 2400 2401 // Given a predicate condition (from a switch, cmp, or whatever) and a block, 2402 // see if we know some constant value for it already. 2403 Value *NewGVN::findConditionEquivalence(Value *Cond) const { 2404 auto Result = lookupOperandLeader(Cond); 2405 return isa<Constant>(Result) ? Result : nullptr; 2406 } 2407 2408 // Process the outgoing edges of a block for reachability. 2409 void NewGVN::processOutgoingEdges(Instruction *TI, BasicBlock *B) { 2410 // Evaluate reachability of terminator instruction. 2411 Value *Cond; 2412 BasicBlock *TrueSucc, *FalseSucc; 2413 if (match(TI, m_Br(m_Value(Cond), TrueSucc, FalseSucc))) { 2414 Value *CondEvaluated = findConditionEquivalence(Cond); 2415 if (!CondEvaluated) { 2416 if (auto *I = dyn_cast<Instruction>(Cond)) { 2417 const Expression *E = createExpression(I); 2418 if (const auto *CE = dyn_cast<ConstantExpression>(E)) { 2419 CondEvaluated = CE->getConstantValue(); 2420 } 2421 } else if (isa<ConstantInt>(Cond)) { 2422 CondEvaluated = Cond; 2423 } 2424 } 2425 ConstantInt *CI; 2426 if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) { 2427 if (CI->isOne()) { 2428 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI 2429 << " evaluated to true\n"); 2430 updateReachableEdge(B, TrueSucc); 2431 } else if (CI->isZero()) { 2432 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI 2433 << " evaluated to false\n"); 2434 updateReachableEdge(B, FalseSucc); 2435 } 2436 } else { 2437 updateReachableEdge(B, TrueSucc); 2438 updateReachableEdge(B, FalseSucc); 2439 } 2440 } else if (auto *SI = dyn_cast<SwitchInst>(TI)) { 2441 // For switches, propagate the case values into the case 2442 // destinations. 2443 2444 Value *SwitchCond = SI->getCondition(); 2445 Value *CondEvaluated = findConditionEquivalence(SwitchCond); 2446 // See if we were able to turn this switch statement into a constant. 2447 if (CondEvaluated && isa<ConstantInt>(CondEvaluated)) { 2448 auto *CondVal = cast<ConstantInt>(CondEvaluated); 2449 // We should be able to get case value for this. 2450 auto Case = *SI->findCaseValue(CondVal); 2451 if (Case.getCaseSuccessor() == SI->getDefaultDest()) { 2452 // We proved the value is outside of the range of the case. 2453 // We can't do anything other than mark the default dest as reachable, 2454 // and go home. 2455 updateReachableEdge(B, SI->getDefaultDest()); 2456 return; 2457 } 2458 // Now get where it goes and mark it reachable. 2459 BasicBlock *TargetBlock = Case.getCaseSuccessor(); 2460 updateReachableEdge(B, TargetBlock); 2461 } else { 2462 for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) { 2463 BasicBlock *TargetBlock = SI->getSuccessor(i); 2464 updateReachableEdge(B, TargetBlock); 2465 } 2466 } 2467 } else { 2468 // Otherwise this is either unconditional, or a type we have no 2469 // idea about. Just mark successors as reachable. 2470 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) { 2471 BasicBlock *TargetBlock = TI->getSuccessor(i); 2472 updateReachableEdge(B, TargetBlock); 2473 } 2474 2475 // This also may be a memory defining terminator, in which case, set it 2476 // equivalent only to itself. 
2477 // 2478 auto *MA = getMemoryAccess(TI); 2479 if (MA && !isa<MemoryUse>(MA)) { 2480 auto *CC = ensureLeaderOfMemoryClass(MA); 2481 if (setMemoryClass(MA, CC)) 2482 markMemoryUsersTouched(MA); 2483 } 2484 } 2485 } 2486 2487 // Remove the PHI of Ops PHI for I 2488 void NewGVN::removePhiOfOps(Instruction *I, PHINode *PHITemp) { 2489 InstrDFS.erase(PHITemp); 2490 // It's still a temp instruction. We keep it in the array so it gets erased. 2491 // However, it's no longer used by I, or in the block 2492 TempToBlock.erase(PHITemp); 2493 RealToTemp.erase(I); 2494 // We don't remove the users from the phi node uses. This wastes a little 2495 // time, but such is life. We could use two sets to track which were there 2496 // are the start of NewGVN, and which were added, but right nowt he cost of 2497 // tracking is more than the cost of checking for more phi of ops. 2498 } 2499 2500 // Add PHI Op in BB as a PHI of operations version of ExistingValue. 2501 void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB, 2502 Instruction *ExistingValue) { 2503 InstrDFS[Op] = InstrToDFSNum(ExistingValue); 2504 AllTempInstructions.insert(Op); 2505 TempToBlock[Op] = BB; 2506 RealToTemp[ExistingValue] = Op; 2507 // Add all users to phi node use, as they are now uses of the phi of ops phis 2508 // and may themselves be phi of ops. 2509 for (auto *U : ExistingValue->users()) 2510 if (auto *UI = dyn_cast<Instruction>(U)) 2511 PHINodeUses.insert(UI); 2512 } 2513 2514 static bool okayForPHIOfOps(const Instruction *I) { 2515 if (!EnablePhiOfOps) 2516 return false; 2517 return isa<BinaryOperator>(I) || isa<SelectInst>(I) || isa<CmpInst>(I) || 2518 isa<LoadInst>(I); 2519 } 2520 2521 bool NewGVN::OpIsSafeForPHIOfOpsHelper( 2522 Value *V, const BasicBlock *PHIBlock, 2523 SmallPtrSetImpl<const Value *> &Visited, 2524 SmallVectorImpl<Instruction *> &Worklist) { 2525 2526 if (!isa<Instruction>(V)) 2527 return true; 2528 auto OISIt = OpSafeForPHIOfOps.find(V); 2529 if (OISIt != OpSafeForPHIOfOps.end()) 2530 return OISIt->second; 2531 2532 // Keep walking until we either dominate the phi block, or hit a phi, or run 2533 // out of things to check. 2534 if (DT->properlyDominates(getBlockForValue(V), PHIBlock)) { 2535 OpSafeForPHIOfOps.insert({V, true}); 2536 return true; 2537 } 2538 // PHI in the same block. 2539 if (isa<PHINode>(V) && getBlockForValue(V) == PHIBlock) { 2540 OpSafeForPHIOfOps.insert({V, false}); 2541 return false; 2542 } 2543 2544 auto *OrigI = cast<Instruction>(V); 2545 for (auto *Op : OrigI->operand_values()) { 2546 if (!isa<Instruction>(Op)) 2547 continue; 2548 // Stop now if we find an unsafe operand. 2549 auto OISIt = OpSafeForPHIOfOps.find(OrigI); 2550 if (OISIt != OpSafeForPHIOfOps.end()) { 2551 if (!OISIt->second) { 2552 OpSafeForPHIOfOps.insert({V, false}); 2553 return false; 2554 } 2555 continue; 2556 } 2557 if (!Visited.insert(Op).second) 2558 continue; 2559 Worklist.push_back(cast<Instruction>(Op)); 2560 } 2561 return true; 2562 } 2563 2564 // Return true if this operand will be safe to use for phi of ops. 2565 // 2566 // The reason some operands are unsafe is that we are not trying to recursively 2567 // translate everything back through phi nodes. We actually expect some lookups 2568 // of expressions to fail. In particular, a lookup where the expression cannot 2569 // exist in the predecessor. This is true even if the expression, as shown, can 2570 // be determined to be constant. 
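// For example (illustrative): if %t is a phi in the phi block and
// %x = add i32 %t, 1, then %x is not a safe operand: translating the add into
// a predecessor requires a per-predecessor value for %t, and the translated
// expression may simply not exist there. An operand that properly dominates
// the phi block is always safe.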
2571 bool NewGVN::OpIsSafeForPHIOfOps(Value *V, const BasicBlock *PHIBlock, 2572 SmallPtrSetImpl<const Value *> &Visited) { 2573 SmallVector<Instruction *, 4> Worklist; 2574 if (!OpIsSafeForPHIOfOpsHelper(V, PHIBlock, Visited, Worklist)) 2575 return false; 2576 while (!Worklist.empty()) { 2577 auto *I = Worklist.pop_back_val(); 2578 if (!OpIsSafeForPHIOfOpsHelper(I, PHIBlock, Visited, Worklist)) 2579 return false; 2580 } 2581 OpSafeForPHIOfOps.insert({V, true}); 2582 return true; 2583 } 2584 2585 // Try to find a leader for instruction TransInst, which is a phi translated 2586 // version of something in our original program. Visited is used to ensure we 2587 // don't infinite loop during translations of cycles. OrigInst is the 2588 // instruction in the original program, and PredBB is the predecessor we 2589 // translated it through. 2590 Value *NewGVN::findLeaderForInst(Instruction *TransInst, 2591 SmallPtrSetImpl<Value *> &Visited, 2592 MemoryAccess *MemAccess, Instruction *OrigInst, 2593 BasicBlock *PredBB) { 2594 unsigned IDFSNum = InstrToDFSNum(OrigInst); 2595 // Make sure it's marked as a temporary instruction. 2596 AllTempInstructions.insert(TransInst); 2597 // and make sure anything that tries to add it's DFS number is 2598 // redirected to the instruction we are making a phi of ops 2599 // for. 2600 TempToBlock.insert({TransInst, PredBB}); 2601 InstrDFS.insert({TransInst, IDFSNum}); 2602 2603 const Expression *E = performSymbolicEvaluation(TransInst, Visited); 2604 InstrDFS.erase(TransInst); 2605 AllTempInstructions.erase(TransInst); 2606 TempToBlock.erase(TransInst); 2607 if (MemAccess) 2608 TempToMemory.erase(TransInst); 2609 if (!E) 2610 return nullptr; 2611 auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB); 2612 if (!FoundVal) { 2613 ExpressionToPhiOfOps[E].insert(OrigInst); 2614 LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst 2615 << " in block " << getBlockName(PredBB) << "\n"); 2616 return nullptr; 2617 } 2618 if (auto *SI = dyn_cast<StoreInst>(FoundVal)) 2619 FoundVal = SI->getValueOperand(); 2620 return FoundVal; 2621 } 2622 2623 // When we see an instruction that is an op of phis, generate the equivalent phi 2624 // of ops form. 2625 const Expression * 2626 NewGVN::makePossiblePHIOfOps(Instruction *I, 2627 SmallPtrSetImpl<Value *> &Visited) { 2628 if (!okayForPHIOfOps(I)) 2629 return nullptr; 2630 2631 if (!Visited.insert(I).second) 2632 return nullptr; 2633 // For now, we require the instruction be cycle free because we don't 2634 // *always* create a phi of ops for instructions that could be done as phi 2635 // of ops, we only do it if we think it is useful. If we did do it all the 2636 // time, we could remove the cycle free check. 2637 if (!isCycleFree(I)) 2638 return nullptr; 2639 2640 SmallPtrSet<const Value *, 8> ProcessedPHIs; 2641 // TODO: We don't do phi translation on memory accesses because it's 2642 // complicated. For a load, we'd need to be able to simulate a new memoryuse, 2643 // which we don't have a good way of doing ATM. 2644 auto *MemAccess = getMemoryAccess(I); 2645 // If the memory operation is defined by a memory operation this block that 2646 // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi 2647 // can't help, as it would still be killed by that memory operation. 
2648 if (MemAccess && !isa<MemoryPhi>(MemAccess->getDefiningAccess()) && 2649 MemAccess->getDefiningAccess()->getBlock() == I->getParent()) 2650 return nullptr; 2651 2652 // Convert op of phis to phi of ops 2653 SmallPtrSet<const Value *, 10> VisitedOps; 2654 SmallVector<Value *, 4> Ops(I->operand_values()); 2655 BasicBlock *SamePHIBlock = nullptr; 2656 PHINode *OpPHI = nullptr; 2657 if (!DebugCounter::shouldExecute(PHIOfOpsCounter)) 2658 return nullptr; 2659 for (auto *Op : Ops) { 2660 if (!isa<PHINode>(Op)) { 2661 auto *ValuePHI = RealToTemp.lookup(Op); 2662 if (!ValuePHI) 2663 continue; 2664 LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n"); 2665 Op = ValuePHI; 2666 } 2667 OpPHI = cast<PHINode>(Op); 2668 if (!SamePHIBlock) { 2669 SamePHIBlock = getBlockForValue(OpPHI); 2670 } else if (SamePHIBlock != getBlockForValue(OpPHI)) { 2671 LLVM_DEBUG( 2672 dbgs() 2673 << "PHIs for operands are not all in the same block, aborting\n"); 2674 return nullptr; 2675 } 2676 // No point in doing this for one-operand phis. 2677 if (OpPHI->getNumOperands() == 1) { 2678 OpPHI = nullptr; 2679 continue; 2680 } 2681 } 2682 2683 if (!OpPHI) 2684 return nullptr; 2685 2686 SmallVector<ValPair, 4> PHIOps; 2687 SmallPtrSet<Value *, 4> Deps; 2688 auto *PHIBlock = getBlockForValue(OpPHI); 2689 RevisitOnReachabilityChange[PHIBlock].reset(InstrToDFSNum(I)); 2690 for (unsigned PredNum = 0; PredNum < OpPHI->getNumOperands(); ++PredNum) { 2691 auto *PredBB = OpPHI->getIncomingBlock(PredNum); 2692 Value *FoundVal = nullptr; 2693 SmallPtrSet<Value *, 4> CurrentDeps; 2694 // We could just skip unreachable edges entirely but it's tricky to do 2695 // with rewriting existing phi nodes. 2696 if (ReachableEdges.count({PredBB, PHIBlock})) { 2697 // Clone the instruction, create an expression from it that is 2698 // translated back into the predecessor, and see if we have a leader. 2699 Instruction *ValueOp = I->clone(); 2700 if (MemAccess) 2701 TempToMemory.insert({ValueOp, MemAccess}); 2702 bool SafeForPHIOfOps = true; 2703 VisitedOps.clear(); 2704 for (auto &Op : ValueOp->operands()) { 2705 auto *OrigOp = &*Op; 2706 // When these operand changes, it could change whether there is a 2707 // leader for us or not, so we have to add additional users. 2708 if (isa<PHINode>(Op)) { 2709 Op = Op->DoPHITranslation(PHIBlock, PredBB); 2710 if (Op != OrigOp && Op != I) 2711 CurrentDeps.insert(Op); 2712 } else if (auto *ValuePHI = RealToTemp.lookup(Op)) { 2713 if (getBlockForValue(ValuePHI) == PHIBlock) 2714 Op = ValuePHI->getIncomingValueForBlock(PredBB); 2715 } 2716 // If we phi-translated the op, it must be safe. 2717 SafeForPHIOfOps = 2718 SafeForPHIOfOps && 2719 (Op != OrigOp || OpIsSafeForPHIOfOps(Op, PHIBlock, VisitedOps)); 2720 } 2721 // FIXME: For those things that are not safe we could generate 2722 // expressions all the way down, and see if this comes out to a 2723 // constant. For anything where that is true, and unsafe, we should 2724 // have made a phi-of-ops (or value numbered it equivalent to something) 2725 // for the pieces already. 2726 FoundVal = !SafeForPHIOfOps ? nullptr 2727 : findLeaderForInst(ValueOp, Visited, 2728 MemAccess, I, PredBB); 2729 ValueOp->deleteValue(); 2730 if (!FoundVal) { 2731 // We failed to find a leader for the current ValueOp, but this might 2732 // change in case of the translated operands change. 
2733 if (SafeForPHIOfOps)
2734 for (auto Dep : CurrentDeps)
2735 addAdditionalUsers(Dep, I);
2736 
2737 return nullptr;
2738 }
2739 Deps.insert(CurrentDeps.begin(), CurrentDeps.end());
2740 } else {
2741 LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
2742 << getBlockName(PredBB)
2743 << " because the block is unreachable\n");
2744 FoundVal = UndefValue::get(I->getType());
2745 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
2746 }
2747 
2748 PHIOps.push_back({FoundVal, PredBB});
2749 LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
2750 << getBlockName(PredBB) << "\n");
2751 }
2752 for (auto Dep : Deps)
2753 addAdditionalUsers(Dep, I);
2754 sortPHIOps(PHIOps);
2755 auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock);
2756 if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) {
2757 LLVM_DEBUG(
2758 dbgs()
2759 << "Not creating real PHI of ops because it simplified to existing "
2760 "value or constant\n");
2761 return E;
2762 }
2763 auto *ValuePHI = RealToTemp.lookup(I);
2764 bool NewPHI = false;
2765 if (!ValuePHI) {
2766 ValuePHI =
2767 PHINode::Create(I->getType(), OpPHI->getNumOperands(), "phiofops");
2768 addPhiOfOps(ValuePHI, PHIBlock, I);
2769 NewPHI = true;
2770 NumGVNPHIOfOpsCreated++;
2771 }
2772 if (NewPHI) {
2773 for (auto PHIOp : PHIOps)
2774 ValuePHI->addIncoming(PHIOp.first, PHIOp.second);
2775 } else {
2776 TempToBlock[ValuePHI] = PHIBlock;
2777 unsigned int i = 0;
2778 for (auto PHIOp : PHIOps) {
2779 ValuePHI->setIncomingValue(i, PHIOp.first);
2780 ValuePHI->setIncomingBlock(i, PHIOp.second);
2781 ++i;
2782 }
2783 }
2784 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
2785 LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I
2786 << "\n");
2787 
2788 return E;
2789 }
2790 
2791 // The algorithm initially places the values of the routine in the TOP
2792 // congruence class. The leader of TOP is the undetermined value `undef`.
2793 // When the algorithm has finished, values still in TOP are unreachable.
2794 void NewGVN::initializeCongruenceClasses(Function &F) {
2795 NextCongruenceNum = 0;
2796 
2797 // Note that even though we use the live on entry def as a representative
2798 // MemoryAccess, it is *not* the same as the actual live on entry def. We
2799 // have no real equivalent to undef for MemoryAccesses, and so we really
2800 // should be checking whether the MemoryAccess is top if we want to know if it
2801 // is equivalent to everything. Otherwise, what this really signifies is that
2802 // the access reaches all the way back to the beginning of the function.
2803 
2804 // Initialize all other instructions to be in TOP class.
2805 TOPClass = createCongruenceClass(nullptr, nullptr);
2806 TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef());
2807 // The live on entry def gets put into its own class.
2808 MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
2809 createMemoryClass(MSSA->getLiveOnEntryDef());
2810 
2811 for (auto DTN : nodes(DT)) {
2812 BasicBlock *BB = DTN->getBlock();
2813 // All MemoryAccesses are equivalent to live on entry to start. They must
2814 // be initialized to something so that initial changes are noticed. For
2815 // the maximal answer, we initialize them all to be the same as
2816 // liveOnEntry.
2817 auto *MemoryBlockDefs = MSSA->getBlockDefs(BB); 2818 if (MemoryBlockDefs) 2819 for (const auto &Def : *MemoryBlockDefs) { 2820 MemoryAccessToClass[&Def] = TOPClass; 2821 auto *MD = dyn_cast<MemoryDef>(&Def); 2822 // Insert the memory phis into the member list. 2823 if (!MD) { 2824 const MemoryPhi *MP = cast<MemoryPhi>(&Def); 2825 TOPClass->memory_insert(MP); 2826 MemoryPhiState.insert({MP, MPS_TOP}); 2827 } 2828 2829 if (MD && isa<StoreInst>(MD->getMemoryInst())) 2830 TOPClass->incStoreCount(); 2831 } 2832 2833 // FIXME: This is trying to discover which instructions are uses of phi 2834 // nodes. We should move this into one of the myriad of places that walk 2835 // all the operands already. 2836 for (auto &I : *BB) { 2837 if (isa<PHINode>(&I)) 2838 for (auto *U : I.users()) 2839 if (auto *UInst = dyn_cast<Instruction>(U)) 2840 if (InstrToDFSNum(UInst) != 0 && okayForPHIOfOps(UInst)) 2841 PHINodeUses.insert(UInst); 2842 // Don't insert void terminators into the class. We don't value number 2843 // them, and they just end up sitting in TOP. 2844 if (I.isTerminator() && I.getType()->isVoidTy()) 2845 continue; 2846 TOPClass->insert(&I); 2847 ValueToClass[&I] = TOPClass; 2848 } 2849 } 2850 2851 // Initialize arguments to be in their own unique congruence classes 2852 for (auto &FA : F.args()) 2853 createSingletonCongruenceClass(&FA); 2854 } 2855 2856 void NewGVN::cleanupTables() { 2857 for (unsigned i = 0, e = CongruenceClasses.size(); i != e; ++i) { 2858 LLVM_DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID() 2859 << " has " << CongruenceClasses[i]->size() 2860 << " members\n"); 2861 // Make sure we delete the congruence class (probably worth switching to 2862 // a unique_ptr at some point. 2863 delete CongruenceClasses[i]; 2864 CongruenceClasses[i] = nullptr; 2865 } 2866 2867 // Destroy the value expressions 2868 SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(), 2869 AllTempInstructions.end()); 2870 AllTempInstructions.clear(); 2871 2872 // We have to drop all references for everything first, so there are no uses 2873 // left as we delete them. 2874 for (auto *I : TempInst) { 2875 I->dropAllReferences(); 2876 } 2877 2878 while (!TempInst.empty()) { 2879 auto *I = TempInst.pop_back_val(); 2880 I->deleteValue(); 2881 } 2882 2883 ValueToClass.clear(); 2884 ArgRecycler.clear(ExpressionAllocator); 2885 ExpressionAllocator.Reset(); 2886 CongruenceClasses.clear(); 2887 ExpressionToClass.clear(); 2888 ValueToExpression.clear(); 2889 RealToTemp.clear(); 2890 AdditionalUsers.clear(); 2891 ExpressionToPhiOfOps.clear(); 2892 TempToBlock.clear(); 2893 TempToMemory.clear(); 2894 PHINodeUses.clear(); 2895 OpSafeForPHIOfOps.clear(); 2896 ReachableBlocks.clear(); 2897 ReachableEdges.clear(); 2898 #ifndef NDEBUG 2899 ProcessedCount.clear(); 2900 #endif 2901 InstrDFS.clear(); 2902 InstructionsToErase.clear(); 2903 DFSToInstr.clear(); 2904 BlockInstRange.clear(); 2905 TouchedInstructions.clear(); 2906 MemoryAccessToClass.clear(); 2907 PredicateToUsers.clear(); 2908 MemoryToUsers.clear(); 2909 RevisitOnReachabilityChange.clear(); 2910 } 2911 2912 // Assign local DFS number mapping to instructions, and leave space for Value 2913 // PHI's. 2914 std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B, 2915 unsigned Start) { 2916 unsigned End = Start; 2917 if (MemoryAccess *MemPhi = getMemoryAccess(B)) { 2918 InstrDFS[MemPhi] = End++; 2919 DFSToInstr.emplace_back(MemPhi); 2920 } 2921 2922 // Then the real block goes next. 
2923 for (auto &I : *B) {
2924 // There's no need to call isInstructionTriviallyDead more than once on
2925 // an instruction. Therefore, once we know that an instruction is dead
2926 // we change its DFS number so that it doesn't get value numbered.
2927 if (isInstructionTriviallyDead(&I, TLI)) {
2928 InstrDFS[&I] = 0;
2929 LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
2930 markInstructionForDeletion(&I);
2931 continue;
2932 }
2933 if (isa<PHINode>(&I))
2934 RevisitOnReachabilityChange[B].set(End);
2935 InstrDFS[&I] = End++;
2936 DFSToInstr.emplace_back(&I);
2937 }
2938 
2939 // All of the range functions take half-open ranges (open on the end side).
2940 // So we do not subtract one from count, because at this point it is one
2941 // greater than the last instruction.
2942 return std::make_pair(Start, End);
2943 }
2944 
2945 void NewGVN::updateProcessedCount(const Value *V) {
2946 #ifndef NDEBUG
2947 if (ProcessedCount.count(V) == 0) {
2948 ProcessedCount.insert({V, 1});
2949 } else {
2950 ++ProcessedCount[V];
2951 assert(ProcessedCount[V] < 100 &&
2952 "Seem to have processed the same Value a lot");
2953 }
2954 #endif
2955 }
2956 
2957 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes.
2958 void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
2959 // If all the arguments are the same, the MemoryPhi has the same value as the
2960 // argument. Filter out unreachable blocks and self phis from our operands.
2961 // TODO: We could do cycle-checking on the memory phis to allow valueizing for
2962 // self-phi checking.
2963 const BasicBlock *PHIBlock = MP->getBlock();
2964 auto Filtered = make_filter_range(MP->operands(), [&](const Use &U) {
2965 return cast<MemoryAccess>(U) != MP &&
2966 !isMemoryAccessTOP(cast<MemoryAccess>(U)) &&
2967 ReachableEdges.count({MP->getIncomingBlock(U), PHIBlock});
2968 });
2969 // If all that is left is nothing, our memoryphi is undef. We keep it in
2970 // TOPClass. Note: The only case this should happen is if we have at
2971 // least one self-argument.
2972 if (Filtered.begin() == Filtered.end()) {
2973 if (setMemoryClass(MP, TOPClass))
2974 markMemoryUsersTouched(MP);
2975 return;
2976 }
2977 
2978 // Transform the remaining operands into operand leaders.
2979 // FIXME: mapped_iterator should have a range version.
2980 auto LookupFunc = [&](const Use &U) {
2981 return lookupMemoryLeader(cast<MemoryAccess>(U));
2982 };
2983 auto MappedBegin = map_iterator(Filtered.begin(), LookupFunc);
2984 auto MappedEnd = map_iterator(Filtered.end(), LookupFunc);
2985 
2986 // and now check if all the elements are equal.
2987 // Sadly, we can't use std::equal since these are random access iterators.
2988 const auto *AllSameValue = *MappedBegin;
2989 ++MappedBegin;
2990 bool AllEqual = std::all_of(
2991 MappedBegin, MappedEnd,
2992 [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; });
2993 
2994 if (AllEqual)
2995 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue
2996 << "\n");
2997 else
2998 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
2999 // If it's equal to something, it's in that class. Otherwise, it has to be in
3000 // a class where it is the leader (other things may be equivalent to it, but
3001 // it needs to start off in its own class, which means it must have been the
3002 // leader, and it can't have stopped being the leader because it was never
3003 // removed).
3004 CongruenceClass *CC =
3005 AllEqual ?
getMemoryClass(AllSameValue) : ensureLeaderOfMemoryClass(MP); 3006 auto OldState = MemoryPhiState.lookup(MP); 3007 assert(OldState != MPS_Invalid && "Invalid memory phi state"); 3008 auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique; 3009 MemoryPhiState[MP] = NewState; 3010 if (setMemoryClass(MP, CC) || OldState != NewState) 3011 markMemoryUsersTouched(MP); 3012 } 3013 3014 // Value number a single instruction, symbolically evaluating, performing 3015 // congruence finding, and updating mappings. 3016 void NewGVN::valueNumberInstruction(Instruction *I) { 3017 LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n"); 3018 if (!I->isTerminator()) { 3019 const Expression *Symbolized = nullptr; 3020 SmallPtrSet<Value *, 2> Visited; 3021 if (DebugCounter::shouldExecute(VNCounter)) { 3022 Symbolized = performSymbolicEvaluation(I, Visited); 3023 // Make a phi of ops if necessary 3024 if (Symbolized && !isa<ConstantExpression>(Symbolized) && 3025 !isa<VariableExpression>(Symbolized) && PHINodeUses.count(I)) { 3026 auto *PHIE = makePossiblePHIOfOps(I, Visited); 3027 // If we created a phi of ops, use it. 3028 // If we couldn't create one, make sure we don't leave one lying around 3029 if (PHIE) { 3030 Symbolized = PHIE; 3031 } else if (auto *Op = RealToTemp.lookup(I)) { 3032 removePhiOfOps(I, Op); 3033 } 3034 } 3035 } else { 3036 // Mark the instruction as unused so we don't value number it again. 3037 InstrDFS[I] = 0; 3038 } 3039 // If we couldn't come up with a symbolic expression, use the unknown 3040 // expression 3041 if (Symbolized == nullptr) 3042 Symbolized = createUnknownExpression(I); 3043 performCongruenceFinding(I, Symbolized); 3044 } else { 3045 // Handle terminators that return values. All of them produce values we 3046 // don't currently understand. We don't place non-value producing 3047 // terminators in a class. 3048 if (!I->getType()->isVoidTy()) { 3049 auto *Symbolized = createUnknownExpression(I); 3050 performCongruenceFinding(I, Symbolized); 3051 } 3052 processOutgoingEdges(I, I->getParent()); 3053 } 3054 } 3055 3056 // Check if there is a path, using single or equal argument phi nodes, from 3057 // First to Second. 3058 bool NewGVN::singleReachablePHIPath( 3059 SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First, 3060 const MemoryAccess *Second) const { 3061 if (First == Second) 3062 return true; 3063 if (MSSA->isLiveOnEntryDef(First)) 3064 return false; 3065 3066 // This is not perfect, but as we're just verifying here, we can live with 3067 // the loss of precision. The real solution would be that of doing strongly 3068 // connected component finding in this routine, and it's probably not worth 3069 // the complexity for the time being. So, we just keep a set of visited 3070 // MemoryAccess and return true when we hit a cycle. 
3071 if (Visited.count(First))
3072 return true;
3073 Visited.insert(First);
3074 
3075 const auto *EndDef = First;
3076 for (auto *ChainDef : optimized_def_chain(First)) {
3077 if (ChainDef == Second)
3078 return true;
3079 if (MSSA->isLiveOnEntryDef(ChainDef))
3080 return false;
3081 EndDef = ChainDef;
3082 }
3083 auto *MP = cast<MemoryPhi>(EndDef);
3084 auto ReachableOperandPred = [&](const Use &U) {
3085 return ReachableEdges.count({MP->getIncomingBlock(U), MP->getBlock()});
3086 };
3087 auto FilteredPhiArgs =
3088 make_filter_range(MP->operands(), ReachableOperandPred);
3089 SmallVector<const Value *, 32> OperandList;
3090 llvm::copy(FilteredPhiArgs, std::back_inserter(OperandList));
3091 bool Okay = is_splat(OperandList);
3092 if (Okay)
3093 return singleReachablePHIPath(Visited, cast<MemoryAccess>(OperandList[0]),
3094 Second);
3095 return false;
3096 }
3097 
3098 // Verify that the memory equivalence table makes sense relative to the
3099 // congruence classes. Note that this checking is not perfect, and is currently
3100 // subject to very rare false negatives. It is only useful for
3101 // testing/debugging.
3102 void NewGVN::verifyMemoryCongruency() const {
3103 #ifndef NDEBUG
3104 // Verify that the memory equivalence table and memory member set match.
3105 for (const auto *CC : CongruenceClasses) {
3106 if (CC == TOPClass || CC->isDead())
3107 continue;
3108 if (CC->getStoreCount() != 0) {
3109 assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
3110 "Any class with a store as a leader should have a "
3111 "representative stored value");
3112 assert(CC->getMemoryLeader() &&
3113 "Any congruence class with a store should have a "
3114 "representative access");
3115 }
3116 
3117 if (CC->getMemoryLeader())
3118 assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
3119 "Representative MemoryAccess does not appear to be reverse "
3120 "mapped properly");
3121 for (auto M : CC->memory())
3122 assert(MemoryAccessToClass.lookup(M) == CC &&
3123 "Memory member does not appear to be reverse mapped properly");
3124 }
3125 
3126 // Anything equivalent in the MemoryAccess table should be in the same
3127 // congruence class.
3128 
3129 // Filter out the unreachable and trivially dead entries, because they may
3130 // never have been updated if the instructions were not processed.
3131 auto ReachableAccessPred =
3132 [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
3133 bool Result = ReachableBlocks.count(Pair.first->getBlock());
3134 if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
3135 MemoryToDFSNum(Pair.first) == 0)
3136 return false;
3137 if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
3138 return !isInstructionTriviallyDead(MemDef->getMemoryInst());
3139 
3140 // We could have phi nodes whose operands are all trivially dead,
3141 // so we don't process them.
3142 if (auto *MemPHI = dyn_cast<MemoryPhi>(Pair.first)) {
3143 for (auto &U : MemPHI->incoming_values()) {
3144 if (auto *I = dyn_cast<Instruction>(&*U)) {
3145 if (!isInstructionTriviallyDead(I))
3146 return true;
3147 }
3148 }
3149 return false;
3150 }
3151 
3152 return true;
3153 };
3154 
3155 auto Filtered = make_filter_range(MemoryAccessToClass, ReachableAccessPred);
3156 for (auto KV : Filtered) {
3157 if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(KV.first)) {
3158 auto *SecondMUD = dyn_cast<MemoryUseOrDef>(KV.second->getMemoryLeader());
3159 if (FirstMUD && SecondMUD) {
3160 SmallPtrSet<const MemoryAccess *, 8> VisitedMAS;
3161 assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) ||
3162 ValueToClass.lookup(FirstMUD->getMemoryInst()) ==
3163 ValueToClass.lookup(SecondMUD->getMemoryInst())) &&
3164 "The instructions for these memory operations should have "
3165 "been in the same congruence class or reachable through "
3166 "a single argument phi");
3167 }
3168 } else if (auto *FirstMP = dyn_cast<MemoryPhi>(KV.first)) {
3169 // We can only sanely verify that MemoryDefs in the operand list all have
3170 // the same class.
3171 auto ReachableOperandPred = [&](const Use &U) {
3172 return ReachableEdges.count(
3173 {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) &&
3174 isa<MemoryDef>(U);
3175 
3176 };
3177 // All arguments should be in the same class, ignoring unreachable arguments.
3178 auto FilteredPhiArgs =
3179 make_filter_range(FirstMP->operands(), ReachableOperandPred);
3180 SmallVector<const CongruenceClass *, 16> PhiOpClasses;
3181 std::transform(FilteredPhiArgs.begin(), FilteredPhiArgs.end(),
3182 std::back_inserter(PhiOpClasses), [&](const Use &U) {
3183 const MemoryDef *MD = cast<MemoryDef>(U);
3184 return ValueToClass.lookup(MD->getMemoryInst());
3185 });
3186 assert(is_splat(PhiOpClasses) &&
3187 "All MemoryPhi arguments should be in the same class");
3188 }
3189 }
3190 #endif
3191 }
3192 
3193 // Verify that the sparse propagation we did actually found the maximal fixpoint.
3194 // We do this by storing the value to class mapping, touching all instructions,
3195 // and redoing the iteration to see if anything changed.
3196 void NewGVN::verifyIterationSettled(Function &F) {
3197 #ifndef NDEBUG
3198 LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
3199 if (DebugCounter::isCounterSet(VNCounter))
3200 DebugCounter::setCounterValue(VNCounter, StartingVNCounter);
3201 
3202 // Note that we have to store the actual classes, as we may change existing
3203 // classes during iteration. This is because our memory iteration propagation
3204 // is not perfect, and so may waste a little work. But it should generate
3205 // exactly the same congruence classes we have now, with different IDs.
3206 std::map<const Value *, CongruenceClass> BeforeIteration;
3207 
3208 for (auto &KV : ValueToClass) {
3209 if (auto *I = dyn_cast<Instruction>(KV.first))
3210 // Skip unused/dead instructions.
3211 if (InstrToDFSNum(I) == 0)
3212 continue;
3213 BeforeIteration.insert({KV.first, *KV.second});
3214 }
3215 
3216 TouchedInstructions.set();
3217 TouchedInstructions.reset(0);
3218 iterateTouchedInstructions();
3219 DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>>
3220 EqualClasses;
3221 for (const auto &KV : ValueToClass) {
3222 if (auto *I = dyn_cast<Instruction>(KV.first))
3223 // Skip unused/dead instructions.
3224 if (InstrToDFSNum(I) == 0) 3225 continue; 3226 // We could sink these uses, but i think this adds a bit of clarity here as 3227 // to what we are comparing. 3228 auto *BeforeCC = &BeforeIteration.find(KV.first)->second; 3229 auto *AfterCC = KV.second; 3230 // Note that the classes can't change at this point, so we memoize the set 3231 // that are equal. 3232 if (!EqualClasses.count({BeforeCC, AfterCC})) { 3233 assert(BeforeCC->isEquivalentTo(AfterCC) && 3234 "Value number changed after main loop completed!"); 3235 EqualClasses.insert({BeforeCC, AfterCC}); 3236 } 3237 } 3238 #endif 3239 } 3240 3241 // Verify that for each store expression in the expression to class mapping, 3242 // only the latest appears, and multiple ones do not appear. 3243 // Because loads do not use the stored value when doing equality with stores, 3244 // if we don't erase the old store expressions from the table, a load can find 3245 // a no-longer valid StoreExpression. 3246 void NewGVN::verifyStoreExpressions() const { 3247 #ifndef NDEBUG 3248 // This is the only use of this, and it's not worth defining a complicated 3249 // densemapinfo hash/equality function for it. 3250 std::set< 3251 std::pair<const Value *, 3252 std::tuple<const Value *, const CongruenceClass *, Value *>>> 3253 StoreExpressionSet; 3254 for (const auto &KV : ExpressionToClass) { 3255 if (auto *SE = dyn_cast<StoreExpression>(KV.first)) { 3256 // Make sure a version that will conflict with loads is not already there 3257 auto Res = StoreExpressionSet.insert( 3258 {SE->getOperand(0), std::make_tuple(SE->getMemoryLeader(), KV.second, 3259 SE->getStoredValue())}); 3260 bool Okay = Res.second; 3261 // It's okay to have the same expression already in there if it is 3262 // identical in nature. 3263 // This can happen when the leader of the stored value changes over time. 3264 if (!Okay) 3265 Okay = (std::get<1>(Res.first->second) == KV.second) && 3266 (lookupOperandLeader(std::get<2>(Res.first->second)) == 3267 lookupOperandLeader(SE->getStoredValue())); 3268 assert(Okay && "Stored expression conflict exists in expression table"); 3269 auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst()); 3270 assert(ValueExpr && ValueExpr->equals(*SE) && 3271 "StoreExpression in ExpressionToClass is not latest " 3272 "StoreExpression for value"); 3273 } 3274 } 3275 #endif 3276 } 3277 3278 // This is the main value numbering loop, it iterates over the initial touched 3279 // instruction set, propagating value numbers, marking things touched, etc, 3280 // until the set of touched instructions is completely empty. 3281 void NewGVN::iterateTouchedInstructions() { 3282 unsigned int Iterations = 0; 3283 // Figure out where touchedinstructions starts 3284 int FirstInstr = TouchedInstructions.find_first(); 3285 // Nothing set, nothing to iterate, just return. 3286 if (FirstInstr == -1) 3287 return; 3288 const BasicBlock *LastBlock = getBlockForValue(InstrFromDFSNum(FirstInstr)); 3289 while (TouchedInstructions.any()) { 3290 ++Iterations; 3291 // Walk through all the instructions in all the blocks in RPO. 3292 // TODO: As we hit a new block, we should push and pop equalities into a 3293 // table lookupOperandLeader can use, to catch things PredicateInfo 3294 // might miss, like edge-only equivalences. 3295 for (unsigned InstrNum : TouchedInstructions.set_bits()) { 3296 3297 // This instruction was found to be dead. We don't bother looking 3298 // at it again. 
3299 if (InstrNum == 0) { 3300 TouchedInstructions.reset(InstrNum); 3301 continue; 3302 } 3303 3304 Value *V = InstrFromDFSNum(InstrNum); 3305 const BasicBlock *CurrBlock = getBlockForValue(V); 3306 3307 // If we hit a new block, do reachability processing. 3308 if (CurrBlock != LastBlock) { 3309 LastBlock = CurrBlock; 3310 bool BlockReachable = ReachableBlocks.count(CurrBlock); 3311 const auto &CurrInstRange = BlockInstRange.lookup(CurrBlock); 3312 3313 // If it's not reachable, erase any touched instructions and move on. 3314 if (!BlockReachable) { 3315 TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second); 3316 LLVM_DEBUG(dbgs() << "Skipping instructions in block " 3317 << getBlockName(CurrBlock) 3318 << " because it is unreachable\n"); 3319 continue; 3320 } 3321 updateProcessedCount(CurrBlock); 3322 } 3323 // Reset after processing (because we may mark ourselves as touched when 3324 // we propagate equalities). 3325 TouchedInstructions.reset(InstrNum); 3326 3327 if (auto *MP = dyn_cast<MemoryPhi>(V)) { 3328 LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n"); 3329 valueNumberMemoryPhi(MP); 3330 } else if (auto *I = dyn_cast<Instruction>(V)) { 3331 valueNumberInstruction(I); 3332 } else { 3333 llvm_unreachable("Should have been a MemoryPhi or Instruction"); 3334 } 3335 updateProcessedCount(V); 3336 } 3337 } 3338 NumGVNMaxIterations = std::max(NumGVNMaxIterations.getValue(), Iterations); 3339 } 3340 3341 // This is the main transformation entry point. 3342 bool NewGVN::runGVN() { 3343 if (DebugCounter::isCounterSet(VNCounter)) 3344 StartingVNCounter = DebugCounter::getCounterValue(VNCounter); 3345 bool Changed = false; 3346 NumFuncArgs = F.arg_size(); 3347 MSSAWalker = MSSA->getWalker(); 3348 SingletonDeadExpression = new (ExpressionAllocator) DeadExpression(); 3349 3350 // Count number of instructions for sizing of hash tables, and come 3351 // up with a global dfs numbering for instructions. 3352 unsigned ICount = 1; 3353 // Add an empty instruction to account for the fact that we start at 1 3354 DFSToInstr.emplace_back(nullptr); 3355 // Note: We want ideal RPO traversal of the blocks, which is not quite the 3356 // same as dominator tree order, particularly with regard whether backedges 3357 // get visited first or second, given a block with multiple successors. 3358 // If we visit in the wrong order, we will end up performing N times as many 3359 // iterations. 3360 // The dominator tree does guarantee that, for a given dom tree node, it's 3361 // parent must occur before it in the RPO ordering. Thus, we only need to sort 3362 // the siblings. 3363 ReversePostOrderTraversal<Function *> RPOT(&F); 3364 unsigned Counter = 0; 3365 for (auto &B : RPOT) { 3366 auto *Node = DT->getNode(B); 3367 assert(Node && "RPO and Dominator tree should have same reachability"); 3368 RPOOrdering[Node] = ++Counter; 3369 } 3370 // Sort dominator tree children arrays into RPO. 3371 for (auto &B : RPOT) { 3372 auto *Node = DT->getNode(B); 3373 if (Node->getNumChildren() > 1) 3374 llvm::sort(*Node, [&](const DomTreeNode *A, const DomTreeNode *B) { 3375 return RPOOrdering[A] < RPOOrdering[B]; 3376 }); 3377 } 3378 3379 // Now a standard depth first ordering of the domtree is equivalent to RPO. 
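// For example (a hypothetical CFG, just to illustrate the ordering): with
// edges A->B, A->C, and B->C, both B and C are dominator tree children of A,
// and RPO visits B before C. The sort above makes the depth-first walk below
// number B's instructions first; visiting C first would value number C
// against not-yet-processed information from B and cost extra iterations.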
3380 for (auto DTN : depth_first(DT->getRootNode())) { 3381 BasicBlock *B = DTN->getBlock(); 3382 const auto &BlockRange = assignDFSNumbers(B, ICount); 3383 BlockInstRange.insert({B, BlockRange}); 3384 ICount += BlockRange.second - BlockRange.first; 3385 } 3386 initializeCongruenceClasses(F); 3387 3388 TouchedInstructions.resize(ICount); 3389 // Ensure we don't end up resizing the expressionToClass map, as 3390 // that can be quite expensive. At most, we have one expression per 3391 // instruction. 3392 ExpressionToClass.reserve(ICount); 3393 3394 // Initialize the touched instructions to include the entry block. 3395 const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock()); 3396 TouchedInstructions.set(InstRange.first, InstRange.second); 3397 LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock()) 3398 << " marked reachable\n"); 3399 ReachableBlocks.insert(&F.getEntryBlock()); 3400 3401 iterateTouchedInstructions(); 3402 verifyMemoryCongruency(); 3403 verifyIterationSettled(F); 3404 verifyStoreExpressions(); 3405 3406 Changed |= eliminateInstructions(F); 3407 3408 // Delete all instructions marked for deletion. 3409 for (Instruction *ToErase : InstructionsToErase) { 3410 if (!ToErase->use_empty()) 3411 ToErase->replaceAllUsesWith(UndefValue::get(ToErase->getType())); 3412 3413 assert(ToErase->getParent() && 3414 "BB containing ToErase deleted unexpectedly!"); 3415 ToErase->eraseFromParent(); 3416 } 3417 Changed |= !InstructionsToErase.empty(); 3418 3419 // Delete all unreachable blocks. 3420 auto UnreachableBlockPred = [&](const BasicBlock &BB) { 3421 return !ReachableBlocks.count(&BB); 3422 }; 3423 3424 for (auto &BB : make_filter_range(F, UnreachableBlockPred)) { 3425 LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB) 3426 << " is unreachable\n"); 3427 deleteInstructionsInBlock(&BB); 3428 Changed = true; 3429 } 3430 3431 cleanupTables(); 3432 return Changed; 3433 } 3434 3435 struct NewGVN::ValueDFS { 3436 int DFSIn = 0; 3437 int DFSOut = 0; 3438 int LocalNum = 0; 3439 3440 // Only one of Def and U will be set. 3441 // The bool in the Def tells us whether the Def is the stored value of a 3442 // store. 3443 PointerIntPair<Value *, 1, bool> Def; 3444 Use *U = nullptr; 3445 3446 bool operator<(const ValueDFS &Other) const { 3447 // It's not enough that any given field be less than - we have sets 3448 // of fields that need to be evaluated together to give a proper ordering. 3449 // For example, if you have; 3450 // DFS (1, 3) 3451 // Val 0 3452 // DFS (1, 2) 3453 // Val 50 3454 // We want the second to be less than the first, but if we just go field 3455 // by field, we will get to Val 0 < Val 50 and say the first is less than 3456 // the second. We only want it to be less than if the DFS orders are equal. 3457 // 3458 // Each LLVM instruction only produces one value, and thus the lowest-level 3459 // differentiator that really matters for the stack (and what we use as as a 3460 // replacement) is the local dfs number. 3461 // Everything else in the structure is instruction level, and only affects 3462 // the order in which we will replace operands of a given instruction. 3463 // 3464 // For a given instruction (IE things with equal dfsin, dfsout, localnum), 3465 // the order of replacement of uses does not matter. 3466 // IE given, 3467 // a = 5 3468 // b = a + a 3469 // When you hit b, you will have two valuedfs with the same dfsin, out, and 3470 // localnum. 3471 // The .val will be the same as well. 3472 // The .u's will be different. 
3473 // You will replace both, and it does not matter what order you replace them
3474 // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3475 // operand 2).
3476 // Similarly for the case of same dfsin, dfsout, localnum, but different
3477 // .val's
3478 // a = 5
3479 // b = 6
3480 // c = a + b
3481 // in c, we will have a valuedfs for a and one for b, with everything the same
3482 // but .val and .u.
3483 // It does not matter what order we replace these operands in.
3484 // You will always end up with the same IR, and this is guaranteed.
3485 return std::tie(DFSIn, DFSOut, LocalNum, Def, U) <
3486 std::tie(Other.DFSIn, Other.DFSOut, Other.LocalNum, Other.Def,
3487 Other.U);
3488 }
3489 };
3490 
3491 // This function converts the set of members for a congruence class from
3492 // values to sets of defs and uses with associated DFS info. The total
3493 // number of reachable uses for each value is stored in UseCounts, and
3494 // instructions that seem dead (have no non-dead uses) are stored in
3495 // ProbablyDead.
3496 void NewGVN::convertClassToDFSOrdered(
3497 const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
3498 DenseMap<const Value *, unsigned int> &UseCounts,
3499 SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
3500 for (auto D : Dense) {
3501 // First add the value.
3502 BasicBlock *BB = getBlockForValue(D);
3503 // Constants are handled prior to ever calling this function, so
3504 // we should only be left with instructions as members.
3505 assert(BB && "Should have figured out a basic block for value");
3506 ValueDFS VDDef;
3507 DomTreeNode *DomNode = DT->getNode(BB);
3508 VDDef.DFSIn = DomNode->getDFSNumIn();
3509 VDDef.DFSOut = DomNode->getDFSNumOut();
3510 // If it's a store, use the leader of the value operand, if it's always
3511 // available, or the value operand. TODO: We could do dominance checks to
3512 // find a dominating leader, but not worth it ATM.
3513 if (auto *SI = dyn_cast<StoreInst>(D)) {
3514 auto Leader = lookupOperandLeader(SI->getValueOperand());
3515 if (alwaysAvailable(Leader)) {
3516 VDDef.Def.setPointer(Leader);
3517 } else {
3518 VDDef.Def.setPointer(SI->getValueOperand());
3519 VDDef.Def.setInt(true);
3520 }
3521 } else {
3522 VDDef.Def.setPointer(D);
3523 }
3524 assert(isa<Instruction>(D) &&
3525 "The dense set member should always be an instruction");
3526 Instruction *Def = cast<Instruction>(D);
3527 VDDef.LocalNum = InstrToDFSNum(D);
3528 DFSOrderedSet.push_back(VDDef);
3529 // If there is a phi node equivalent, add it.
3530 if (auto *PN = RealToTemp.lookup(Def)) {
3531 auto *PHIE =
3532 dyn_cast_or_null<PHIExpression>(ValueToExpression.lookup(Def));
3533 if (PHIE) {
3534 VDDef.Def.setInt(false);
3535 VDDef.Def.setPointer(PN);
3536 VDDef.LocalNum = 0;
3537 DFSOrderedSet.push_back(VDDef);
3538 }
3539 }
3540 
3541 unsigned int UseCount = 0;
3542 // Now add the uses.
3543 for (auto &U : Def->uses()) {
3544 if (auto *I = dyn_cast<Instruction>(U.getUser())) {
3545 // Don't try to replace into dead uses
3546 if (InstructionsToErase.count(I))
3547 continue;
3548 ValueDFS VDUse;
3549 // Put the phi node uses in the incoming block.
3550 BasicBlock *IBlock;
3551 if (auto *P = dyn_cast<PHINode>(I)) {
3552 IBlock = P->getIncomingBlock(U);
3553 // Make phi node users appear last in the incoming block
3554 // they are from.
3555 VDUse.LocalNum = InstrDFS.size() + 1; 3556 } else { 3557 IBlock = getBlockForValue(I); 3558 VDUse.LocalNum = InstrToDFSNum(I); 3559 } 3560 3561 // Skip uses in unreachable blocks, as we're going 3562 // to delete them. 3563 if (ReachableBlocks.count(IBlock) == 0) 3564 continue; 3565 3566 DomTreeNode *DomNode = DT->getNode(IBlock); 3567 VDUse.DFSIn = DomNode->getDFSNumIn(); 3568 VDUse.DFSOut = DomNode->getDFSNumOut(); 3569 VDUse.U = &U; 3570 ++UseCount; 3571 DFSOrderedSet.emplace_back(VDUse); 3572 } 3573 } 3574 3575 // If there are no uses, it's probably dead (but it may have side-effects, 3576 // so not definitely dead. Otherwise, store the number of uses so we can 3577 // track if it becomes dead later). 3578 if (UseCount == 0) 3579 ProbablyDead.insert(Def); 3580 else 3581 UseCounts[Def] = UseCount; 3582 } 3583 } 3584 3585 // This function converts the set of members for a congruence class from values, 3586 // to the set of defs for loads and stores, with associated DFS info. 3587 void NewGVN::convertClassToLoadsAndStores( 3588 const CongruenceClass &Dense, 3589 SmallVectorImpl<ValueDFS> &LoadsAndStores) const { 3590 for (auto D : Dense) { 3591 if (!isa<LoadInst>(D) && !isa<StoreInst>(D)) 3592 continue; 3593 3594 BasicBlock *BB = getBlockForValue(D); 3595 ValueDFS VD; 3596 DomTreeNode *DomNode = DT->getNode(BB); 3597 VD.DFSIn = DomNode->getDFSNumIn(); 3598 VD.DFSOut = DomNode->getDFSNumOut(); 3599 VD.Def.setPointer(D); 3600 3601 // If it's an instruction, use the real local dfs number. 3602 if (auto *I = dyn_cast<Instruction>(D)) 3603 VD.LocalNum = InstrToDFSNum(I); 3604 else 3605 llvm_unreachable("Should have been an instruction"); 3606 3607 LoadsAndStores.emplace_back(VD); 3608 } 3609 } 3610 3611 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { 3612 patchReplacementInstruction(I, Repl); 3613 I->replaceAllUsesWith(Repl); 3614 } 3615 3616 void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) { 3617 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB); 3618 ++NumGVNBlocksDeleted; 3619 3620 // Delete the instructions backwards, as it has a reduced likelihood of having 3621 // to update as many def-use and use-def chains. Start after the terminator. 3622 auto StartPoint = BB->rbegin(); 3623 ++StartPoint; 3624 // Note that we explicitly recalculate BB->rend() on each iteration, 3625 // as it may change when we remove the first instruction. 3626 for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) { 3627 Instruction &Inst = *I++; 3628 if (!Inst.use_empty()) 3629 Inst.replaceAllUsesWith(UndefValue::get(Inst.getType())); 3630 if (isa<LandingPadInst>(Inst)) 3631 continue; 3632 salvageKnowledge(&Inst, AC); 3633 3634 Inst.eraseFromParent(); 3635 ++NumGVNInstrDeleted; 3636 } 3637 // Now insert something that simplifycfg will turn into an unreachable. 3638 Type *Int8Ty = Type::getInt8Ty(BB->getContext()); 3639 new StoreInst(UndefValue::get(Int8Ty), 3640 Constant::getNullValue(Int8Ty->getPointerTo()), 3641 BB->getTerminator()); 3642 } 3643 3644 void NewGVN::markInstructionForDeletion(Instruction *I) { 3645 LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n"); 3646 InstructionsToErase.insert(I); 3647 } 3648 3649 void NewGVN::replaceInstruction(Instruction *I, Value *V) { 3650 LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n"); 3651 patchAndReplaceAllUsesWith(I, V); 3652 // We save the actual erasing to avoid invalidating memory 3653 // dependencies until we are done with everything. 
3654 markInstructionForDeletion(I); 3655 } 3656 3657 namespace { 3658 3659 // This is a stack that contains both the value and dfs info of where 3660 // that value is valid. 3661 class ValueDFSStack { 3662 public: 3663 Value *back() const { return ValueStack.back(); } 3664 std::pair<int, int> dfs_back() const { return DFSStack.back(); } 3665 3666 void push_back(Value *V, int DFSIn, int DFSOut) { 3667 ValueStack.emplace_back(V); 3668 DFSStack.emplace_back(DFSIn, DFSOut); 3669 } 3670 3671 bool empty() const { return DFSStack.empty(); } 3672 3673 bool isInScope(int DFSIn, int DFSOut) const { 3674 if (empty()) 3675 return false; 3676 return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second; 3677 } 3678 3679 void popUntilDFSScope(int DFSIn, int DFSOut) { 3680 3681 // These two should always be in sync at this point. 3682 assert(ValueStack.size() == DFSStack.size() && 3683 "Mismatch between ValueStack and DFSStack"); 3684 while ( 3685 !DFSStack.empty() && 3686 !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) { 3687 DFSStack.pop_back(); 3688 ValueStack.pop_back(); 3689 } 3690 } 3691 3692 private: 3693 SmallVector<Value *, 8> ValueStack; 3694 SmallVector<std::pair<int, int>, 8> DFSStack; 3695 }; 3696 3697 } // end anonymous namespace 3698 3699 // Given an expression, get the congruence class for it. 3700 CongruenceClass *NewGVN::getClassForExpression(const Expression *E) const { 3701 if (auto *VE = dyn_cast<VariableExpression>(E)) 3702 return ValueToClass.lookup(VE->getVariableValue()); 3703 else if (isa<DeadExpression>(E)) 3704 return TOPClass; 3705 return ExpressionToClass.lookup(E); 3706 } 3707 3708 // Given a value and a basic block we are trying to see if it is available in, 3709 // see if the value has a leader available in that block. 3710 Value *NewGVN::findPHIOfOpsLeader(const Expression *E, 3711 const Instruction *OrigInst, 3712 const BasicBlock *BB) const { 3713 // It would already be constant if we could make it constant 3714 if (auto *CE = dyn_cast<ConstantExpression>(E)) 3715 return CE->getConstantValue(); 3716 if (auto *VE = dyn_cast<VariableExpression>(E)) { 3717 auto *V = VE->getVariableValue(); 3718 if (alwaysAvailable(V) || DT->dominates(getBlockForValue(V), BB)) 3719 return VE->getVariableValue(); 3720 } 3721 3722 auto *CC = getClassForExpression(E); 3723 if (!CC) 3724 return nullptr; 3725 if (alwaysAvailable(CC->getLeader())) 3726 return CC->getLeader(); 3727 3728 for (auto Member : *CC) { 3729 auto *MemberInst = dyn_cast<Instruction>(Member); 3730 if (MemberInst == OrigInst) 3731 continue; 3732 // Anything that isn't an instruction is always available. 3733 if (!MemberInst) 3734 return Member; 3735 if (DT->dominates(getBlockForValue(MemberInst), BB)) 3736 return Member; 3737 } 3738 return nullptr; 3739 } 3740 3741 bool NewGVN::eliminateInstructions(Function &F) { 3742 // This is a non-standard eliminator. The normal way to eliminate is 3743 // to walk the dominator tree in order, keeping track of available 3744 // values, and eliminating them. However, this is mildly 3745 // pointless. It requires doing lookups on every instruction, 3746 // regardless of whether we will ever eliminate it. For 3747 // instructions part of most singleton congruence classes, we know we 3748 // will never eliminate them. 
3749 3750 // Instead, this eliminator looks at the congruence classes directly, sorts 3751 // them into a DFS ordering of the dominator tree, and then we just 3752 // perform elimination straight on the sets by walking the congruence 3753 // class member uses in order, and eliminate the ones dominated by the 3754 // last member. This is worst case O(E log E) where E = number of 3755 // instructions in a single congruence class. In theory, this is all 3756 // instructions. In practice, it is much faster, as most instructions are 3757 // either in singleton congruence classes or can't possibly be eliminated 3758 // anyway (if there are no overlapping DFS ranges in class). 3759 // When we find something not dominated, it becomes the new leader 3760 // for elimination purposes. 3761 // TODO: If we wanted to be faster, We could remove any members with no 3762 // overlapping ranges while sorting, as we will never eliminate anything 3763 // with those members, as they don't dominate anything else in our set. 3764 3765 bool AnythingReplaced = false; 3766 3767 // Since we are going to walk the domtree anyway, and we can't guarantee the 3768 // DFS numbers are updated, we compute some ourselves. 3769 DT->updateDFSNumbers(); 3770 3771 // Go through all of our phi nodes, and kill the arguments associated with 3772 // unreachable edges. 3773 auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) { 3774 for (auto &Operand : PHI->incoming_values()) 3775 if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) { 3776 LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI 3777 << " for block " 3778 << getBlockName(PHI->getIncomingBlock(Operand)) 3779 << " with undef due to it being unreachable\n"); 3780 Operand.set(UndefValue::get(PHI->getType())); 3781 } 3782 }; 3783 // Replace unreachable phi arguments. 3784 // At this point, RevisitOnReachabilityChange only contains: 3785 // 3786 // 1. PHIs 3787 // 2. Temporaries that will convert to PHIs 3788 // 3. Operations that are affected by an unreachable edge but do not fit into 3789 // 1 or 2 (rare). 3790 // So it is a slight overshoot of what we want. We could make it exact by 3791 // using two SparseBitVectors per block. 3792 DenseMap<const BasicBlock *, unsigned> ReachablePredCount; 3793 for (auto &KV : ReachableEdges) 3794 ReachablePredCount[KV.getEnd()]++; 3795 for (auto &BBPair : RevisitOnReachabilityChange) { 3796 for (auto InstNum : BBPair.second) { 3797 auto *Inst = InstrFromDFSNum(InstNum); 3798 auto *PHI = dyn_cast<PHINode>(Inst); 3799 PHI = PHI ? PHI : dyn_cast_or_null<PHINode>(RealToTemp.lookup(Inst)); 3800 if (!PHI) 3801 continue; 3802 auto *BB = BBPair.first; 3803 if (ReachablePredCount.lookup(BB) != PHI->getNumIncomingValues()) 3804 ReplaceUnreachablePHIArgs(PHI, BB); 3805 } 3806 } 3807 3808 // Map to store the use counts 3809 DenseMap<const Value *, unsigned int> UseCounts; 3810 for (auto *CC : reverse(CongruenceClasses)) { 3811 LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() 3812 << "\n"); 3813 // Track the equivalent store info so we can decide whether to try 3814 // dead store elimination. 3815 SmallVector<ValueDFS, 8> PossibleDeadStores; 3816 SmallPtrSet<Instruction *, 8> ProbablyDead; 3817 if (CC->isDead() || CC->empty()) 3818 continue; 3819 // Everything still in the TOP class is unreachable or dead. 
3820 if (CC == TOPClass) {
3821 for (auto M : *CC) {
3822 auto *VTE = ValueToExpression.lookup(M);
3823 if (VTE && isa<DeadExpression>(VTE))
3824 markInstructionForDeletion(cast<Instruction>(M));
3825 assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) ||
3826 InstructionsToErase.count(cast<Instruction>(M))) &&
3827 "Everything in TOP should be unreachable or dead at this "
3828 "point");
3829 }
3830 continue;
3831 }
3832 
3833 assert(CC->getLeader() && "We should have had a leader");
3834 // If this is a leader that is always available, and it's a
3835 // constant or has no equivalences, just replace everything with
3836 // it. We then update the congruence class with whatever members
3837 // are left.
3838 Value *Leader =
3839 CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
3840 if (alwaysAvailable(Leader)) {
3841 CongruenceClass::MemberSet MembersLeft;
3842 for (auto M : *CC) {
3843 Value *Member = M;
3844 // Void things have no uses we can replace.
3845 if (Member == Leader || !isa<Instruction>(Member) ||
3846 Member->getType()->isVoidTy()) {
3847 MembersLeft.insert(Member);
3848 continue;
3849 }
3850 LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for "
3851 << *Member << "\n");
3852 auto *I = cast<Instruction>(Member);
3853 assert(Leader != I && "About to accidentally remove our leader");
3854 replaceInstruction(I, Leader);
3855 AnythingReplaced = true;
3856 }
3857 CC->swap(MembersLeft);
3858 } else {
3859 // If this is a singleton, we can skip it.
3860 if (CC->size() != 1 || RealToTemp.count(Leader)) {
3861 // This is a stack because equality replacement/etc may place
3862 // constants in the middle of the member list, and we want to use
3863 // those constant values in preference to the current leader, over
3864 // the scope of those constants.
3865 ValueDFSStack EliminationStack;
3866 
3867 // Convert the members to DFS ordered sets and then merge them.
3868 SmallVector<ValueDFS, 8> DFSOrderedSet;
3869 convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
3870 
3871 // Sort the whole thing.
3872 llvm::sort(DFSOrderedSet);
3873 for (auto &VD : DFSOrderedSet) {
3874 int MemberDFSIn = VD.DFSIn;
3875 int MemberDFSOut = VD.DFSOut;
3876 Value *Def = VD.Def.getPointer();
3877 bool FromStore = VD.Def.getInt();
3878 Use *U = VD.U;
3879 // We ignore void things because we can't get a value from them.
3880 if (Def && Def->getType()->isVoidTy())
3881 continue;
3882 auto *DefInst = dyn_cast_or_null<Instruction>(Def);
3883 if (DefInst && AllTempInstructions.count(DefInst)) {
3884 auto *PN = cast<PHINode>(DefInst);
3885 
3886 // If this is a value phi and that's the expression we used, insert
3887 // it into the program and remove it from the
3888 // temp instruction list.
3889 AllTempInstructions.erase(PN);
3890 auto *DefBlock = getBlockForValue(Def);
3891 LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops " << *Def
3892 << " into block "
3893 << getBlockName(getBlockForValue(Def)) << "\n");
3894 PN->insertBefore(&DefBlock->front());
3895 Def = PN;
3896 NumGVNPHIOfOpsEliminations++;
3897 }
3898 
3899 if (EliminationStack.empty()) {
3900 LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n");
3901 } else {
3902 LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
3903 << EliminationStack.dfs_back().first << ","
3904 << EliminationStack.dfs_back().second << ")\n");
3905 }
3906 
3907 LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
3908 << MemberDFSOut << ")\n");
3909 // First, we see if we are out of scope or empty. If so,
3910 // and there are equivalences, we try to replace the top of
3911 // stack with equivalences (if it's on the stack, it must
3912 // not have been eliminated yet).
3913 // Then we synchronize to our current scope by
3914 // popping until we are back within a DFS scope that
3915 // dominates the current member.
3916 // Then, what happens depends on a few factors:
3917 // If the stack is now empty, we need to push.
3918 // If we have a constant or a local equivalence we want to
3919 // start using, we also push.
3920 // Otherwise, we walk along, processing members who are
3921 // dominated by this scope, and eliminate them.
3922 bool ShouldPush = Def && EliminationStack.empty();
3923 bool OutOfScope =
3924 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut);
3925 
3926 if (OutOfScope || ShouldPush) {
3927 // Sync to our current scope.
3928 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
3929 bool ShouldPush = Def && EliminationStack.empty();
3930 if (ShouldPush) {
3931 EliminationStack.push_back(Def, MemberDFSIn, MemberDFSOut);
3932 }
3933 }
3934 
3935 // Skip the Defs; we only want to eliminate on their uses. But mark
3936 // dominated defs as dead.
3937 if (Def) {
3938 // For anything in this case, what and how we value number
3939 // guarantees that any side-effects that would have occurred (ie
3940 // throwing, etc) can be proven to either still occur (because it's
3941 // dominated by something that has the same side-effects), or never
3942 // occur. Otherwise, we would not have been able to prove it value
3943 // equivalent to something else. For these things, we can just mark
3944 // it all dead. Note that this is different from the "ProbablyDead"
3945 // set, whose members may not be dominated by anything, and thus are only
3946 // easy to prove dead if they are also side-effect free. Note that
3947 // because stores are put in terms of the stored value, we skip
3948 // stored values here. If the stored value is really dead, it will
3949 // still be marked for deletion when we process it in its own class.
3950 if (!EliminationStack.empty() && Def != EliminationStack.back() &&
3951 isa<Instruction>(Def) && !FromStore)
3952 markInstructionForDeletion(cast<Instruction>(Def));
3953 continue;
3954 }
3955 // At this point, we know it is a Use we are trying to possibly
3956 // replace.
3957 
3958 assert(isa<Instruction>(U->get()) &&
3959 "Current def should have been an instruction");
3960 assert(isa<Instruction>(U->getUser()) &&
3961 "Current user should have been an instruction");
3962 
3963 // If the thing we are replacing into is already marked to be dead,
3964 // this use is dead. Note that this is true regardless of whether
3965 // we have anything dominating the use or not. We do this here
3966 // because we are already walking all the uses anyway.
3967 Instruction *InstUse = cast<Instruction>(U->getUser());
3968 if (InstructionsToErase.count(InstUse)) {
3969 auto &UseCount = UseCounts[U->get()];
3970 if (--UseCount == 0) {
3971 ProbablyDead.insert(cast<Instruction>(U->get()));
3972 }
3973 }
3974 
3975 // If we get to this point, and the stack is empty, we must have a use
3976 // with nothing we can use to eliminate this use, so just skip it.
3977 if (EliminationStack.empty()) 3978 continue; 3979 3980 Value *DominatingLeader = EliminationStack.back(); 3981 3982 auto *II = dyn_cast<IntrinsicInst>(DominatingLeader); 3983 bool isSSACopy = II && II->getIntrinsicID() == Intrinsic::ssa_copy; 3984 if (isSSACopy) 3985 DominatingLeader = II->getOperand(0); 3986 3987 // Don't replace our existing users with ourselves. 3988 if (U->get() == DominatingLeader) 3989 continue; 3990 LLVM_DEBUG(dbgs() 3991 << "Found replacement " << *DominatingLeader << " for " 3992 << *U->get() << " in " << *(U->getUser()) << "\n"); 3993 3994 // If we replaced something in an instruction, handle the patching of 3995 // metadata. Skip this if we are replacing predicateinfo with its 3996 // original operand, as we already know we can just drop it. 3997 auto *ReplacedInst = cast<Instruction>(U->get()); 3998 auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst); 3999 if (!PI || DominatingLeader != PI->OriginalOp) 4000 patchReplacementInstruction(ReplacedInst, DominatingLeader); 4001 U->set(DominatingLeader); 4002 // This is now a use of the dominating leader, which means if the 4003 // dominating leader was dead, it's now live! 4004 auto &LeaderUseCount = UseCounts[DominatingLeader]; 4005 // It's about to be alive again. 4006 if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader)) 4007 ProbablyDead.erase(cast<Instruction>(DominatingLeader)); 4008 // For copy instructions, we use their operand as a leader, 4009 // which means we remove a user of the copy and it may become dead. 4010 if (isSSACopy) { 4011 unsigned &IIUseCount = UseCounts[II]; 4012 if (--IIUseCount == 0) 4013 ProbablyDead.insert(II); 4014 } 4015 ++LeaderUseCount; 4016 AnythingReplaced = true; 4017 } 4018 } 4019 } 4020 4021 // At this point, anything still in the ProbablyDead set is actually dead if 4022 // would be trivially dead. 4023 for (auto *I : ProbablyDead) 4024 if (wouldInstructionBeTriviallyDead(I)) 4025 markInstructionForDeletion(I); 4026 4027 // Cleanup the congruence class. 4028 CongruenceClass::MemberSet MembersLeft; 4029 for (auto *Member : *CC) 4030 if (!isa<Instruction>(Member) || 4031 !InstructionsToErase.count(cast<Instruction>(Member))) 4032 MembersLeft.insert(Member); 4033 CC->swap(MembersLeft); 4034 4035 // If we have possible dead stores to look at, try to eliminate them. 4036 if (CC->getStoreCount() > 0) { 4037 convertClassToLoadsAndStores(*CC, PossibleDeadStores); 4038 llvm::sort(PossibleDeadStores); 4039 ValueDFSStack EliminationStack; 4040 for (auto &VD : PossibleDeadStores) { 4041 int MemberDFSIn = VD.DFSIn; 4042 int MemberDFSOut = VD.DFSOut; 4043 Instruction *Member = cast<Instruction>(VD.Def.getPointer()); 4044 if (EliminationStack.empty() || 4045 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) { 4046 // Sync to our current scope. 4047 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut); 4048 if (EliminationStack.empty()) { 4049 EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut); 4050 continue; 4051 } 4052 } 4053 // We already did load elimination, so nothing to do here. 
4054 if (isa<LoadInst>(Member))
4055 continue;
4056 assert(!EliminationStack.empty());
4057 Instruction *Leader = cast<Instruction>(EliminationStack.back());
4058 (void)Leader;
4059 assert(DT->dominates(Leader->getParent(), Member->getParent()));
4060 // Member is dominated by Leader, and thus dead.
4061 LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
4062 << " that is dominated by " << *Leader << "\n");
4063 markInstructionForDeletion(Member);
4064 CC->erase(Member);
4065 ++NumGVNDeadStores;
4066 }
4067 }
4068 }
4069 return AnythingReplaced;
4070 }
4071 
4072 // This function provides global ranking of operations so that we can place them
4073 // in a canonical order. Note that rank alone is not necessarily enough for a
4074 // complete ordering, as constants all have the same rank. However, generally,
4075 // we will simplify an operation with all constants so that it doesn't matter
4076 // what order they appear in.
4077 unsigned int NewGVN::getRank(const Value *V) const {
4078 // Prefer constants to undef to anything else.
4079 // Undef is a constant, have to check it first.
4080 // Prefer smaller constants to constantexprs.
4081 if (isa<ConstantExpr>(V))
4082 return 2;
4083 if (isa<UndefValue>(V))
4084 return 1;
4085 if (isa<Constant>(V))
4086 return 0;
4087 else if (auto *A = dyn_cast<Argument>(V))
4088 return 3 + A->getArgNo();
4089 
4090 // Need to shift the instruction DFS by number of arguments + 3 to account for
4091 // the constant and argument ranking above.
4092 unsigned Result = InstrToDFSNum(V);
4093 if (Result > 0)
4094 return 4 + NumFuncArgs + Result;
4095 // Unreachable or something else, just return a really large number.
4096 return ~0;
4097 }
4098 
4099 // This is a function that says whether two commutative operations should
4100 // have their order swapped when canonicalizing.
4101 bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const {
4102 // Because we only care about a total ordering, and don't rewrite expressions
4103 // in this order, we order by rank, which will give a strict weak ordering to
4104 // everything but constants, and then we order by pointer address.
4105 return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B);
4106 }
4107 
4108 namespace {
4109 
4110 class NewGVNLegacyPass : public FunctionPass {
4111 public:
4112 // Pass identification, replacement for typeid.
4113 static char ID; 4114 4115 NewGVNLegacyPass() : FunctionPass(ID) { 4116 initializeNewGVNLegacyPassPass(*PassRegistry::getPassRegistry()); 4117 } 4118 4119 bool runOnFunction(Function &F) override; 4120 4121 private: 4122 void getAnalysisUsage(AnalysisUsage &AU) const override { 4123 AU.addRequired<AssumptionCacheTracker>(); 4124 AU.addRequired<DominatorTreeWrapperPass>(); 4125 AU.addRequired<TargetLibraryInfoWrapperPass>(); 4126 AU.addRequired<MemorySSAWrapperPass>(); 4127 AU.addRequired<AAResultsWrapperPass>(); 4128 AU.addPreserved<DominatorTreeWrapperPass>(); 4129 AU.addPreserved<GlobalsAAWrapperPass>(); 4130 } 4131 }; 4132 4133 } // end anonymous namespace 4134 4135 bool NewGVNLegacyPass::runOnFunction(Function &F) { 4136 if (skipFunction(F)) 4137 return false; 4138 return NewGVN(F, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 4139 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 4140 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 4141 &getAnalysis<AAResultsWrapperPass>().getAAResults(), 4142 &getAnalysis<MemorySSAWrapperPass>().getMSSA(), 4143 F.getParent()->getDataLayout()) 4144 .runGVN(); 4145 } 4146 4147 char NewGVNLegacyPass::ID = 0; 4148 4149 INITIALIZE_PASS_BEGIN(NewGVNLegacyPass, "newgvn", "Global Value Numbering", 4150 false, false) 4151 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 4152 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 4153 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 4154 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 4155 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 4156 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 4157 INITIALIZE_PASS_END(NewGVNLegacyPass, "newgvn", "Global Value Numbering", false, 4158 false) 4159 4160 // createGVNPass - The public interface to this file. 4161 FunctionPass *llvm::createNewGVNPass() { return new NewGVNLegacyPass(); } 4162 4163 PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) { 4164 // Apparently the order in which we get these results matter for 4165 // the old GVN (see Chandler's comment in GVN.cpp). I'll keep 4166 // the same order here, just in case. 4167 auto &AC = AM.getResult<AssumptionAnalysis>(F); 4168 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 4169 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 4170 auto &AA = AM.getResult<AAManager>(F); 4171 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 4172 bool Changed = 4173 NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout()) 4174 .runGVN(); 4175 if (!Changed) 4176 return PreservedAnalyses::all(); 4177 PreservedAnalyses PA; 4178 PA.preserve<DominatorTreeAnalysis>(); 4179 PA.preserve<GlobalsAA>(); 4180 return PA; 4181 } 4182
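// Usage sketch (illustrative only, not part of the pass itself): the new pass
// manager entry point above can be driven from C++ roughly as
//   FunctionPassManager FPM;
//   FPM.addPass(NewGVNPass());
//   FPM.run(F, FAM);   // FAM: a FunctionAnalysisManager with analyses registered
// or from the command line with "opt -passes=newgvn -S input.ll".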