//===- NewGVN.cpp - Global Value Numbering Pass ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements LLVM's new Global Value Numbering pass.
/// GVN partitions values computed by a function into congruence classes.
/// Values ending up in the same congruence class are guaranteed to be the same
/// for every execution of the program. In that respect, congruency is a
/// compile-time approximation of equivalence of values at runtime.
/// The algorithm implemented here uses a sparse formulation and is based on
/// the ideas described in the paper
/// "A Sparse Algorithm for Predicated Global Value Numbering" by
/// Karthik Gargi.
///
/// A brief overview of the algorithm: The algorithm is essentially the same as
/// the standard RPO value numbering algorithm (a good reference is the paper
/// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
/// The RPO algorithm proceeds, on every iteration, to process every reachable
/// block and every instruction in that block. This is because the standard RPO
/// algorithm does not track what things have the same value number, it only
/// tracks what the value number of a given operation is (the mapping is
/// operation -> value number). Thus, when the value number of an operation
/// changes, it must reprocess everything to ensure all uses of a value number
/// get updated properly. In contrast, the sparse algorithm we use *also*
/// tracks what operations have a given value number (IE it also tracks the
/// reverse mapping from value number -> operations with that value number), so
/// that it only needs to reprocess the instructions that are affected when
/// something's value number changes. The vast majority of complexity and code
/// in this file is devoted to tracking what value numbers could change for what
/// instructions when various things happen. The rest of the algorithm is
/// devoted to performing symbolic evaluation, forward propagation, and
/// simplification of operations based on the value numbers deduced so far.
///
/// In order to make the GVN mostly-complete, we use a technique derived from
/// "Detection of Redundant Expressions: A Complete and Polynomial-time
/// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
/// based GVN algorithms is related to their inability to detect equivalence
/// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
/// We resolve this issue by generating the equivalent "phi of ops" form for
/// each op of phis we see, in a way that only takes polynomial time to resolve.
///
/// We also do not perform elimination by using any published algorithm. All
/// published algorithms are O(Instructions). Instead, we use a technique that
/// is O(number of operations with the same value number), enabling us to skip
/// trying to eliminate things that have unique value numbers.
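///
/// As an illustrative sketch of the phi-of-ops transformation mentioned above
/// (hypothetical IR, not taken from a specific test): given
///   %p = phi i32 [ %a, %bb1 ], [ %c, %bb2 ]
///   %q = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///   %r = add i32 %p, %q
/// we construct an equivalent "phi of ops"
///   %r.phiofops = phi i32 [ %a.plus.b, %bb1 ], [ %c.plus.d, %bb2 ]
/// where %a.plus.b and %c.plus.d stand for the operation translated into each
/// predecessor, so %r can be value numbered together with an existing
/// phi(a+b, c+d) if one exists.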
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NewGVN.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::GVNExpression;
using namespace llvm::VNCoercion;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "newgvn"

STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
STATISTIC(NumGVNPhisAllSame, "Number of PHIs whose arguments are all the same");
STATISTIC(NumGVNMaxIterations,
          "Maximum Number of iterations it took to converge GVN");
STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
STATISTIC(NumGVNAvoidedSortedLeaderChanges,
          "Number of avoided sorted leader changes");
STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
STATISTIC(NumGVNPHIOfOpsEliminations,
          "Number of things eliminated using PHI of ops");
DEBUG_COUNTER(VNCounter, "newgvn-vn",
              "Controls which instructions are value numbered");
DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
              "Controls which instructions we create phi of ops for");

// Currently store defining access refinement is too slow due to basicaa being
// egregiously slow. This flag lets us keep it working while we work on this
// issue.
static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
                                           cl::init(false), cl::Hidden);

/// Currently, the generation of "phi of ops" can result in correctness issues.
static cl::opt<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(true),
                                    cl::Hidden);

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

// Anchor methods.
namespace llvm {
namespace GVNExpression {

Expression::~Expression() = default;
BasicExpression::~BasicExpression() = default;
CallExpression::~CallExpression() = default;
LoadExpression::~LoadExpression() = default;
StoreExpression::~StoreExpression() = default;
AggregateValueExpression::~AggregateValueExpression() = default;
PHIExpression::~PHIExpression() = default;

} // end namespace GVNExpression
} // end namespace llvm

namespace {

// Tarjan's SCC finding algorithm with Nuutila's improvements.
// SCCIterator is actually fairly complex for the simple thing we want.
// It also wants to hand us SCC's that are unrelated to the phi node we ask
// about, and have us process them there or risk redoing work.
// Graph traits over a filter iterator also doesn't work that well here.
// This SCC finder is specialized to walk use-def chains, and only follows
// instructions, not generic values (arguments, etc).
struct TarjanSCC {
  TarjanSCC() : Components(1) {}

  void Start(const Instruction *Start) {
    if (Root.lookup(Start) == 0)
      FindSCC(Start);
  }

  const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
    unsigned ComponentID = ValueToComponent.lookup(V);

    assert(ComponentID > 0 &&
           "Asking for a component for a value we never processed");
    return Components[ComponentID];
  }

private:
  void FindSCC(const Instruction *I) {
    Root[I] = ++DFSNum;
    // Store the DFS Number we had before it possibly gets incremented.
    unsigned int OurDFS = DFSNum;
    for (auto &Op : I->operands()) {
      if (auto *InstOp = dyn_cast<Instruction>(Op)) {
        if (Root.lookup(Op) == 0)
          FindSCC(InstOp);
        if (!InComponent.count(Op))
          Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
      }
    }
    // See if we really were the root of a component, by seeing if we still
    // have our DFSNumber. If we do, we are the root of the component, and we
    // have completed a component. If we do not, we are not the root of a
    // component, and belong on the component stack.
    if (Root.lookup(I) == OurDFS) {
      unsigned ComponentID = Components.size();
      Components.resize(Components.size() + 1);
      auto &Component = Components.back();
      Component.insert(I);
      LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
      InComponent.insert(I);
      ValueToComponent[I] = ComponentID;
      // Pop a component off the stack and label it.
      while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
        auto *Member = Stack.back();
        LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
        Component.insert(Member);
        InComponent.insert(Member);
        ValueToComponent[Member] = ComponentID;
        Stack.pop_back();
      }
    } else {
      // Part of a component, push to stack.
      Stack.push_back(I);
    }
  }

  unsigned int DFSNum = 1;
  SmallPtrSet<const Value *, 8> InComponent;
  DenseMap<const Value *, unsigned int> Root;
  SmallVector<const Value *, 8> Stack;

  // Store the components as vector of ptr sets, because we need the topo order
  // of SCC's, but not individual member order.
  SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;

  DenseMap<const Value *, unsigned> ValueToComponent;
};

// Congruence classes represent the set of expressions/instructions
// that are all the same *during some scope in the function*.
// That is, because of the way we perform equality propagation, and
// because of memory value numbering, it is not correct to assume
// you can willy-nilly replace any member with any other at any
// point in the function.
//
// For any Value in the Member set, it is valid to replace any dominated member
// with that Value.
//
// Every congruence class has a leader, and the leader is used to symbolize
// instructions in a canonical way (IE every operand of an instruction that is
// a member of the same congruence class will always be replaced with the
// leader during symbolization). To simplify symbolization, we keep the leader
// as a constant if the class can be proved to be a constant value. Otherwise,
// the leader is the member of the value set with the smallest DFS number.
// Each congruence class also has a defining expression, though the expression
// may be null. If it exists, it can be used for forward propagation and
// reassociation of values.
//
// For memory, we also track a representative MemoryAccess, and a set of memory
// members for MemoryPhis (which have no real instructions). Note that for
// memory, it seems tempting to try to split the memory members into a
// MemoryCongruenceClass or something. Unfortunately, this does not work
// easily. The value numbering of a given memory expression depends on the
// leader of the memory congruence class, and the leader of a memory congruence
// class depends on the value numbering of a given memory expression. This
// leads to wasted propagation, and in some cases, missed optimization. For
// example: If we had value numbered two stores together before, but now do
// not, we move them to a new value congruence class. This in turn will move
// at least one of the memorydefs to a new memory congruence class. Which in
// turn, affects the value numbering of the stores we just value numbered
// (because the memory congruence class is part of the value number).
// So while theoretically possible to split them up, it turns out to be
// *incredibly* complicated to get it to work right, because of the
// interdependency. While structurally slightly messier, it is algorithmically
// much simpler and faster to do what we do here, and track them both at once
// in the same class.
// Note: The default iterators for this class iterate over values.
class CongruenceClass {
public:
  using MemberType = Value;
  using MemberSet = SmallPtrSet<MemberType *, 4>;
  using MemoryMemberType = MemoryPhi;
  using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>;

  explicit CongruenceClass(unsigned ID) : ID(ID) {}
  CongruenceClass(unsigned ID, Value *Leader, const Expression *E)
      : ID(ID), RepLeader(Leader), DefiningExpr(E) {}

  unsigned getID() const { return ID; }

  // True if this class has no members left. This is mainly used for assertion
  // purposes, and for skipping empty classes.
  bool isDead() const {
    // If it's both dead from a value perspective, and dead from a memory
    // perspective, it's really dead.
    return empty() && memory_empty();
  }

  // Leader functions.
  Value *getLeader() const { return RepLeader; }
  void setLeader(Value *Leader) { RepLeader = Leader; }
  const std::pair<Value *, unsigned int> &getNextLeader() const {
    return NextLeader;
  }
  void resetNextLeader() { NextLeader = {nullptr, ~0}; }
  void addPossibleNextLeader(std::pair<Value *, unsigned int> LeaderPair) {
    if (LeaderPair.second < NextLeader.second)
      NextLeader = LeaderPair;
  }

  Value *getStoredValue() const { return RepStoredValue; }
  void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
  const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
  void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }

  // Forward propagation info.
  const Expression *getDefiningExpr() const { return DefiningExpr; }

  // Value member set.
  bool empty() const { return Members.empty(); }
  unsigned size() const { return Members.size(); }
  MemberSet::const_iterator begin() const { return Members.begin(); }
  MemberSet::const_iterator end() const { return Members.end(); }
  void insert(MemberType *M) { Members.insert(M); }
  void erase(MemberType *M) { Members.erase(M); }
  void swap(MemberSet &Other) { Members.swap(Other); }

  // Memory member set.
  bool memory_empty() const { return MemoryMembers.empty(); }
  unsigned memory_size() const { return MemoryMembers.size(); }
  MemoryMemberSet::const_iterator memory_begin() const {
    return MemoryMembers.begin();
  }
  MemoryMemberSet::const_iterator memory_end() const {
    return MemoryMembers.end();
  }
  iterator_range<MemoryMemberSet::const_iterator> memory() const {
    return make_range(memory_begin(), memory_end());
  }

  void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(M); }
  void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(M); }

  // Store count.
  unsigned getStoreCount() const { return StoreCount; }
  void incStoreCount() { ++StoreCount; }
  void decStoreCount() {
    assert(StoreCount != 0 && "Store count went negative");
    --StoreCount;
  }

  // True if this class has no memory members.
  bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }

  // Return true if two congruence classes are equivalent to each other. This
  // means that every field but the ID number and the dead field are
  // equivalent.
  bool isEquivalentTo(const CongruenceClass *Other) const {
    if (!Other)
      return false;
    if (this == Other)
      return true;

    if (std::tie(StoreCount, RepLeader, RepStoredValue, RepMemoryAccess) !=
        std::tie(Other->StoreCount, Other->RepLeader, Other->RepStoredValue,
                 Other->RepMemoryAccess))
      return false;
    if (DefiningExpr != Other->DefiningExpr)
      if (!DefiningExpr || !Other->DefiningExpr ||
          *DefiningExpr != *Other->DefiningExpr)
        return false;

    if (Members.size() != Other->Members.size())
      return false;

    return llvm::set_is_subset(Members, Other->Members);
  }

private:
  unsigned ID;

  // Representative leader.
  Value *RepLeader = nullptr;

  // The most dominating leader after our current leader, because the member
  // set is not sorted and is expensive to keep sorted all the time.
  std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U};

  // If this is represented by a store, the value of the store.
  Value *RepStoredValue = nullptr;

  // If this class contains MemoryDefs or MemoryPhis, this is the leading
  // memory access.
  const MemoryAccess *RepMemoryAccess = nullptr;

  // Defining Expression.
  const Expression *DefiningExpr = nullptr;

  // Actual members of this class.
  MemberSet Members;

  // This is the set of MemoryPhis that exist in the class. MemoryDefs and
  // MemoryUses have real instructions representing them, so we only need to
  // track MemoryPhis here.
  MemoryMemberSet MemoryMembers;

  // Number of stores in this congruence class.
  // This is used so we can detect store equivalence changes properly.
  int StoreCount = 0;
};

} // end anonymous namespace

namespace llvm {

struct ExactEqualsExpression {
  const Expression &E;

  explicit ExactEqualsExpression(const Expression &E) : E(E) {}

  hash_code getComputedHash() const { return E.getComputedHash(); }

  bool operator==(const Expression &Other) const {
    return E.exactlyEquals(Other);
  }
};

template <> struct DenseMapInfo<const Expression *> {
  static const Expression *getEmptyKey() {
    auto Val = static_cast<uintptr_t>(-1);
    Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
    return reinterpret_cast<const Expression *>(Val);
  }

  static const Expression *getTombstoneKey() {
    auto Val = static_cast<uintptr_t>(~1U);
    Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
    return reinterpret_cast<const Expression *>(Val);
  }

  static unsigned getHashValue(const Expression *E) {
    return E->getComputedHash();
  }

  static unsigned getHashValue(const ExactEqualsExpression &E) {
    return E.getComputedHash();
  }

  static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) {
    if (RHS == getTombstoneKey() || RHS == getEmptyKey())
      return false;
    return LHS == *RHS;
  }

  static bool isEqual(const Expression *LHS, const Expression *RHS) {
    if (LHS == RHS)
      return true;
    if (LHS == getTombstoneKey() || RHS == getTombstoneKey() ||
        LHS == getEmptyKey() || RHS == getEmptyKey())
      return false;
    // Compare hashes before equality. This is *not* what the hashtable does,
    // since it is computing it modulo the number of buckets, whereas we are
    // using the full hash keyspace. Since the hashes are precomputed, this
    // check is *much* faster than equality.
    if (LHS->getComputedHash() != RHS->getComputedHash())
      return false;
    return *LHS == *RHS;
  }
};

} // end namespace llvm

namespace {

class NewGVN {
  Function &F;
  DominatorTree *DT = nullptr;
  const TargetLibraryInfo *TLI = nullptr;
  AliasAnalysis *AA = nullptr;
  MemorySSA *MSSA = nullptr;
  MemorySSAWalker *MSSAWalker = nullptr;
  AssumptionCache *AC = nullptr;
  const DataLayout &DL;
  std::unique_ptr<PredicateInfo> PredInfo;

  // These are the only two things the create* functions should have
  // side-effects on due to allocating memory.
  mutable BumpPtrAllocator ExpressionAllocator;
  mutable ArrayRecycler<Value *> ArgRecycler;
  mutable TarjanSCC SCCFinder;
  const SimplifyQuery SQ;

  // Number of function arguments, used by ranking.
  unsigned int NumFuncArgs = 0;

  // RPOOrdering of basic blocks.
  DenseMap<const DomTreeNode *, unsigned> RPOOrdering;

  // Congruence class info.

  // This class is called INITIAL in the paper. It is the class everything
  // starts out in, and represents any value. Being an optimistic analysis,
  // anything in the TOP class has the value TOP, which is indeterminate and
  // equivalent to everything.
  CongruenceClass *TOPClass = nullptr;
  std::vector<CongruenceClass *> CongruenceClasses;
  unsigned NextCongruenceNum = 0;

  // Value Mappings.
  DenseMap<Value *, CongruenceClass *> ValueToClass;
  DenseMap<Value *, const Expression *> ValueToExpression;

  // Value PHI handling, used to make equivalence between phi(op, op) and
  // op(phi, phi).
  // These mappings just store various data that would normally be part of the
  // IR.
  SmallPtrSet<const Instruction *, 8> PHINodeUses;

  DenseMap<const Value *, bool> OpSafeForPHIOfOps;

  // Map a temporary instruction we created to a parent block.
  DenseMap<const Value *, BasicBlock *> TempToBlock;

  // Map between the already in-program instructions and the temporary phis we
  // created that they are known equivalent to.
  DenseMap<const Value *, PHINode *> RealToTemp;

  // In order to know when we should re-process instructions that have
  // phi-of-ops, we track the set of expressions that they needed as leaders.
  // When we discover new leaders for those expressions, we process the
  // associated phi-of-op instructions again in case they have changed. The
  // other way they may change is if they had leaders, and those leaders
  // disappear. However, at the point they have leaders, there are uses of the
  // relevant operands in the created phi node, and so they will get
  // reprocessed through the normal user marking we perform.
  mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
  DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
      ExpressionToPhiOfOps;

  // Map from temporary operation to MemoryAccess.
  DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;

  // Set of all temporary instructions we created.
  // Note: This will include instructions that were just created during value
  // numbering. The way to test if something is using them is to check
  // RealToTemp.
  DenseSet<Instruction *> AllTempInstructions;

  // This is the set of instructions to revisit on a reachability change. At
  // the end of the main iteration loop it will contain at least all the phi of
  // ops instructions that will be changed to phis, as well as regular phis.
  // During the iteration loop, it may contain other things, such as phi of ops
  // instructions that used edge reachability to reach a result, and so need to
  // be revisited when the edge changes, independent of whether the phi they
  // depended on changes.
  DenseMap<BasicBlock *, SparseBitVector<>> RevisitOnReachabilityChange;

  // Mapping from predicate info we used to the instructions we used it with.
  // In order to correctly ensure propagation, we must keep track of what
  // comparisons we used, so that when the values of the comparisons change, we
  // propagate the information to the places we used the comparison.
  mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>>
      PredicateToUsers;

  // The same reasoning as for PredicateToUsers applies here. When we skip
  // MemoryAccesses for stores, we can no longer rely solely on the def-use
  // chains of MemorySSA.
  mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>>
      MemoryToUsers;

  // A table storing which memorydefs/phis represent a memory state provably
  // equivalent to another memory state.
  // We could use the congruence class machinery, but the MemoryAccess's are
  // abstract memory states, so they can only ever be equivalent to each other,
  // and not to constants, etc.
  DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass;

  // We could, if we wanted, build MemoryPhiExpressions and
  // MemoryVariableExpressions, etc, and value number them the same way we
  // value number phi expressions. For the moment, this seems like overkill.
  // They can only exist in one of three states: they can be TOP (equal to
  // everything), equivalent to something else, or unique. Because we do not
  // create expressions for them, we need to simulate leader change not just
  // when they change class, but when they change state. Note: We can do the
  // same thing for phis, and avoid having phi expressions, if we wanted. We
  // should eventually unify in one direction or the other, so this is a little
  // bit of an experiment in which direction turns out easier to maintain.
  enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique };
  DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState;

  enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle };
  mutable DenseMap<const Instruction *, InstCycleState> InstCycleState;

  // Expression to class mapping.
  using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>;
  ExpressionClassMap ExpressionToClass;

  // We have a single expression that represents currently DeadExpressions.
  // For dead expressions we can prove will stay dead, we mark them with
  // DFS number zero. However, it's possible in the case of phi nodes
  // for us to assume/prove all arguments are dead during fixpointing.
  // We use DeadExpression for that case.
  DeadExpression *SingletonDeadExpression = nullptr;

  // Which values have changed as a result of leader changes.
  SmallPtrSet<Value *, 8> LeaderChanges;

  // Reachability info.
  using BlockEdge = BasicBlockEdge;
  DenseSet<BlockEdge> ReachableEdges;
  SmallPtrSet<const BasicBlock *, 8> ReachableBlocks;

  // This is a bitvector because, on larger functions, we may have
  // thousands of touched instructions at once (entire blocks,
  // instructions with hundreds of uses, etc). Even with optimization
  // for when we mark whole blocks as touched, when this was a
  // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
  // the time in GVN just managing this list. The bitvector, on the
  // other hand, efficiently supports test/set/clear of both
  // individual elements and ranges, as well as "find next element".
  // This enables us to use it as a worklist with essentially zero cost.
  BitVector TouchedInstructions;

  DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
  mutable DenseMap<const IntrinsicInst *, const Value *> IntrinsicInstPred;

#ifndef NDEBUG
  // Debugging for how many times each block and instruction got processed.
  DenseMap<const Value *, unsigned> ProcessedCount;
#endif

  // DFS info.
  // This contains a mapping from Instructions to DFS numbers.
  // The numbering starts at 1. An instruction with DFS number zero
  // means that the instruction is dead.
  DenseMap<const Value *, unsigned> InstrDFS;

  // This contains the mapping from DFS numbers to instructions.
  SmallVector<Value *, 32> DFSToInstr;

  // Deletion info.
  SmallPtrSet<Instruction *, 8> InstructionsToErase;

public:
  NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
         TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
         const DataLayout &DL)
      : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), AC(AC), DL(DL),
        PredInfo(std::make_unique<PredicateInfo>(F, *DT, *AC)),
        SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false,
           /*CanUseUndef=*/false) {}

  bool runGVN();

private:
  /// Helper struct that returns an Expression with an optional extra
  /// dependency.
  struct ExprResult {
    const Expression *Expr;
    Value *ExtraDep;
    const PredicateBase *PredDep;

    ExprResult(const Expression *Expr, Value *ExtraDep = nullptr,
               const PredicateBase *PredDep = nullptr)
        : Expr(Expr), ExtraDep(ExtraDep), PredDep(PredDep) {}
    ExprResult(const ExprResult &) = delete;
    ExprResult(ExprResult &&Other)
        : Expr(Other.Expr), ExtraDep(Other.ExtraDep), PredDep(Other.PredDep) {
      Other.Expr = nullptr;
      Other.ExtraDep = nullptr;
      Other.PredDep = nullptr;
    }
    ExprResult &operator=(const ExprResult &Other) = delete;
    ExprResult &operator=(ExprResult &&Other) = delete;

    ~ExprResult() { assert(!ExtraDep && "unhandled ExtraDep"); }

    operator bool() const { return Expr; }

    static ExprResult none() { return {nullptr, nullptr, nullptr}; }
    static ExprResult some(const Expression *Expr, Value *ExtraDep = nullptr) {
      return {Expr, ExtraDep, nullptr};
    }
    static ExprResult some(const Expression *Expr,
                           const PredicateBase *PredDep) {
      return {Expr, nullptr, PredDep};
    }
    static ExprResult some(const Expression *Expr, Value *ExtraDep,
                           const PredicateBase *PredDep) {
      return {Expr, ExtraDep, PredDep};
    }
  };

  // Expression handling.
  ExprResult createExpression(Instruction *) const;
  const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *,
                                           Instruction *) const;

  // Our canonical form for phi arguments is a pair of incoming value, incoming
  // basic block.
  using ValPair = std::pair<Value *, BasicBlock *>;

  PHIExpression *createPHIExpression(ArrayRef<ValPair>, const Instruction *,
                                     BasicBlock *, bool &HasBackEdge,
                                     bool &OriginalOpsConstant) const;
  const DeadExpression *createDeadExpression() const;
  const VariableExpression *createVariableExpression(Value *) const;
  const ConstantExpression *createConstantExpression(Constant *) const;
  const Expression *createVariableOrConstant(Value *V) const;
  const UnknownExpression *createUnknownExpression(Instruction *) const;
  const StoreExpression *createStoreExpression(StoreInst *,
                                               const MemoryAccess *) const;
  LoadExpression *createLoadExpression(Type *, Value *, LoadInst *,
                                       const MemoryAccess *) const;
  const CallExpression *createCallExpression(CallInst *,
                                             const MemoryAccess *) const;
  const AggregateValueExpression *
  createAggregateValueExpression(Instruction *) const;
  bool setBasicExpressionInfo(Instruction *, BasicExpression *) const;

  // Congruence class handling.
  CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) {
    auto *result = new CongruenceClass(NextCongruenceNum++, Leader, E);
    CongruenceClasses.emplace_back(result);
    return result;
  }

  CongruenceClass *createMemoryClass(MemoryAccess *MA) {
    auto *CC = createCongruenceClass(nullptr, nullptr);
    CC->setMemoryLeader(MA);
    return CC;
  }

  CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) {
    auto *CC = getMemoryClass(MA);
    if (CC->getMemoryLeader() != MA)
      CC = createMemoryClass(MA);
    return CC;
  }

  CongruenceClass *createSingletonCongruenceClass(Value *Member) {
    CongruenceClass *CClass = createCongruenceClass(Member, nullptr);
    CClass->insert(Member);
    ValueToClass[Member] = CClass;
    return CClass;
  }

  void initializeCongruenceClasses(Function &F);
  const Expression *makePossiblePHIOfOps(Instruction *,
                                         SmallPtrSetImpl<Value *> &);
  Value *findLeaderForInst(Instruction *ValueOp,
                           SmallPtrSetImpl<Value *> &Visited,
                           MemoryAccess *MemAccess, Instruction *OrigInst,
                           BasicBlock *PredBB);
  bool OpIsSafeForPHIOfOpsHelper(Value *V, const BasicBlock *PHIBlock,
                                 SmallPtrSetImpl<const Value *> &Visited,
                                 SmallVectorImpl<Instruction *> &Worklist);
  bool OpIsSafeForPHIOfOps(Value *Op, const BasicBlock *PHIBlock,
                           SmallPtrSetImpl<const Value *> &);
  void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue);
  void removePhiOfOps(Instruction *I, PHINode *PHITemp);

  // Value number an Instruction or MemoryPhi.
  void valueNumberMemoryPhi(MemoryPhi *);
  void valueNumberInstruction(Instruction *);

  // Symbolic evaluation.
  ExprResult checkExprResults(Expression *, Instruction *, Value *) const;
  ExprResult performSymbolicEvaluation(Value *,
                                       SmallPtrSetImpl<Value *> &) const;
  const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
                                                Instruction *,
                                                MemoryAccess *) const;
  const Expression *performSymbolicLoadEvaluation(Instruction *) const;
  const Expression *performSymbolicStoreEvaluation(Instruction *) const;
  ExprResult performSymbolicCallEvaluation(Instruction *) const;
  void sortPHIOps(MutableArrayRef<ValPair> Ops) const;
  const Expression *performSymbolicPHIEvaluation(ArrayRef<ValPair>,
                                                 Instruction *I,
                                                 BasicBlock *PHIBlock) const;
  const Expression *performSymbolicAggrValueEvaluation(Instruction *) const;
  ExprResult performSymbolicCmpEvaluation(Instruction *) const;
  ExprResult performSymbolicPredicateInfoEvaluation(IntrinsicInst *) const;

  // Congruence finding.
  bool someEquivalentDominates(const Instruction *, const Instruction *) const;
  Value *lookupOperandLeader(Value *) const;
  CongruenceClass *getClassForExpression(const Expression *E) const;
  void performCongruenceFinding(Instruction *, const Expression *);
  void moveValueToNewCongruenceClass(Instruction *, const Expression *,
                                     CongruenceClass *, CongruenceClass *);
  void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *,
                                      CongruenceClass *, CongruenceClass *);
  Value *getNextValueLeader(CongruenceClass *) const;
  const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const;
  bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To);
  CongruenceClass *getMemoryClass(const MemoryAccess *MA) const;
  const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const;
  bool isMemoryAccessTOP(const MemoryAccess *) const;

  // Ranking.
  unsigned int getRank(const Value *) const;
  bool shouldSwapOperands(const Value *, const Value *) const;
  bool shouldSwapOperandsForIntrinsic(const Value *, const Value *,
                                      const IntrinsicInst *I) const;

  // Reachability handling.
  void updateReachableEdge(BasicBlock *, BasicBlock *);
  void processOutgoingEdges(Instruction *, BasicBlock *);
  Value *findConditionEquivalence(Value *) const;

  // Elimination.
  struct ValueDFS;
  void convertClassToDFSOrdered(const CongruenceClass &,
                                SmallVectorImpl<ValueDFS> &,
                                DenseMap<const Value *, unsigned int> &,
                                SmallPtrSetImpl<Instruction *> &) const;
  void convertClassToLoadsAndStores(const CongruenceClass &,
                                    SmallVectorImpl<ValueDFS> &) const;

  bool eliminateInstructions(Function &);
  void replaceInstruction(Instruction *, Value *);
  void markInstructionForDeletion(Instruction *);
  void deleteInstructionsInBlock(BasicBlock *);
  Value *findPHIOfOpsLeader(const Expression *, const Instruction *,
                            const BasicBlock *) const;

  // Various instruction touch utilities.
  template <typename Map, typename KeyType>
  void touchAndErase(Map &, const KeyType &);
  void markUsersTouched(Value *);
  void markMemoryUsersTouched(const MemoryAccess *);
  void markMemoryDefTouched(const MemoryAccess *);
  void markPredicateUsersTouched(Instruction *);
  void markValueLeaderChangeTouched(CongruenceClass *CC);
  void markMemoryLeaderChangeTouched(CongruenceClass *CC);
  void markPhiOfOpsChanged(const Expression *E);
  void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const;
  void addAdditionalUsers(Value *To, Value *User) const;
  void addAdditionalUsers(ExprResult &Res, Instruction *User) const;

  // Main loop of value numbering.
  void iterateTouchedInstructions();

  // Utilities.
  void cleanupTables();
  std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned);
  void updateProcessedCount(const Value *V);
  void verifyMemoryCongruency() const;
  void verifyIterationSettled(Function &F);
  void verifyStoreExpressions() const;
  bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &,
                              const MemoryAccess *, const MemoryAccess *) const;
  BasicBlock *getBlockForValue(Value *V) const;
  void deleteExpression(const Expression *E) const;
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *) const;
  template <class T, class Range> T *getMinDFSOfRange(const Range &) const;

  unsigned InstrToDFSNum(const Value *V) const {
    assert(isa<Instruction>(V) &&
           "This should not be used for MemoryAccesses");
    return InstrDFS.lookup(V);
  }

  unsigned InstrToDFSNum(const MemoryAccess *MA) const {
    return MemoryToDFSNum(MA);
  }

  Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; }

  // Given a MemoryAccess, return the relevant instruction DFS number. Note:
  // This deliberately takes a value so it can be used with Use's, which will
  // auto-convert to Value's but not to MemoryAccess's.
  unsigned MemoryToDFSNum(const Value *MA) const {
    assert(isa<MemoryAccess>(MA) &&
           "This should not be used with instructions");
    return isa<MemoryUseOrDef>(MA)
               ? InstrToDFSNum(cast<MemoryUseOrDef>(MA)->getMemoryInst())
               : InstrDFS.lookup(MA);
  }

  bool isCycleFree(const Instruction *) const;
  bool isBackedge(BasicBlock *From, BasicBlock *To) const;

  // Debug counter info. When verifying, we have to reset the value numbering
  // debug counter to the same state it started in to get the same results.
  int64_t StartingVNCounter = 0;
};

} // end anonymous namespace

template <typename T>
static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) {
  if (!isa<LoadExpression>(RHS) && !isa<StoreExpression>(RHS))
    return false;
  return LHS.MemoryExpression::equals(RHS);
}

bool LoadExpression::equals(const Expression &Other) const {
  return equalsLoadStoreHelper(*this, Other);
}

bool StoreExpression::equals(const Expression &Other) const {
  if (!equalsLoadStoreHelper(*this, Other))
    return false;
  // Make sure that store vs store includes the value operand.
  if (const auto *S = dyn_cast<StoreExpression>(&Other))
    if (getStoredValue() != S->getStoredValue())
      return false;
  return true;
}

// Determine if the edge From->To is a backedge.
bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const {
  return From == To ||
         RPOOrdering.lookup(DT->getNode(From)) >=
             RPOOrdering.lookup(DT->getNode(To));
}

#ifndef NDEBUG
static std::string getBlockName(const BasicBlock *B) {
  return DOTGraphTraits<DOTFuncInfo *>::getSimpleNodeLabel(B, nullptr);
}
#endif

// Get a MemoryAccess for an instruction, fake or real.
MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const {
  auto *Result = MSSA->getMemoryAccess(I);
  return Result ? Result : TempToMemory.lookup(I);
}

// Get a MemoryPhi for a basic block. These are all real.
MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const {
  return MSSA->getMemoryAccess(BB);
}

// Get the basic block from an instruction/memory value.
BasicBlock *NewGVN::getBlockForValue(Value *V) const {
  if (auto *I = dyn_cast<Instruction>(V)) {
    auto *Parent = I->getParent();
    if (Parent)
      return Parent;
    Parent = TempToBlock.lookup(V);
    assert(Parent && "Every fake instruction should have a block");
    return Parent;
  }

  auto *MP = dyn_cast<MemoryPhi>(V);
  assert(MP && "Should have been an instruction or a MemoryPhi");
  return MP->getBlock();
}

// Delete a definitely dead expression, so it can be reused by the expression
// allocator. Some of these are not in creation functions, so we have to accept
// const versions.
void NewGVN::deleteExpression(const Expression *E) const {
  assert(isa<BasicExpression>(E));
  auto *BE = cast<BasicExpression>(E);
  const_cast<BasicExpression *>(BE)->deallocateOperands(ArgRecycler);
  ExpressionAllocator.Deallocate(E);
}

// If V is a predicateinfo copy, get the thing it is a copy of.
static Value *getCopyOf(const Value *V) {
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::ssa_copy)
      return II->getOperand(0);
  return nullptr;
}

// Return true if V is really PN, even accounting for predicateinfo copies.
static bool isCopyOfPHI(const Value *V, const PHINode *PN) {
  return V == PN || getCopyOf(V) == PN;
}

static bool isCopyOfAPHI(const Value *V) {
  auto *CO = getCopyOf(V);
  return CO && isa<PHINode>(CO);
}

// Sort PHI Operands into a canonical order. What we use here is an RPO
// order. The BlockInstRange numbers are generated in an RPO walk of the basic
// blocks.
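// For example (hypothetical blocks, for illustration only): if the RPO visits
// %bb1 before %bb2, then both phi(%x from %bb2, %y from %bb1) and
// phi(%y from %bb1, %x from %bb2) are normalized to the operand order
// {%y/%bb1, %x/%bb2}, so equivalent phis compare and hash identically.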
void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
  llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) {
    return BlockInstRange.lookup(P1.second).first <
           BlockInstRange.lookup(P2.second).first;
  });
}

// Return true if V is a value that will always be available (IE can
// be placed anywhere) in the function. We don't do globals here
// because they are often worse to put in place.
static bool alwaysAvailable(Value *V) {
  return isa<Constant>(V) || isa<Argument>(V);
}

// Create a PHIExpression from an array of {incoming edge, value} pairs. I is
// the original instruction we are creating a PHIExpression for (but may not be
// a phi node). We require, as an invariant, that all the PHIOperands in the
// same block are sorted the same way. sortPHIOps will sort them into a
// canonical order.
PHIExpression *NewGVN::createPHIExpression(ArrayRef<ValPair> PHIOperands,
                                           const Instruction *I,
                                           BasicBlock *PHIBlock,
                                           bool &HasBackedge,
                                           bool &OriginalOpsConstant) const {
  unsigned NumOps = PHIOperands.size();
  auto *E = new (ExpressionAllocator) PHIExpression(NumOps, PHIBlock);

  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(PHIOperands.begin()->first->getType());
  E->setOpcode(Instruction::PHI);

  // Filter out unreachable phi operands.
  auto Filtered = make_filter_range(PHIOperands, [&](const ValPair &P) {
    auto *BB = P.second;
    if (auto *PHIOp = dyn_cast<PHINode>(I))
      if (isCopyOfPHI(P.first, PHIOp))
        return false;
    if (!ReachableEdges.count({BB, PHIBlock}))
      return false;
    // Things in TOPClass are equivalent to everything.
    if (ValueToClass.lookup(P.first) == TOPClass)
      return false;
    OriginalOpsConstant = OriginalOpsConstant && isa<Constant>(P.first);
    HasBackedge = HasBackedge || isBackedge(BB, PHIBlock);
    return lookupOperandLeader(P.first) != I;
  });
  std::transform(Filtered.begin(), Filtered.end(), op_inserter(E),
                 [&](const ValPair &P) -> Value * {
                   return lookupOperandLeader(P.first);
                 });
  return E;
}

// Set basic expression info (Arguments, type, opcode) for Expression
// E from Instruction I in block B.
bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const {
  bool AllConstant = true;
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    E->setType(GEP->getSourceElementType());
  else
    E->setType(I->getType());
  E->setOpcode(I->getOpcode());
  E->allocateOperands(ArgRecycler, ExpressionAllocator);

  // Transform the operand array into an operand leader array, and keep track
  // of whether all members are constant.
  std::transform(I->op_begin(), I->op_end(), op_inserter(E), [&](Value *O) {
    auto Operand = lookupOperandLeader(O);
    AllConstant = AllConstant && isa<Constant>(Operand);
    return Operand;
  });

  return AllConstant;
}

const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
                                                 Value *Arg1, Value *Arg2,
                                                 Instruction *I) const {
  auto *E = new (ExpressionAllocator) BasicExpression(2);

  E->setType(T);
  E->setOpcode(Opcode);
  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  if (Instruction::isCommutative(Opcode)) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    if (shouldSwapOperands(Arg1, Arg2))
      std::swap(Arg1, Arg2);
  }
  E->op_push_back(lookupOperandLeader(Arg1));
  E->op_push_back(lookupOperandLeader(Arg2));

  Value *V = SimplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ);
  if (auto Simplified = checkExprResults(E, I, V)) {
    addAdditionalUsers(Simplified, I);
    return Simplified.Expr;
  }
  return E;
}

// Take a Value returned by simplification of Expression E/Instruction
// I, and see if it resulted in a simpler expression. If so, return
// that expression.
NewGVN::ExprResult NewGVN::checkExprResults(Expression *E, Instruction *I,
                                            Value *V) const {
  if (!V)
    return ExprResult::none();

  if (auto *C = dyn_cast<Constant>(V)) {
    if (I)
      LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
                        << " constant " << *C << "\n");
    NumGVNOpsSimplified++;
    assert(isa<BasicExpression>(E) &&
           "We should always have had a basic expression here");
    deleteExpression(E);
    return ExprResult::some(createConstantExpression(C));
  } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
    if (I)
      LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
                        << " variable " << *V << "\n");
    deleteExpression(E);
    return ExprResult::some(createVariableExpression(V));
  }

  CongruenceClass *CC = ValueToClass.lookup(V);
  if (CC) {
    if (CC->getLeader() && CC->getLeader() != I) {
      return ExprResult::some(createVariableOrConstant(CC->getLeader()), V);
    }
    if (CC->getDefiningExpr()) {
      if (I)
        LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
                          << " expression " << *CC->getDefiningExpr() << "\n");
      NumGVNOpsSimplified++;
      deleteExpression(E);
      return ExprResult::some(CC->getDefiningExpr(), V);
    }
  }

  return ExprResult::none();
}

// Create a value expression from the instruction I, replacing operands with
// their leaders.
NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
  auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands());

  bool AllConstant = setBasicExpressionInfo(I, E);

  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
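    // For example (illustrative only): with this canonicalization,
    // 'add i32 %a, %b' and 'add i32 %b, %a' produce identical expressions and
    // therefore land in the same congruence class.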
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
      E->swapOperands(0, 1);
  }
  // Perform simplification.
  if (auto *CI = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value
    // number.
    CmpInst::Predicate Predicate = CI->getPredicate();
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) {
      E->swapOperands(0, 1);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    E->setOpcode((CI->getOpcode() << 8) | Predicate);
    // TODO: 25% of our time is spent in SimplifyCmpInst with pointer operands.
    assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
           "Wrong types on cmp instruction");
    assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
            E->getOperand(1)->getType() == I->getOperand(1)->getType()));
    Value *V =
        SimplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), SQ);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (isa<SelectInst>(I)) {
    if (isa<Constant>(E->getOperand(0)) ||
        E->getOperand(1) == E->getOperand(2)) {
      assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
             E->getOperand(2)->getType() == I->getOperand(2)->getType());
      Value *V = SimplifySelectInst(E->getOperand(0), E->getOperand(1),
                                    E->getOperand(2), SQ);
      if (auto Simplified = checkExprResults(E, I, V))
        return Simplified;
    }
  } else if (I->isBinaryOp()) {
    Value *V =
        SimplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), SQ);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (auto *CI = dyn_cast<CastInst>(I)) {
    Value *V =
        SimplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), SQ);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    Value *V =
        SimplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
                        makeArrayRef(std::next(E->op_begin()), E->op_end()),
                        GEPI->isInBounds(), SQ);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (AllConstant) {
    // We don't bother trying to simplify unless all of the operands
    // were constant.
    // TODO: There are a lot of Simplify*'s we could call here, if we
    // wanted to. The original motivating case for this code was a
    // zext i1 false to i8, which we don't have an interface to
    // simplify (IE there is no SimplifyZExt).
    SmallVector<Constant *, 8> C;
    for (Value *Arg : E->operands())
      C.emplace_back(cast<Constant>(Arg));

    if (Value *V = ConstantFoldInstOperands(I, C, DL, TLI))
      if (auto Simplified = checkExprResults(E, I, V))
        return Simplified;
  }
  return ExprResult::some(E);
}

const AggregateValueExpression *
NewGVN::createAggregateValueExpression(Instruction *I) const {
  if (auto *II = dyn_cast<InsertValueInst>(I)) {
    auto *E = new (ExpressionAllocator)
        AggregateValueExpression(I->getNumOperands(), II->getNumIndices());
    setBasicExpressionInfo(I, E);
    E->allocateIntOperands(ExpressionAllocator);
    std::copy(II->idx_begin(), II->idx_end(), int_op_inserter(E));
    return E;
  } else if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
    auto *E = new (ExpressionAllocator)
        AggregateValueExpression(I->getNumOperands(), EI->getNumIndices());
    setBasicExpressionInfo(EI, E);
    E->allocateIntOperands(ExpressionAllocator);
    std::copy(EI->idx_begin(), EI->idx_end(), int_op_inserter(E));
    return E;
  }
  llvm_unreachable("Unhandled type of aggregate value operation");
}

const DeadExpression *NewGVN::createDeadExpression() const {
  // DeadExpression has no arguments and all DeadExpression's are the same,
  // so we only need one of them.
  return SingletonDeadExpression;
}

const VariableExpression *NewGVN::createVariableExpression(Value *V) const {
  auto *E = new (ExpressionAllocator) VariableExpression(V);
  E->setOpcode(V->getValueID());
  return E;
}

const Expression *NewGVN::createVariableOrConstant(Value *V) const {
  if (auto *C = dyn_cast<Constant>(V))
    return createConstantExpression(C);
  return createVariableExpression(V);
}

const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const {
  auto *E = new (ExpressionAllocator) ConstantExpression(C);
  E->setOpcode(C->getValueID());
  return E;
}

const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const {
  auto *E = new (ExpressionAllocator) UnknownExpression(I);
  E->setOpcode(I->getOpcode());
  return E;
}

const CallExpression *
NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const {
  // FIXME: Add operand bundles for calls.
  // FIXME: Allow commutative matching for intrinsics.
  auto *E =
      new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA);
  setBasicExpressionInfo(CI, E);
  return E;
}

// Return true if some equivalent of instruction Inst dominates instruction U.
bool NewGVN::someEquivalentDominates(const Instruction *Inst,
                                     const Instruction *U) const {
  auto *CC = ValueToClass.lookup(Inst);
  // This must be an instruction because we are only called from phi nodes
  // in the case that the value it needs to check against is an instruction.

  // The most likely candidates for dominance are the leader and the next
  // leader. The leader or nextleader will dominate in all cases where there is
  // an equivalent that is higher up in the dom tree.
  // We can't *only* check them, however, because the
  // dominator tree could have an infinite number of non-dominating siblings
  // with instructions that are in the right congruence class.
  //       A
  // B C D E F G
  // |
  // H
  // Instruction U could be in H, with equivalents in every other sibling.
  // Depending on the rpo order picked, the leader could be the equivalent in
  // any of these siblings.
  if (!CC)
    return false;
  if (alwaysAvailable(CC->getLeader()))
    return true;
  if (DT->dominates(cast<Instruction>(CC->getLeader()), U))
    return true;
  if (CC->getNextLeader().first &&
      DT->dominates(cast<Instruction>(CC->getNextLeader().first), U))
    return true;
  return llvm::any_of(*CC, [&](const Value *Member) {
    return Member != CC->getLeader() &&
           DT->dominates(cast<Instruction>(Member), U);
  });
}

// See if we have a congruence class and leader for this operand, and if so,
// return it. Otherwise, return the operand itself.
Value *NewGVN::lookupOperandLeader(Value *V) const {
  CongruenceClass *CC = ValueToClass.lookup(V);
  if (CC) {
    // Everything in TOP is represented by poison, as it can be any value.
    // We do have to make sure we get the type right though, so we can't set
    // the RepLeader to poison.
    if (CC == TOPClass)
      return PoisonValue::get(V->getType());
    return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
  }

  return V;
}

const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const {
  auto *CC = getMemoryClass(MA);
  assert(CC->getMemoryLeader() &&
         "Every MemoryAccess should be mapped to a congruence class with a "
         "representative memory access");
  return CC->getMemoryLeader();
}

// Return true if the MemoryAccess is really equivalent to everything. This is
// equivalent to the lattice value "TOP" in most lattices. This is the initial
// state of all MemoryAccesses.
bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const {
  return getMemoryClass(MA) == TOPClass;
}

LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp,
                                             LoadInst *LI,
                                             const MemoryAccess *MA) const {
  auto *E =
      new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA));
  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(LoadType);

  // Give store and loads same opcode so they value number together.
  E->setOpcode(0);
  E->op_push_back(PointerOp);

  // TODO: Value number heap versions. We may be able to discover
  // things alias analysis can't on its own (IE that a store and a
  // load have the same value, and thus, it isn't clobbering the load).
  return E;
}

const StoreExpression *
NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const {
  auto *StoredValueLeader = lookupOperandLeader(SI->getValueOperand());
  auto *E = new (ExpressionAllocator)
      StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA);
  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(SI->getValueOperand()->getType());

  // Give store and loads same opcode so they value number together.
  E->setOpcode(0);
  E->op_push_back(lookupOperandLeader(SI->getPointerOperand()));

  // TODO: Value number heap versions. We may be able to discover
  // things alias analysis can't on its own (IE that a store and a
  // load have the same value, and thus, it isn't clobbering the load).
1386 return E; 1387 } 1388 1389 const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const { 1390 // Unlike loads, we never try to eliminate stores, so we do not check if they 1391 // are simple and avoid value numbering them. 1392 auto *SI = cast<StoreInst>(I); 1393 auto *StoreAccess = getMemoryAccess(SI); 1394 // Get the expression, if any, for the RHS of the MemoryDef. 1395 const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess(); 1396 if (EnableStoreRefinement) 1397 StoreRHS = MSSAWalker->getClobberingMemoryAccess(StoreAccess); 1398 // If we bypassed the use-def chains, make sure we add a use. 1399 StoreRHS = lookupMemoryLeader(StoreRHS); 1400 if (StoreRHS != StoreAccess->getDefiningAccess()) 1401 addMemoryUsers(StoreRHS, StoreAccess); 1402 // If we are defined by ourselves, use the live on entry def. 1403 if (StoreRHS == StoreAccess) 1404 StoreRHS = MSSA->getLiveOnEntryDef(); 1405 1406 if (SI->isSimple()) { 1407 // See if we are defined by a previous store expression, it already has a 1408 // value, and it's the same value as our current store. FIXME: Right now, we 1409 // only do this for simple stores, we should expand to cover memcpys, etc. 1410 const auto *LastStore = createStoreExpression(SI, StoreRHS); 1411 const auto *LastCC = ExpressionToClass.lookup(LastStore); 1412 // We really want to check whether the expression we matched was a store. No 1413 // easy way to do that. However, we can check that the class we found has a 1414 // store, which, assuming the value numbering state is not corrupt, is 1415 // sufficient, because we must also be equivalent to that store's expression 1416 // for it to be in the same class as the load. 1417 if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue()) 1418 return LastStore; 1419 // Also check if our value operand is defined by a load of the same memory 1420 // location, and the memory state is the same as it was then (otherwise, it 1421 // could have been overwritten later. See test32 in 1422 // transforms/DeadStoreElimination/simple.ll). 1423 if (auto *LI = dyn_cast<LoadInst>(LastStore->getStoredValue())) 1424 if ((lookupOperandLeader(LI->getPointerOperand()) == 1425 LastStore->getOperand(0)) && 1426 (lookupMemoryLeader(getMemoryAccess(LI)->getDefiningAccess()) == 1427 StoreRHS)) 1428 return LastStore; 1429 deleteExpression(LastStore); 1430 } 1431 1432 // If the store is not equivalent to anything, value number it as a store that 1433 // produces a unique memory state (instead of using it's MemoryUse, we use 1434 // it's MemoryDef). 1435 return createStoreExpression(SI, StoreAccess); 1436 } 1437 1438 // See if we can extract the value of a loaded pointer from a load, a store, or 1439 // a memory instruction. 1440 const Expression * 1441 NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr, 1442 LoadInst *LI, Instruction *DepInst, 1443 MemoryAccess *DefiningAccess) const { 1444 assert((!LI || LI->isSimple()) && "Not a simple load"); 1445 if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) { 1446 // Can't forward from non-atomic to atomic without violating memory model. 1447 // Also don't need to coerce if they are the same type, we will just 1448 // propagate. 
1449 if (LI->isAtomic() > DepSI->isAtomic() || 1450 LoadType == DepSI->getValueOperand()->getType()) 1451 return nullptr; 1452 int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL); 1453 if (Offset >= 0) { 1454 if (auto *C = dyn_cast<Constant>( 1455 lookupOperandLeader(DepSI->getValueOperand()))) { 1456 LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI 1457 << " to constant " << *C << "\n"); 1458 return createConstantExpression( 1459 getConstantStoreValueForLoad(C, Offset, LoadType, DL)); 1460 } 1461 } 1462 } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) { 1463 // Can't forward from non-atomic to atomic without violating memory model. 1464 if (LI->isAtomic() > DepLI->isAtomic()) 1465 return nullptr; 1466 int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL); 1467 if (Offset >= 0) { 1468 // We can coerce a constant load into a load. 1469 if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI))) 1470 if (auto *PossibleConstant = 1471 getConstantLoadValueForLoad(C, Offset, LoadType, DL)) { 1472 LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI 1473 << " to constant " << *PossibleConstant << "\n"); 1474 return createConstantExpression(PossibleConstant); 1475 } 1476 } 1477 } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) { 1478 int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL); 1479 if (Offset >= 0) { 1480 if (auto *PossibleConstant = 1481 getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) { 1482 LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI 1483 << " to constant " << *PossibleConstant << "\n"); 1484 return createConstantExpression(PossibleConstant); 1485 } 1486 } 1487 } 1488 1489 // All of the below are only true if the loaded pointer is produced 1490 // by the dependent instruction. 1491 if (LoadPtr != lookupOperandLeader(DepInst) && 1492 !AA->isMustAlias(LoadPtr, DepInst)) 1493 return nullptr; 1494 // If this load really doesn't depend on anything, then we must be loading an 1495 // undef value. This can happen when loading for a fresh allocation with no 1496 // intervening stores, for example. Note that this is only true in the case 1497 // that the result of the allocation is pointer equal to the load ptr. 1498 if (isa<AllocaInst>(DepInst)) { 1499 return createConstantExpression(UndefValue::get(LoadType)); 1500 } 1501 // If this load occurs either right after a lifetime begin, 1502 // then the loaded value is undefined. 1503 else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) { 1504 if (II->getIntrinsicID() == Intrinsic::lifetime_start) 1505 return createConstantExpression(UndefValue::get(LoadType)); 1506 } else if (isAllocationFn(DepInst, TLI)) 1507 if (auto *InitVal = getInitialValueOfAllocation(cast<CallBase>(DepInst), 1508 TLI, LoadType)) 1509 return createConstantExpression(InitVal); 1510 1511 return nullptr; 1512 } 1513 1514 const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const { 1515 auto *LI = cast<LoadInst>(I); 1516 1517 // We can eliminate in favor of non-simple loads, but we won't be able to 1518 // eliminate the loads themselves. 1519 if (!LI->isSimple()) 1520 return nullptr; 1521 1522 Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand()); 1523 // Load of undef is UB. 
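  // Loading from an undef address is undefined behavior, so any result is
  // allowed; below we model that result as poison.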
1524   if (isa<UndefValue>(LoadAddressLeader))
1525     return createConstantExpression(PoisonValue::get(LI->getType()));
1526   MemoryAccess *OriginalAccess = getMemoryAccess(I);
1527   MemoryAccess *DefiningAccess =
1528       MSSAWalker->getClobberingMemoryAccess(OriginalAccess);
1529
1530   if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
1531     if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
1532       Instruction *DefiningInst = MD->getMemoryInst();
1533       // If the defining instruction is not reachable, replace with poison.
1534       if (!ReachableBlocks.count(DefiningInst->getParent()))
1535         return createConstantExpression(PoisonValue::get(LI->getType()));
1536       // This will handle stores and memory insts. We only do this if the
1537       // defining access has a different type, or it is a pointer produced by
1538       // certain memory operations that cause the memory to have a fixed value
1539       // (IE things like calloc).
1540       if (const auto *CoercionResult =
1541               performSymbolicLoadCoercion(LI->getType(), LoadAddressLeader, LI,
1542                                           DefiningInst, DefiningAccess))
1543         return CoercionResult;
1544     }
1545   }
1546
1547   const auto *LE = createLoadExpression(LI->getType(), LoadAddressLeader, LI,
1548                                         DefiningAccess);
1549   // If our MemoryLeader is not our defining access, add a use to the
1550   // MemoryLeader, so that we get reprocessed when it changes.
1551   if (LE->getMemoryLeader() != DefiningAccess)
1552     addMemoryUsers(LE->getMemoryLeader(), OriginalAccess);
1553   return LE;
1554 }
1555
1556 NewGVN::ExprResult
1557 NewGVN::performSymbolicPredicateInfoEvaluation(IntrinsicInst *I) const {
1558   auto *PI = PredInfo->getPredicateInfoFor(I);
1559   if (!PI)
1560     return ExprResult::none();
1561
1562   LLVM_DEBUG(dbgs() << "Found predicate info from instruction!\n");
1563
1564   const Optional<PredicateConstraint> &Constraint = PI->getConstraint();
1565   if (!Constraint)
1566     return ExprResult::none();
1567
1568   CmpInst::Predicate Predicate = Constraint->Predicate;
1569   Value *CmpOp0 = I->getOperand(0);
1570   Value *CmpOp1 = Constraint->OtherOp;
1571
1572   Value *FirstOp = lookupOperandLeader(CmpOp0);
1573   Value *SecondOp = lookupOperandLeader(CmpOp1);
1574   Value *AdditionallyUsedValue = CmpOp0;
1575
1576   // Sort the ops.
1577   if (shouldSwapOperandsForIntrinsic(FirstOp, SecondOp, I)) {
1578     std::swap(FirstOp, SecondOp);
1579     Predicate = CmpInst::getSwappedPredicate(Predicate);
1580     AdditionallyUsedValue = CmpOp1;
1581   }
1582
1583   if (Predicate == CmpInst::ICMP_EQ)
1584     return ExprResult::some(createVariableOrConstant(FirstOp),
1585                             AdditionallyUsedValue, PI);
1586
1587   // Handle the special case of floating point.
1588   if (Predicate == CmpInst::FCMP_OEQ && isa<ConstantFP>(FirstOp) &&
1589       !cast<ConstantFP>(FirstOp)->isZero())
1590     return ExprResult::some(createConstantExpression(cast<Constant>(FirstOp)),
1591                             AdditionallyUsedValue, PI);
1592
1593   return ExprResult::none();
1594 }
1595
1596 // Evaluate read-only and pure calls, and create an expression result.
1597 NewGVN::ExprResult NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
1598   auto *CI = cast<CallInst>(I);
1599   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1600     // Intrinsics with the returned attribute are copies of arguments.
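    // (llvm.ssa.copy is the interesting case: it returns its operand
    // unchanged, but the attached PredicateInfo may let us infer something
    // stronger, which is why we try performSymbolicPredicateInfoEvaluation
    // before falling back to the returned argument.)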
1601 if (auto *ReturnedValue = II->getReturnedArgOperand()) { 1602 if (II->getIntrinsicID() == Intrinsic::ssa_copy) 1603 if (auto Res = performSymbolicPredicateInfoEvaluation(II)) 1604 return Res; 1605 return ExprResult::some(createVariableOrConstant(ReturnedValue)); 1606 } 1607 } 1608 if (AA->doesNotAccessMemory(CI)) { 1609 return ExprResult::some( 1610 createCallExpression(CI, TOPClass->getMemoryLeader())); 1611 } else if (AA->onlyReadsMemory(CI)) { 1612 if (auto *MA = MSSA->getMemoryAccess(CI)) { 1613 auto *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(MA); 1614 return ExprResult::some(createCallExpression(CI, DefiningAccess)); 1615 } else // MSSA determined that CI does not access memory. 1616 return ExprResult::some( 1617 createCallExpression(CI, TOPClass->getMemoryLeader())); 1618 } 1619 return ExprResult::none(); 1620 } 1621 1622 // Retrieve the memory class for a given MemoryAccess. 1623 CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const { 1624 auto *Result = MemoryAccessToClass.lookup(MA); 1625 assert(Result && "Should have found memory class"); 1626 return Result; 1627 } 1628 1629 // Update the MemoryAccess equivalence table to say that From is equal to To, 1630 // and return true if this is different from what already existed in the table. 1631 bool NewGVN::setMemoryClass(const MemoryAccess *From, 1632 CongruenceClass *NewClass) { 1633 assert(NewClass && 1634 "Every MemoryAccess should be getting mapped to a non-null class"); 1635 LLVM_DEBUG(dbgs() << "Setting " << *From); 1636 LLVM_DEBUG(dbgs() << " equivalent to congruence class "); 1637 LLVM_DEBUG(dbgs() << NewClass->getID() 1638 << " with current MemoryAccess leader "); 1639 LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n"); 1640 1641 auto LookupResult = MemoryAccessToClass.find(From); 1642 bool Changed = false; 1643 // If it's already in the table, see if the value changed. 1644 if (LookupResult != MemoryAccessToClass.end()) { 1645 auto *OldClass = LookupResult->second; 1646 if (OldClass != NewClass) { 1647 // If this is a phi, we have to handle memory member updates. 1648 if (auto *MP = dyn_cast<MemoryPhi>(From)) { 1649 OldClass->memory_erase(MP); 1650 NewClass->memory_insert(MP); 1651 // This may have killed the class if it had no non-memory members 1652 if (OldClass->getMemoryLeader() == From) { 1653 if (OldClass->definesNoMemory()) { 1654 OldClass->setMemoryLeader(nullptr); 1655 } else { 1656 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); 1657 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 1658 << OldClass->getID() << " to " 1659 << *OldClass->getMemoryLeader() 1660 << " due to removal of a memory member " << *From 1661 << "\n"); 1662 markMemoryLeaderChangeTouched(OldClass); 1663 } 1664 } 1665 } 1666 // It wasn't equivalent before, and now it is. 1667 LookupResult->second = NewClass; 1668 Changed = true; 1669 } 1670 } 1671 1672 return Changed; 1673 } 1674 1675 // Determine if a instruction is cycle-free. That means the values in the 1676 // instruction don't depend on any expressions that can change value as a result 1677 // of the instruction. For example, a non-cycle free instruction would be v = 1678 // phi(0, v+1). 1679 bool NewGVN::isCycleFree(const Instruction *I) const { 1680 // In order to compute cycle-freeness, we do SCC finding on the instruction, 1681 // and see what kind of SCC it ends up in. If it is a singleton, it is 1682 // cycle-free. 
// If it is not in a singleton, it is only cycle free if the
1683 // other members are all phi nodes (as they do not compute anything, they are
1684 // copies).
1685   auto ICS = InstCycleState.lookup(I);
1686   if (ICS == ICS_Unknown) {
1687     SCCFinder.Start(I);
1688     auto &SCC = SCCFinder.getComponentFor(I);
1689     // It's cycle free if it's size 1 or the SCC is *only* phi nodes.
1690     if (SCC.size() == 1)
1691       InstCycleState.insert({I, ICS_CycleFree});
1692     else {
1693       bool AllPhis = llvm::all_of(SCC, [](const Value *V) {
1694         return isa<PHINode>(V) || isCopyOfAPHI(V);
1695       });
1696       ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
1697       for (auto *Member : SCC)
1698         if (auto *MemberPhi = dyn_cast<PHINode>(Member))
1699           InstCycleState.insert({MemberPhi, ICS});
1700     }
1701   }
1702   if (ICS == ICS_Cycle)
1703     return false;
1704   return true;
1705 }
1706
1707 // Evaluate PHI nodes symbolically and create an expression result.
1708 const Expression *
1709 NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
1710                                      Instruction *I,
1711                                      BasicBlock *PHIBlock) const {
1712   // True if one of the incoming phi edges is a backedge.
1713   bool HasBackedge = false;
1714   // OriginalOpsConstant tracks whether all of the *original* phi operands were
1715   // constant. This is really shorthand for "this phi cannot cycle due to
1716   // forward propagation", as any change in value of the phi is guaranteed not
1717   // to later change the value of the phi. IE it can't be v = phi(undef, v+1)
1718   bool OriginalOpsConstant = true;
1719   auto *E = cast<PHIExpression>(createPHIExpression(
1720       PHIOps, I, PHIBlock, HasBackedge, OriginalOpsConstant));
1721   // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1722   // See if all arguments are the same.
1723   // We track if any were undef because they need special handling.
1724   bool HasUndef = false, HasPoison = false;
1725   auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
1726     if (isa<PoisonValue>(Arg)) {
1727       HasPoison = true;
1728       return false;
1729     }
1730     if (isa<UndefValue>(Arg)) {
1731       HasUndef = true;
1732       return false;
1733     }
1734     return true;
1735   });
1736   // If we are left with no operands, it's dead.
1737   if (Filtered.empty()) {
1738     // If it has undef or poison at this point, it means there are no non-undef
1739     // arguments, and thus, the value of the phi node must be undef.
1740     if (HasUndef) {
1741       LLVM_DEBUG(
1742           dbgs() << "PHI Node " << *I
1743                  << " has no non-undef arguments, valuing it as undef\n");
1744       return createConstantExpression(UndefValue::get(I->getType()));
1745     }
1746     if (HasPoison) {
1747       LLVM_DEBUG(
1748           dbgs() << "PHI Node " << *I
1749                  << " has no non-poison arguments, valuing it as poison\n");
1750       return createConstantExpression(PoisonValue::get(I->getType()));
1751     }
1752
1753     LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
1754     deleteExpression(E);
1755     return createDeadExpression();
1756   }
1757   Value *AllSameValue = *(Filtered.begin());
1758   ++Filtered.begin();
1759   // Can't use std::equal here, sadly, because filter.begin moves.
1760   if (llvm::all_of(Filtered, [&](Value *Arg) { return Arg == AllSameValue; })) {
1761     // Can't fold phi(undef, X) -> X unless X can't be poison (thus X is undef
1762     // in the worst case).
1763     if (HasUndef && !isGuaranteedNotToBePoison(AllSameValue, AC, nullptr, DT))
1764       return E;
1765
1766     // In LLVM's non-standard representation of phi nodes, it's possible to have
1767     // phi nodes with cycles (IE dependent on other phis that are ....
dependent 1768 // on the original phi node), especially in weird CFG's where some arguments 1769 // are unreachable, or uninitialized along certain paths. This can cause 1770 // infinite loops during evaluation. We work around this by not trying to 1771 // really evaluate them independently, but instead using a variable 1772 // expression to say if one is equivalent to the other. 1773 // We also special case undef/poison, so that if we have an undef, we can't 1774 // use the common value unless it dominates the phi block. 1775 if (HasPoison || HasUndef) { 1776 // If we have undef and at least one other value, this is really a 1777 // multivalued phi, and we need to know if it's cycle free in order to 1778 // evaluate whether we can ignore the undef. The other parts of this are 1779 // just shortcuts. If there is no backedge, or all operands are 1780 // constants, it also must be cycle free. 1781 if (HasBackedge && !OriginalOpsConstant && 1782 !isa<UndefValue>(AllSameValue) && !isCycleFree(I)) 1783 return E; 1784 1785 // Only have to check for instructions 1786 if (auto *AllSameInst = dyn_cast<Instruction>(AllSameValue)) 1787 if (!someEquivalentDominates(AllSameInst, I)) 1788 return E; 1789 } 1790 // Can't simplify to something that comes later in the iteration. 1791 // Otherwise, when and if it changes congruence class, we will never catch 1792 // up. We will always be a class behind it. 1793 if (isa<Instruction>(AllSameValue) && 1794 InstrToDFSNum(AllSameValue) > InstrToDFSNum(I)) 1795 return E; 1796 NumGVNPhisAllSame++; 1797 LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue 1798 << "\n"); 1799 deleteExpression(E); 1800 return createVariableOrConstant(AllSameValue); 1801 } 1802 return E; 1803 } 1804 1805 const Expression * 1806 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const { 1807 if (auto *EI = dyn_cast<ExtractValueInst>(I)) { 1808 auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand()); 1809 if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) 1810 // EI is an extract from one of our with.overflow intrinsics. Synthesize 1811 // a semantically equivalent expression instead of an extract value 1812 // expression. 1813 return createBinaryExpression(WO->getBinaryOp(), EI->getType(), 1814 WO->getLHS(), WO->getRHS(), I); 1815 } 1816 1817 return createAggregateValueExpression(I); 1818 } 1819 1820 NewGVN::ExprResult NewGVN::performSymbolicCmpEvaluation(Instruction *I) const { 1821 assert(isa<CmpInst>(I) && "Expected a cmp instruction."); 1822 1823 auto *CI = cast<CmpInst>(I); 1824 // See if our operands are equal to those of a previous predicate, and if so, 1825 // if it implies true or false. 1826 auto Op0 = lookupOperandLeader(CI->getOperand(0)); 1827 auto Op1 = lookupOperandLeader(CI->getOperand(1)); 1828 auto OurPredicate = CI->getPredicate(); 1829 if (shouldSwapOperands(Op0, Op1)) { 1830 std::swap(Op0, Op1); 1831 OurPredicate = CI->getSwappedPredicate(); 1832 } 1833 1834 // Avoid processing the same info twice. 1835 const PredicateBase *LastPredInfo = nullptr; 1836 // See if we know something about the comparison itself, like it is the target 1837 // of an assume. 
1838 auto *CmpPI = PredInfo->getPredicateInfoFor(I); 1839 if (isa_and_nonnull<PredicateAssume>(CmpPI)) 1840 return ExprResult::some( 1841 createConstantExpression(ConstantInt::getTrue(CI->getType()))); 1842 1843 if (Op0 == Op1) { 1844 // This condition does not depend on predicates, no need to add users 1845 if (CI->isTrueWhenEqual()) 1846 return ExprResult::some( 1847 createConstantExpression(ConstantInt::getTrue(CI->getType()))); 1848 else if (CI->isFalseWhenEqual()) 1849 return ExprResult::some( 1850 createConstantExpression(ConstantInt::getFalse(CI->getType()))); 1851 } 1852 1853 // NOTE: Because we are comparing both operands here and below, and using 1854 // previous comparisons, we rely on fact that predicateinfo knows to mark 1855 // comparisons that use renamed operands as users of the earlier comparisons. 1856 // It is *not* enough to just mark predicateinfo renamed operands as users of 1857 // the earlier comparisons, because the *other* operand may have changed in a 1858 // previous iteration. 1859 // Example: 1860 // icmp slt %a, %b 1861 // %b.0 = ssa.copy(%b) 1862 // false branch: 1863 // icmp slt %c, %b.0 1864 1865 // %c and %a may start out equal, and thus, the code below will say the second 1866 // %icmp is false. c may become equal to something else, and in that case the 1867 // %second icmp *must* be reexamined, but would not if only the renamed 1868 // %operands are considered users of the icmp. 1869 1870 // *Currently* we only check one level of comparisons back, and only mark one 1871 // level back as touched when changes happen. If you modify this code to look 1872 // back farther through comparisons, you *must* mark the appropriate 1873 // comparisons as users in PredicateInfo.cpp, or you will cause bugs. See if 1874 // we know something just from the operands themselves 1875 1876 // See if our operands have predicate info, so that we may be able to derive 1877 // something from a previous comparison. 1878 for (const auto &Op : CI->operands()) { 1879 auto *PI = PredInfo->getPredicateInfoFor(Op); 1880 if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI)) { 1881 if (PI == LastPredInfo) 1882 continue; 1883 LastPredInfo = PI; 1884 // In phi of ops cases, we may have predicate info that we are evaluating 1885 // in a different context. 1886 if (!DT->dominates(PBranch->To, getBlockForValue(I))) 1887 continue; 1888 // TODO: Along the false edge, we may know more things too, like 1889 // icmp of 1890 // same operands is false. 1891 // TODO: We only handle actual comparison conditions below, not 1892 // and/or. 1893 auto *BranchCond = dyn_cast<CmpInst>(PBranch->Condition); 1894 if (!BranchCond) 1895 continue; 1896 auto *BranchOp0 = lookupOperandLeader(BranchCond->getOperand(0)); 1897 auto *BranchOp1 = lookupOperandLeader(BranchCond->getOperand(1)); 1898 auto BranchPredicate = BranchCond->getPredicate(); 1899 if (shouldSwapOperands(BranchOp0, BranchOp1)) { 1900 std::swap(BranchOp0, BranchOp1); 1901 BranchPredicate = BranchCond->getSwappedPredicate(); 1902 } 1903 if (BranchOp0 == Op0 && BranchOp1 == Op1) { 1904 if (PBranch->TrueEdge) { 1905 // If we know the previous predicate is true and we are in the true 1906 // edge then we may be implied true or false. 
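          // For example, if the dominating branch tested (icmp slt %a, %b) and
          // we are on its true edge, then (icmp sle %a, %b) is implied true and
          // (icmp sgt %a, %b) is implied false.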
1907 if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate, 1908 OurPredicate)) { 1909 return ExprResult::some( 1910 createConstantExpression(ConstantInt::getTrue(CI->getType())), 1911 PI); 1912 } 1913 1914 if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate, 1915 OurPredicate)) { 1916 return ExprResult::some( 1917 createConstantExpression(ConstantInt::getFalse(CI->getType())), 1918 PI); 1919 } 1920 } else { 1921 // Just handle the ne and eq cases, where if we have the same 1922 // operands, we may know something. 1923 if (BranchPredicate == OurPredicate) { 1924 // Same predicate, same ops,we know it was false, so this is false. 1925 return ExprResult::some( 1926 createConstantExpression(ConstantInt::getFalse(CI->getType())), 1927 PI); 1928 } else if (BranchPredicate == 1929 CmpInst::getInversePredicate(OurPredicate)) { 1930 // Inverse predicate, we know the other was false, so this is true. 1931 return ExprResult::some( 1932 createConstantExpression(ConstantInt::getTrue(CI->getType())), 1933 PI); 1934 } 1935 } 1936 } 1937 } 1938 } 1939 // Create expression will take care of simplifyCmpInst 1940 return createExpression(I); 1941 } 1942 1943 // Substitute and symbolize the value before value numbering. 1944 NewGVN::ExprResult 1945 NewGVN::performSymbolicEvaluation(Value *V, 1946 SmallPtrSetImpl<Value *> &Visited) const { 1947 1948 const Expression *E = nullptr; 1949 if (auto *C = dyn_cast<Constant>(V)) 1950 E = createConstantExpression(C); 1951 else if (isa<Argument>(V) || isa<GlobalVariable>(V)) { 1952 E = createVariableExpression(V); 1953 } else { 1954 // TODO: memory intrinsics. 1955 // TODO: Some day, we should do the forward propagation and reassociation 1956 // parts of the algorithm. 1957 auto *I = cast<Instruction>(V); 1958 switch (I->getOpcode()) { 1959 case Instruction::ExtractValue: 1960 case Instruction::InsertValue: 1961 E = performSymbolicAggrValueEvaluation(I); 1962 break; 1963 case Instruction::PHI: { 1964 SmallVector<ValPair, 3> Ops; 1965 auto *PN = cast<PHINode>(I); 1966 for (unsigned i = 0; i < PN->getNumOperands(); ++i) 1967 Ops.push_back({PN->getIncomingValue(i), PN->getIncomingBlock(i)}); 1968 // Sort to ensure the invariant createPHIExpression requires is met. 
1969 sortPHIOps(Ops); 1970 E = performSymbolicPHIEvaluation(Ops, I, getBlockForValue(I)); 1971 } break; 1972 case Instruction::Call: 1973 return performSymbolicCallEvaluation(I); 1974 break; 1975 case Instruction::Store: 1976 E = performSymbolicStoreEvaluation(I); 1977 break; 1978 case Instruction::Load: 1979 E = performSymbolicLoadEvaluation(I); 1980 break; 1981 case Instruction::BitCast: 1982 case Instruction::AddrSpaceCast: 1983 return createExpression(I); 1984 break; 1985 case Instruction::ICmp: 1986 case Instruction::FCmp: 1987 return performSymbolicCmpEvaluation(I); 1988 break; 1989 case Instruction::FNeg: 1990 case Instruction::Add: 1991 case Instruction::FAdd: 1992 case Instruction::Sub: 1993 case Instruction::FSub: 1994 case Instruction::Mul: 1995 case Instruction::FMul: 1996 case Instruction::UDiv: 1997 case Instruction::SDiv: 1998 case Instruction::FDiv: 1999 case Instruction::URem: 2000 case Instruction::SRem: 2001 case Instruction::FRem: 2002 case Instruction::Shl: 2003 case Instruction::LShr: 2004 case Instruction::AShr: 2005 case Instruction::And: 2006 case Instruction::Or: 2007 case Instruction::Xor: 2008 case Instruction::Trunc: 2009 case Instruction::ZExt: 2010 case Instruction::SExt: 2011 case Instruction::FPToUI: 2012 case Instruction::FPToSI: 2013 case Instruction::UIToFP: 2014 case Instruction::SIToFP: 2015 case Instruction::FPTrunc: 2016 case Instruction::FPExt: 2017 case Instruction::PtrToInt: 2018 case Instruction::IntToPtr: 2019 case Instruction::Select: 2020 case Instruction::ExtractElement: 2021 case Instruction::InsertElement: 2022 case Instruction::GetElementPtr: 2023 return createExpression(I); 2024 break; 2025 case Instruction::ShuffleVector: 2026 // FIXME: Add support for shufflevector to createExpression. 2027 return ExprResult::none(); 2028 default: 2029 return ExprResult::none(); 2030 } 2031 } 2032 return ExprResult::some(E); 2033 } 2034 2035 // Look up a container of values/instructions in a map, and touch all the 2036 // instructions in the container. Then erase value from the map. 2037 template <typename Map, typename KeyType> 2038 void NewGVN::touchAndErase(Map &M, const KeyType &Key) { 2039 const auto Result = M.find_as(Key); 2040 if (Result != M.end()) { 2041 for (const typename Map::mapped_type::value_type Mapped : Result->second) 2042 TouchedInstructions.set(InstrToDFSNum(Mapped)); 2043 M.erase(Result); 2044 } 2045 } 2046 2047 void NewGVN::addAdditionalUsers(Value *To, Value *User) const { 2048 assert(User && To != User); 2049 if (isa<Instruction>(To)) 2050 AdditionalUsers[To].insert(User); 2051 } 2052 2053 void NewGVN::addAdditionalUsers(ExprResult &Res, Instruction *User) const { 2054 if (Res.ExtraDep && Res.ExtraDep != User) 2055 addAdditionalUsers(Res.ExtraDep, User); 2056 Res.ExtraDep = nullptr; 2057 2058 if (Res.PredDep) { 2059 if (const auto *PBranch = dyn_cast<PredicateBranch>(Res.PredDep)) 2060 PredicateToUsers[PBranch->Condition].insert(User); 2061 else if (const auto *PAssume = dyn_cast<PredicateAssume>(Res.PredDep)) 2062 PredicateToUsers[PAssume->Condition].insert(User); 2063 } 2064 Res.PredDep = nullptr; 2065 } 2066 2067 void NewGVN::markUsersTouched(Value *V) { 2068 // Now mark the users as touched. 
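  // Both the direct IR users of V and anything recorded in AdditionalUsers
  // (uses discovered through simplification or predicate info) are reprocessed.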
2069 for (auto *User : V->users()) { 2070 assert(isa<Instruction>(User) && "Use of value not within an instruction?"); 2071 TouchedInstructions.set(InstrToDFSNum(User)); 2072 } 2073 touchAndErase(AdditionalUsers, V); 2074 } 2075 2076 void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const { 2077 LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n"); 2078 MemoryToUsers[To].insert(U); 2079 } 2080 2081 void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) { 2082 TouchedInstructions.set(MemoryToDFSNum(MA)); 2083 } 2084 2085 void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) { 2086 if (isa<MemoryUse>(MA)) 2087 return; 2088 for (auto U : MA->users()) 2089 TouchedInstructions.set(MemoryToDFSNum(U)); 2090 touchAndErase(MemoryToUsers, MA); 2091 } 2092 2093 // Touch all the predicates that depend on this instruction. 2094 void NewGVN::markPredicateUsersTouched(Instruction *I) { 2095 touchAndErase(PredicateToUsers, I); 2096 } 2097 2098 // Mark users affected by a memory leader change. 2099 void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) { 2100 for (auto M : CC->memory()) 2101 markMemoryDefTouched(M); 2102 } 2103 2104 // Touch the instructions that need to be updated after a congruence class has a 2105 // leader change, and mark changed values. 2106 void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) { 2107 for (auto M : *CC) { 2108 if (auto *I = dyn_cast<Instruction>(M)) 2109 TouchedInstructions.set(InstrToDFSNum(I)); 2110 LeaderChanges.insert(M); 2111 } 2112 } 2113 2114 // Give a range of things that have instruction DFS numbers, this will return 2115 // the member of the range with the smallest dfs number. 2116 template <class T, class Range> 2117 T *NewGVN::getMinDFSOfRange(const Range &R) const { 2118 std::pair<T *, unsigned> MinDFS = {nullptr, ~0U}; 2119 for (const auto X : R) { 2120 auto DFSNum = InstrToDFSNum(X); 2121 if (DFSNum < MinDFS.second) 2122 MinDFS = {X, DFSNum}; 2123 } 2124 return MinDFS.first; 2125 } 2126 2127 // This function returns the MemoryAccess that should be the next leader of 2128 // congruence class CC, under the assumption that the current leader is going to 2129 // disappear. 2130 const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const { 2131 // TODO: If this ends up to slow, we can maintain a next memory leader like we 2132 // do for regular leaders. 2133 // Make sure there will be a leader to find. 2134 assert(!CC->definesNoMemory() && "Can't get next leader if there is none"); 2135 if (CC->getStoreCount() > 0) { 2136 if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first)) 2137 return getMemoryAccess(NL); 2138 // Find the store with the minimum DFS number. 2139 auto *V = getMinDFSOfRange<Value>(make_filter_range( 2140 *CC, [&](const Value *V) { return isa<StoreInst>(V); })); 2141 return getMemoryAccess(cast<StoreInst>(V)); 2142 } 2143 assert(CC->getStoreCount() == 0); 2144 2145 // Given our assertion, hitting this part must mean 2146 // !OldClass->memory_empty() 2147 if (CC->memory_size() == 1) 2148 return *CC->memory_begin(); 2149 return getMinDFSOfRange<const MemoryPhi>(CC->memory()); 2150 } 2151 2152 // This function returns the next value leader of a congruence class, under the 2153 // assumption that the current leader is going away. This should end up being 2154 // the next most dominating member. 
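// We prefer, in order: the only remaining member (or any member, for TOP), the
// recorded next leader if we have one, and otherwise the member of the class
// with the minimum DFS number.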
2155 Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const { 2156 // We don't need to sort members if there is only 1, and we don't care about 2157 // sorting the TOP class because everything either gets out of it or is 2158 // unreachable. 2159 2160 if (CC->size() == 1 || CC == TOPClass) { 2161 return *(CC->begin()); 2162 } else if (CC->getNextLeader().first) { 2163 ++NumGVNAvoidedSortedLeaderChanges; 2164 return CC->getNextLeader().first; 2165 } else { 2166 ++NumGVNSortedLeaderChanges; 2167 // NOTE: If this ends up to slow, we can maintain a dual structure for 2168 // member testing/insertion, or keep things mostly sorted, and sort only 2169 // here, or use SparseBitVector or .... 2170 return getMinDFSOfRange<Value>(*CC); 2171 } 2172 } 2173 2174 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to 2175 // the memory members, etc for the move. 2176 // 2177 // The invariants of this function are: 2178 // 2179 // - I must be moving to NewClass from OldClass 2180 // - The StoreCount of OldClass and NewClass is expected to have been updated 2181 // for I already if it is a store. 2182 // - The OldClass memory leader has not been updated yet if I was the leader. 2183 void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I, 2184 MemoryAccess *InstMA, 2185 CongruenceClass *OldClass, 2186 CongruenceClass *NewClass) { 2187 // If the leader is I, and we had a representative MemoryAccess, it should 2188 // be the MemoryAccess of OldClass. 2189 assert((!InstMA || !OldClass->getMemoryLeader() || 2190 OldClass->getLeader() != I || 2191 MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) == 2192 MemoryAccessToClass.lookup(InstMA)) && 2193 "Representative MemoryAccess mismatch"); 2194 // First, see what happens to the new class 2195 if (!NewClass->getMemoryLeader()) { 2196 // Should be a new class, or a store becoming a leader of a new class. 2197 assert(NewClass->size() == 1 || 2198 (isa<StoreInst>(I) && NewClass->getStoreCount() == 1)); 2199 NewClass->setMemoryLeader(InstMA); 2200 // Mark it touched if we didn't just create a singleton 2201 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 2202 << NewClass->getID() 2203 << " due to new memory instruction becoming leader\n"); 2204 markMemoryLeaderChangeTouched(NewClass); 2205 } 2206 setMemoryClass(InstMA, NewClass); 2207 // Now, fixup the old class if necessary 2208 if (OldClass->getMemoryLeader() == InstMA) { 2209 if (!OldClass->definesNoMemory()) { 2210 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); 2211 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 2212 << OldClass->getID() << " to " 2213 << *OldClass->getMemoryLeader() 2214 << " due to removal of old leader " << *InstMA << "\n"); 2215 markMemoryLeaderChangeTouched(OldClass); 2216 } else 2217 OldClass->setMemoryLeader(nullptr); 2218 } 2219 } 2220 2221 // Move a value, currently in OldClass, to be part of NewClass 2222 // Update OldClass and NewClass for the move (including changing leaders, etc). 2223 void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, 2224 CongruenceClass *OldClass, 2225 CongruenceClass *NewClass) { 2226 if (I == OldClass->getNextLeader().first) 2227 OldClass->resetNextLeader(); 2228 2229 OldClass->erase(I); 2230 NewClass->insert(I); 2231 2232 if (NewClass->getLeader() != I) 2233 NewClass->addPossibleNextLeader({I, InstrToDFSNum(I)}); 2234 // Handle our special casing of stores. 
2235 if (auto *SI = dyn_cast<StoreInst>(I)) { 2236 OldClass->decStoreCount(); 2237 // Okay, so when do we want to make a store a leader of a class? 2238 // If we have a store defined by an earlier load, we want the earlier load 2239 // to lead the class. 2240 // If we have a store defined by something else, we want the store to lead 2241 // the class so everything else gets the "something else" as a value. 2242 // If we have a store as the single member of the class, we want the store 2243 // as the leader 2244 if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) { 2245 // If it's a store expression we are using, it means we are not equivalent 2246 // to something earlier. 2247 if (auto *SE = dyn_cast<StoreExpression>(E)) { 2248 NewClass->setStoredValue(SE->getStoredValue()); 2249 markValueLeaderChangeTouched(NewClass); 2250 // Shift the new class leader to be the store 2251 LLVM_DEBUG(dbgs() << "Changing leader of congruence class " 2252 << NewClass->getID() << " from " 2253 << *NewClass->getLeader() << " to " << *SI 2254 << " because store joined class\n"); 2255 // If we changed the leader, we have to mark it changed because we don't 2256 // know what it will do to symbolic evaluation. 2257 NewClass->setLeader(SI); 2258 } 2259 // We rely on the code below handling the MemoryAccess change. 2260 } 2261 NewClass->incStoreCount(); 2262 } 2263 // True if there is no memory instructions left in a class that had memory 2264 // instructions before. 2265 2266 // If it's not a memory use, set the MemoryAccess equivalence 2267 auto *InstMA = dyn_cast_or_null<MemoryDef>(getMemoryAccess(I)); 2268 if (InstMA) 2269 moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass); 2270 ValueToClass[I] = NewClass; 2271 // See if we destroyed the class or need to swap leaders. 2272 if (OldClass->empty() && OldClass != TOPClass) { 2273 if (OldClass->getDefiningExpr()) { 2274 LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr() 2275 << " from table\n"); 2276 // We erase it as an exact expression to make sure we don't just erase an 2277 // equivalent one. 2278 auto Iter = ExpressionToClass.find_as( 2279 ExactEqualsExpression(*OldClass->getDefiningExpr())); 2280 if (Iter != ExpressionToClass.end()) 2281 ExpressionToClass.erase(Iter); 2282 #ifdef EXPENSIVE_CHECKS 2283 assert( 2284 (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) && 2285 "We erased the expression we just inserted, which should not happen"); 2286 #endif 2287 } 2288 } else if (OldClass->getLeader() == I) { 2289 // When the leader changes, the value numbering of 2290 // everything may change due to symbolization changes, so we need to 2291 // reprocess. 2292 LLVM_DEBUG(dbgs() << "Value class leader change for class " 2293 << OldClass->getID() << "\n"); 2294 ++NumGVNLeaderChanges; 2295 // Destroy the stored value if there are no more stores to represent it. 2296 // Note that this is basically clean up for the expression removal that 2297 // happens below. If we remove stores from a class, we may leave it as a 2298 // class of equivalent memory phis. 2299 if (OldClass->getStoreCount() == 0) { 2300 if (OldClass->getStoredValue()) 2301 OldClass->setStoredValue(nullptr); 2302 } 2303 OldClass->setLeader(getNextValueLeader(OldClass)); 2304 OldClass->resetNextLeader(); 2305 markValueLeaderChangeTouched(OldClass); 2306 } 2307 } 2308 2309 // For a given expression, mark the phi of ops instructions that could have 2310 // changed as a result. 
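// These are the instructions recorded in ExpressionToPhiOfOps when we failed
// to find a phi-of-ops leader for this expression; touching them gives the
// translation another chance now that the expression's class has changed.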
2311 void NewGVN::markPhiOfOpsChanged(const Expression *E) { 2312 touchAndErase(ExpressionToPhiOfOps, E); 2313 } 2314 2315 // Perform congruence finding on a given value numbering expression. 2316 void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) { 2317 // This is guaranteed to return something, since it will at least find 2318 // TOP. 2319 2320 CongruenceClass *IClass = ValueToClass.lookup(I); 2321 assert(IClass && "Should have found a IClass"); 2322 // Dead classes should have been eliminated from the mapping. 2323 assert(!IClass->isDead() && "Found a dead class"); 2324 2325 CongruenceClass *EClass = nullptr; 2326 if (const auto *VE = dyn_cast<VariableExpression>(E)) { 2327 EClass = ValueToClass.lookup(VE->getVariableValue()); 2328 } else if (isa<DeadExpression>(E)) { 2329 EClass = TOPClass; 2330 } 2331 if (!EClass) { 2332 auto lookupResult = ExpressionToClass.insert({E, nullptr}); 2333 2334 // If it's not in the value table, create a new congruence class. 2335 if (lookupResult.second) { 2336 CongruenceClass *NewClass = createCongruenceClass(nullptr, E); 2337 auto place = lookupResult.first; 2338 place->second = NewClass; 2339 2340 // Constants and variables should always be made the leader. 2341 if (const auto *CE = dyn_cast<ConstantExpression>(E)) { 2342 NewClass->setLeader(CE->getConstantValue()); 2343 } else if (const auto *SE = dyn_cast<StoreExpression>(E)) { 2344 StoreInst *SI = SE->getStoreInst(); 2345 NewClass->setLeader(SI); 2346 NewClass->setStoredValue(SE->getStoredValue()); 2347 // The RepMemoryAccess field will be filled in properly by the 2348 // moveValueToNewCongruenceClass call. 2349 } else { 2350 NewClass->setLeader(I); 2351 } 2352 assert(!isa<VariableExpression>(E) && 2353 "VariableExpression should have been handled already"); 2354 2355 EClass = NewClass; 2356 LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I 2357 << " using expression " << *E << " at " 2358 << NewClass->getID() << " and leader " 2359 << *(NewClass->getLeader())); 2360 if (NewClass->getStoredValue()) 2361 LLVM_DEBUG(dbgs() << " and stored value " 2362 << *(NewClass->getStoredValue())); 2363 LLVM_DEBUG(dbgs() << "\n"); 2364 } else { 2365 EClass = lookupResult.first->second; 2366 if (isa<ConstantExpression>(E)) 2367 assert((isa<Constant>(EClass->getLeader()) || 2368 (EClass->getStoredValue() && 2369 isa<Constant>(EClass->getStoredValue()))) && 2370 "Any class with a constant expression should have a " 2371 "constant leader"); 2372 2373 assert(EClass && "Somehow don't have an eclass"); 2374 2375 assert(!EClass->isDead() && "We accidentally looked up a dead class"); 2376 } 2377 } 2378 bool ClassChanged = IClass != EClass; 2379 bool LeaderChanged = LeaderChanges.erase(I); 2380 if (ClassChanged || LeaderChanged) { 2381 LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " 2382 << *E << "\n"); 2383 if (ClassChanged) { 2384 moveValueToNewCongruenceClass(I, E, IClass, EClass); 2385 markPhiOfOpsChanged(E); 2386 } 2387 2388 markUsersTouched(I); 2389 if (MemoryAccess *MA = getMemoryAccess(I)) 2390 markMemoryUsersTouched(MA); 2391 if (auto *CI = dyn_cast<CmpInst>(I)) 2392 markPredicateUsersTouched(CI); 2393 } 2394 // If we changed the class of the store, we want to ensure nothing finds the 2395 // old store expression. In particular, loads do not compare against stored 2396 // value, so they will find old store expressions (and associated class 2397 // mappings) if we leave them in the table. 
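  // For example, a later load of the same pointer and memory state would
  // match the stale StoreExpression and be mapped to a class this store no
  // longer belongs to.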
2398 if (ClassChanged && isa<StoreInst>(I)) { 2399 auto *OldE = ValueToExpression.lookup(I); 2400 // It could just be that the old class died. We don't want to erase it if we 2401 // just moved classes. 2402 if (OldE && isa<StoreExpression>(OldE) && *E != *OldE) { 2403 // Erase this as an exact expression to ensure we don't erase expressions 2404 // equivalent to it. 2405 auto Iter = ExpressionToClass.find_as(ExactEqualsExpression(*OldE)); 2406 if (Iter != ExpressionToClass.end()) 2407 ExpressionToClass.erase(Iter); 2408 } 2409 } 2410 ValueToExpression[I] = E; 2411 } 2412 2413 // Process the fact that Edge (from, to) is reachable, including marking 2414 // any newly reachable blocks and instructions for processing. 2415 void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) { 2416 // Check if the Edge was reachable before. 2417 if (ReachableEdges.insert({From, To}).second) { 2418 // If this block wasn't reachable before, all instructions are touched. 2419 if (ReachableBlocks.insert(To).second) { 2420 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) 2421 << " marked reachable\n"); 2422 const auto &InstRange = BlockInstRange.lookup(To); 2423 TouchedInstructions.set(InstRange.first, InstRange.second); 2424 } else { 2425 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) 2426 << " was reachable, but new edge {" 2427 << getBlockName(From) << "," << getBlockName(To) 2428 << "} to it found\n"); 2429 2430 // We've made an edge reachable to an existing block, which may 2431 // impact predicates. Otherwise, only mark the phi nodes as touched, as 2432 // they are the only thing that depend on new edges. Anything using their 2433 // values will get propagated to if necessary. 2434 if (MemoryAccess *MemPhi = getMemoryAccess(To)) 2435 TouchedInstructions.set(InstrToDFSNum(MemPhi)); 2436 2437 // FIXME: We should just add a union op on a Bitvector and 2438 // SparseBitVector. We can do it word by word faster than we are doing it 2439 // here. 2440 for (auto InstNum : RevisitOnReachabilityChange[To]) 2441 TouchedInstructions.set(InstNum); 2442 } 2443 } 2444 } 2445 2446 // Given a predicate condition (from a switch, cmp, or whatever) and a block, 2447 // see if we know some constant value for it already. 2448 Value *NewGVN::findConditionEquivalence(Value *Cond) const { 2449 auto Result = lookupOperandLeader(Cond); 2450 return isa<Constant>(Result) ? Result : nullptr; 2451 } 2452 2453 // Process the outgoing edges of a block for reachability. 2454 void NewGVN::processOutgoingEdges(Instruction *TI, BasicBlock *B) { 2455 // Evaluate reachability of terminator instruction. 2456 Value *Cond; 2457 BasicBlock *TrueSucc, *FalseSucc; 2458 if (match(TI, m_Br(m_Value(Cond), TrueSucc, FalseSucc))) { 2459 Value *CondEvaluated = findConditionEquivalence(Cond); 2460 if (!CondEvaluated) { 2461 if (auto *I = dyn_cast<Instruction>(Cond)) { 2462 SmallPtrSet<Value *, 4> Visited; 2463 auto Res = performSymbolicEvaluation(I, Visited); 2464 if (const auto *CE = dyn_cast_or_null<ConstantExpression>(Res.Expr)) { 2465 CondEvaluated = CE->getConstantValue(); 2466 addAdditionalUsers(Res, I); 2467 } else { 2468 // Did not use simplification result, no need to add the extra 2469 // dependency. 
2470 Res.ExtraDep = nullptr; 2471 } 2472 } else if (isa<ConstantInt>(Cond)) { 2473 CondEvaluated = Cond; 2474 } 2475 } 2476 ConstantInt *CI; 2477 if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) { 2478 if (CI->isOne()) { 2479 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI 2480 << " evaluated to true\n"); 2481 updateReachableEdge(B, TrueSucc); 2482 } else if (CI->isZero()) { 2483 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI 2484 << " evaluated to false\n"); 2485 updateReachableEdge(B, FalseSucc); 2486 } 2487 } else { 2488 updateReachableEdge(B, TrueSucc); 2489 updateReachableEdge(B, FalseSucc); 2490 } 2491 } else if (auto *SI = dyn_cast<SwitchInst>(TI)) { 2492 // For switches, propagate the case values into the case 2493 // destinations. 2494 2495 Value *SwitchCond = SI->getCondition(); 2496 Value *CondEvaluated = findConditionEquivalence(SwitchCond); 2497 // See if we were able to turn this switch statement into a constant. 2498 if (CondEvaluated && isa<ConstantInt>(CondEvaluated)) { 2499 auto *CondVal = cast<ConstantInt>(CondEvaluated); 2500 // We should be able to get case value for this. 2501 auto Case = *SI->findCaseValue(CondVal); 2502 if (Case.getCaseSuccessor() == SI->getDefaultDest()) { 2503 // We proved the value is outside of the range of the case. 2504 // We can't do anything other than mark the default dest as reachable, 2505 // and go home. 2506 updateReachableEdge(B, SI->getDefaultDest()); 2507 return; 2508 } 2509 // Now get where it goes and mark it reachable. 2510 BasicBlock *TargetBlock = Case.getCaseSuccessor(); 2511 updateReachableEdge(B, TargetBlock); 2512 } else { 2513 for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) { 2514 BasicBlock *TargetBlock = SI->getSuccessor(i); 2515 updateReachableEdge(B, TargetBlock); 2516 } 2517 } 2518 } else { 2519 // Otherwise this is either unconditional, or a type we have no 2520 // idea about. Just mark successors as reachable. 2521 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) { 2522 BasicBlock *TargetBlock = TI->getSuccessor(i); 2523 updateReachableEdge(B, TargetBlock); 2524 } 2525 2526 // This also may be a memory defining terminator, in which case, set it 2527 // equivalent only to itself. 2528 // 2529 auto *MA = getMemoryAccess(TI); 2530 if (MA && !isa<MemoryUse>(MA)) { 2531 auto *CC = ensureLeaderOfMemoryClass(MA); 2532 if (setMemoryClass(MA, CC)) 2533 markMemoryUsersTouched(MA); 2534 } 2535 } 2536 } 2537 2538 // Remove the PHI of Ops PHI for I 2539 void NewGVN::removePhiOfOps(Instruction *I, PHINode *PHITemp) { 2540 InstrDFS.erase(PHITemp); 2541 // It's still a temp instruction. We keep it in the array so it gets erased. 2542 // However, it's no longer used by I, or in the block 2543 TempToBlock.erase(PHITemp); 2544 RealToTemp.erase(I); 2545 // We don't remove the users from the phi node uses. This wastes a little 2546 // time, but such is life. We could use two sets to track which were there 2547 // are the start of NewGVN, and which were added, but right nowt he cost of 2548 // tracking is more than the cost of checking for more phi of ops. 2549 } 2550 2551 // Add PHI Op in BB as a PHI of operations version of ExistingValue. 
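// The temporary phi is given the same DFS number as ExistingValue so it is
// processed at the same point in the iteration order; RealToTemp maps the real
// instruction back to it, and TempToBlock records the block it lives in.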
2552 void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB, 2553 Instruction *ExistingValue) { 2554 InstrDFS[Op] = InstrToDFSNum(ExistingValue); 2555 AllTempInstructions.insert(Op); 2556 TempToBlock[Op] = BB; 2557 RealToTemp[ExistingValue] = Op; 2558 // Add all users to phi node use, as they are now uses of the phi of ops phis 2559 // and may themselves be phi of ops. 2560 for (auto *U : ExistingValue->users()) 2561 if (auto *UI = dyn_cast<Instruction>(U)) 2562 PHINodeUses.insert(UI); 2563 } 2564 2565 static bool okayForPHIOfOps(const Instruction *I) { 2566 if (!EnablePhiOfOps) 2567 return false; 2568 return isa<BinaryOperator>(I) || isa<SelectInst>(I) || isa<CmpInst>(I) || 2569 isa<LoadInst>(I); 2570 } 2571 2572 bool NewGVN::OpIsSafeForPHIOfOpsHelper( 2573 Value *V, const BasicBlock *PHIBlock, 2574 SmallPtrSetImpl<const Value *> &Visited, 2575 SmallVectorImpl<Instruction *> &Worklist) { 2576 2577 if (!isa<Instruction>(V)) 2578 return true; 2579 auto OISIt = OpSafeForPHIOfOps.find(V); 2580 if (OISIt != OpSafeForPHIOfOps.end()) 2581 return OISIt->second; 2582 2583 // Keep walking until we either dominate the phi block, or hit a phi, or run 2584 // out of things to check. 2585 if (DT->properlyDominates(getBlockForValue(V), PHIBlock)) { 2586 OpSafeForPHIOfOps.insert({V, true}); 2587 return true; 2588 } 2589 // PHI in the same block. 2590 if (isa<PHINode>(V) && getBlockForValue(V) == PHIBlock) { 2591 OpSafeForPHIOfOps.insert({V, false}); 2592 return false; 2593 } 2594 2595 auto *OrigI = cast<Instruction>(V); 2596 // When we hit an instruction that reads memory (load, call, etc), we must 2597 // consider any store that may happen in the loop. For now, we assume the 2598 // worst: there is a store in the loop that alias with this read. 2599 // The case where the load is outside the loop is already covered by the 2600 // dominator check above. 2601 // TODO: relax this condition 2602 if (OrigI->mayReadFromMemory()) 2603 return false; 2604 2605 for (auto *Op : OrigI->operand_values()) { 2606 if (!isa<Instruction>(Op)) 2607 continue; 2608 // Stop now if we find an unsafe operand. 2609 auto OISIt = OpSafeForPHIOfOps.find(OrigI); 2610 if (OISIt != OpSafeForPHIOfOps.end()) { 2611 if (!OISIt->second) { 2612 OpSafeForPHIOfOps.insert({V, false}); 2613 return false; 2614 } 2615 continue; 2616 } 2617 if (!Visited.insert(Op).second) 2618 continue; 2619 Worklist.push_back(cast<Instruction>(Op)); 2620 } 2621 return true; 2622 } 2623 2624 // Return true if this operand will be safe to use for phi of ops. 2625 // 2626 // The reason some operands are unsafe is that we are not trying to recursively 2627 // translate everything back through phi nodes. We actually expect some lookups 2628 // of expressions to fail. In particular, a lookup where the expression cannot 2629 // exist in the predecessor. This is true even if the expression, as shown, can 2630 // be determined to be constant. 2631 bool NewGVN::OpIsSafeForPHIOfOps(Value *V, const BasicBlock *PHIBlock, 2632 SmallPtrSetImpl<const Value *> &Visited) { 2633 SmallVector<Instruction *, 4> Worklist; 2634 if (!OpIsSafeForPHIOfOpsHelper(V, PHIBlock, Visited, Worklist)) 2635 return false; 2636 while (!Worklist.empty()) { 2637 auto *I = Worklist.pop_back_val(); 2638 if (!OpIsSafeForPHIOfOpsHelper(I, PHIBlock, Visited, Worklist)) 2639 return false; 2640 } 2641 OpSafeForPHIOfOps.insert({V, true}); 2642 return true; 2643 } 2644 2645 // Try to find a leader for instruction TransInst, which is a phi translated 2646 // version of something in our original program. 
Visited is used to ensure we 2647 // don't infinite loop during translations of cycles. OrigInst is the 2648 // instruction in the original program, and PredBB is the predecessor we 2649 // translated it through. 2650 Value *NewGVN::findLeaderForInst(Instruction *TransInst, 2651 SmallPtrSetImpl<Value *> &Visited, 2652 MemoryAccess *MemAccess, Instruction *OrigInst, 2653 BasicBlock *PredBB) { 2654 unsigned IDFSNum = InstrToDFSNum(OrigInst); 2655 // Make sure it's marked as a temporary instruction. 2656 AllTempInstructions.insert(TransInst); 2657 // and make sure anything that tries to add it's DFS number is 2658 // redirected to the instruction we are making a phi of ops 2659 // for. 2660 TempToBlock.insert({TransInst, PredBB}); 2661 InstrDFS.insert({TransInst, IDFSNum}); 2662 2663 auto Res = performSymbolicEvaluation(TransInst, Visited); 2664 const Expression *E = Res.Expr; 2665 addAdditionalUsers(Res, OrigInst); 2666 InstrDFS.erase(TransInst); 2667 AllTempInstructions.erase(TransInst); 2668 TempToBlock.erase(TransInst); 2669 if (MemAccess) 2670 TempToMemory.erase(TransInst); 2671 if (!E) 2672 return nullptr; 2673 auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB); 2674 if (!FoundVal) { 2675 ExpressionToPhiOfOps[E].insert(OrigInst); 2676 LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst 2677 << " in block " << getBlockName(PredBB) << "\n"); 2678 return nullptr; 2679 } 2680 if (auto *SI = dyn_cast<StoreInst>(FoundVal)) 2681 FoundVal = SI->getValueOperand(); 2682 return FoundVal; 2683 } 2684 2685 // When we see an instruction that is an op of phis, generate the equivalent phi 2686 // of ops form. 2687 const Expression * 2688 NewGVN::makePossiblePHIOfOps(Instruction *I, 2689 SmallPtrSetImpl<Value *> &Visited) { 2690 if (!okayForPHIOfOps(I)) 2691 return nullptr; 2692 2693 if (!Visited.insert(I).second) 2694 return nullptr; 2695 // For now, we require the instruction be cycle free because we don't 2696 // *always* create a phi of ops for instructions that could be done as phi 2697 // of ops, we only do it if we think it is useful. If we did do it all the 2698 // time, we could remove the cycle free check. 2699 if (!isCycleFree(I)) 2700 return nullptr; 2701 2702 SmallPtrSet<const Value *, 8> ProcessedPHIs; 2703 // TODO: We don't do phi translation on memory accesses because it's 2704 // complicated. For a load, we'd need to be able to simulate a new memoryuse, 2705 // which we don't have a good way of doing ATM. 2706 auto *MemAccess = getMemoryAccess(I); 2707 // If the memory operation is defined by a memory operation this block that 2708 // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi 2709 // can't help, as it would still be killed by that memory operation. 
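  // (For example, if I is a load whose defining access is a store in this
  // block, a phi-translated copy of the load would still be clobbered by that
  // store, so there is nothing to gain.)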
2710 if (MemAccess && !isa<MemoryPhi>(MemAccess->getDefiningAccess()) && 2711 MemAccess->getDefiningAccess()->getBlock() == I->getParent()) 2712 return nullptr; 2713 2714 // Convert op of phis to phi of ops 2715 SmallPtrSet<const Value *, 10> VisitedOps; 2716 SmallVector<Value *, 4> Ops(I->operand_values()); 2717 BasicBlock *SamePHIBlock = nullptr; 2718 PHINode *OpPHI = nullptr; 2719 if (!DebugCounter::shouldExecute(PHIOfOpsCounter)) 2720 return nullptr; 2721 for (auto *Op : Ops) { 2722 if (!isa<PHINode>(Op)) { 2723 auto *ValuePHI = RealToTemp.lookup(Op); 2724 if (!ValuePHI) 2725 continue; 2726 LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n"); 2727 Op = ValuePHI; 2728 } 2729 OpPHI = cast<PHINode>(Op); 2730 if (!SamePHIBlock) { 2731 SamePHIBlock = getBlockForValue(OpPHI); 2732 } else if (SamePHIBlock != getBlockForValue(OpPHI)) { 2733 LLVM_DEBUG( 2734 dbgs() 2735 << "PHIs for operands are not all in the same block, aborting\n"); 2736 return nullptr; 2737 } 2738 // No point in doing this for one-operand phis. 2739 if (OpPHI->getNumOperands() == 1) { 2740 OpPHI = nullptr; 2741 continue; 2742 } 2743 } 2744 2745 if (!OpPHI) 2746 return nullptr; 2747 2748 SmallVector<ValPair, 4> PHIOps; 2749 SmallPtrSet<Value *, 4> Deps; 2750 auto *PHIBlock = getBlockForValue(OpPHI); 2751 RevisitOnReachabilityChange[PHIBlock].reset(InstrToDFSNum(I)); 2752 for (unsigned PredNum = 0; PredNum < OpPHI->getNumOperands(); ++PredNum) { 2753 auto *PredBB = OpPHI->getIncomingBlock(PredNum); 2754 Value *FoundVal = nullptr; 2755 SmallPtrSet<Value *, 4> CurrentDeps; 2756 // We could just skip unreachable edges entirely but it's tricky to do 2757 // with rewriting existing phi nodes. 2758 if (ReachableEdges.count({PredBB, PHIBlock})) { 2759 // Clone the instruction, create an expression from it that is 2760 // translated back into the predecessor, and see if we have a leader. 2761 Instruction *ValueOp = I->clone(); 2762 if (MemAccess) 2763 TempToMemory.insert({ValueOp, MemAccess}); 2764 bool SafeForPHIOfOps = true; 2765 VisitedOps.clear(); 2766 for (auto &Op : ValueOp->operands()) { 2767 auto *OrigOp = &*Op; 2768 // When these operand changes, it could change whether there is a 2769 // leader for us or not, so we have to add additional users. 2770 if (isa<PHINode>(Op)) { 2771 Op = Op->DoPHITranslation(PHIBlock, PredBB); 2772 if (Op != OrigOp && Op != I) 2773 CurrentDeps.insert(Op); 2774 } else if (auto *ValuePHI = RealToTemp.lookup(Op)) { 2775 if (getBlockForValue(ValuePHI) == PHIBlock) 2776 Op = ValuePHI->getIncomingValueForBlock(PredBB); 2777 } 2778 // If we phi-translated the op, it must be safe. 2779 SafeForPHIOfOps = 2780 SafeForPHIOfOps && 2781 (Op != OrigOp || OpIsSafeForPHIOfOps(Op, PHIBlock, VisitedOps)); 2782 } 2783 // FIXME: For those things that are not safe we could generate 2784 // expressions all the way down, and see if this comes out to a 2785 // constant. For anything where that is true, and unsafe, we should 2786 // have made a phi-of-ops (or value numbered it equivalent to something) 2787 // for the pieces already. 2788 FoundVal = !SafeForPHIOfOps ? nullptr 2789 : findLeaderForInst(ValueOp, Visited, 2790 MemAccess, I, PredBB); 2791 ValueOp->deleteValue(); 2792 if (!FoundVal) { 2793 // We failed to find a leader for the current ValueOp, but this might 2794 // change in case of the translated operands change. 
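        // Recording the phi-translated operands as additional users of I means
        // a later congruence-class change for any of them re-touches I, so we
        // will retry building the phi of ops then.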
2795 if (SafeForPHIOfOps) 2796 for (auto Dep : CurrentDeps) 2797 addAdditionalUsers(Dep, I); 2798 2799 return nullptr; 2800 } 2801 Deps.insert(CurrentDeps.begin(), CurrentDeps.end()); 2802 } else { 2803 LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block " 2804 << getBlockName(PredBB) 2805 << " because the block is unreachable\n"); 2806 FoundVal = PoisonValue::get(I->getType()); 2807 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I)); 2808 } 2809 2810 PHIOps.push_back({FoundVal, PredBB}); 2811 LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in " 2812 << getBlockName(PredBB) << "\n"); 2813 } 2814 for (auto Dep : Deps) 2815 addAdditionalUsers(Dep, I); 2816 sortPHIOps(PHIOps); 2817 auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock); 2818 if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) { 2819 LLVM_DEBUG( 2820 dbgs() 2821 << "Not creating real PHI of ops because it simplified to existing " 2822 "value or constant\n"); 2823 // We have leaders for all operands, but do not create a real PHI node with 2824 // those leaders as operands, so the link between the operands and the 2825 // PHI-of-ops is not materialized in the IR. If any of those leaders 2826 // changes, the PHI-of-op may change also, so we need to add the operands as 2827 // additional users. 2828 for (auto &O : PHIOps) 2829 addAdditionalUsers(O.first, I); 2830 2831 return E; 2832 } 2833 auto *ValuePHI = RealToTemp.lookup(I); 2834 bool NewPHI = false; 2835 if (!ValuePHI) { 2836 ValuePHI = 2837 PHINode::Create(I->getType(), OpPHI->getNumOperands(), "phiofops"); 2838 addPhiOfOps(ValuePHI, PHIBlock, I); 2839 NewPHI = true; 2840 NumGVNPHIOfOpsCreated++; 2841 } 2842 if (NewPHI) { 2843 for (auto PHIOp : PHIOps) 2844 ValuePHI->addIncoming(PHIOp.first, PHIOp.second); 2845 } else { 2846 TempToBlock[ValuePHI] = PHIBlock; 2847 unsigned int i = 0; 2848 for (auto PHIOp : PHIOps) { 2849 ValuePHI->setIncomingValue(i, PHIOp.first); 2850 ValuePHI->setIncomingBlock(i, PHIOp.second); 2851 ++i; 2852 } 2853 } 2854 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I)); 2855 LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I 2856 << "\n"); 2857 2858 return E; 2859 } 2860 2861 // The algorithm initially places the values of the routine in the TOP 2862 // congruence class. The leader of TOP is the undetermined value `poison`. 2863 // When the algorithm has finished, values still in TOP are unreachable. 2864 void NewGVN::initializeCongruenceClasses(Function &F) { 2865 NextCongruenceNum = 0; 2866 2867 // Note that even though we use the live on entry def as a representative 2868 // MemoryAccess, it is *not* the same as the actual live on entry def. We 2869 // have no real equivalent to poison for MemoryAccesses, and so we really 2870 // should be checking whether the MemoryAccess is top if we want to know if it 2871 // is equivalent to everything. Otherwise, what this really signifies is that 2872 // the access "it reaches all the way back to the beginning of the function" 2873 2874 // Initialize all other instructions to be in TOP class. 2875 TOPClass = createCongruenceClass(nullptr, nullptr); 2876 TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef()); 2877 // The live on entry def gets put into it's own class 2878 MemoryAccessToClass[MSSA->getLiveOnEntryDef()] = 2879 createMemoryClass(MSSA->getLiveOnEntryDef()); 2880 2881 for (auto DTN : nodes(DT)) { 2882 BasicBlock *BB = DTN->getBlock(); 2883 // All MemoryAccesses are equivalent to live on entry to start. 
They must 2884 // be initialized to something so that initial changes are noticed. For 2885 // the maximal answer, we initialize them all to be the same as 2886 // liveOnEntry. 2887 auto *MemoryBlockDefs = MSSA->getBlockDefs(BB); 2888 if (MemoryBlockDefs) 2889 for (const auto &Def : *MemoryBlockDefs) { 2890 MemoryAccessToClass[&Def] = TOPClass; 2891 auto *MD = dyn_cast<MemoryDef>(&Def); 2892 // Insert the memory phis into the member list. 2893 if (!MD) { 2894 const MemoryPhi *MP = cast<MemoryPhi>(&Def); 2895 TOPClass->memory_insert(MP); 2896 MemoryPhiState.insert({MP, MPS_TOP}); 2897 } 2898 2899 if (MD && isa<StoreInst>(MD->getMemoryInst())) 2900 TOPClass->incStoreCount(); 2901 } 2902 2903 // FIXME: This is trying to discover which instructions are uses of phi 2904 // nodes. We should move this into one of the myriad of places that walk 2905 // all the operands already. 2906 for (auto &I : *BB) { 2907 if (isa<PHINode>(&I)) 2908 for (auto *U : I.users()) 2909 if (auto *UInst = dyn_cast<Instruction>(U)) 2910 if (InstrToDFSNum(UInst) != 0 && okayForPHIOfOps(UInst)) 2911 PHINodeUses.insert(UInst); 2912 // Don't insert void terminators into the class. We don't value number 2913 // them, and they just end up sitting in TOP. 2914 if (I.isTerminator() && I.getType()->isVoidTy()) 2915 continue; 2916 TOPClass->insert(&I); 2917 ValueToClass[&I] = TOPClass; 2918 } 2919 } 2920 2921 // Initialize arguments to be in their own unique congruence classes 2922 for (auto &FA : F.args()) 2923 createSingletonCongruenceClass(&FA); 2924 } 2925 2926 void NewGVN::cleanupTables() { 2927 for (unsigned i = 0, e = CongruenceClasses.size(); i != e; ++i) { 2928 LLVM_DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID() 2929 << " has " << CongruenceClasses[i]->size() 2930 << " members\n"); 2931 // Make sure we delete the congruence class (probably worth switching to 2932 // a unique_ptr at some point. 2933 delete CongruenceClasses[i]; 2934 CongruenceClasses[i] = nullptr; 2935 } 2936 2937 // Destroy the value expressions 2938 SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(), 2939 AllTempInstructions.end()); 2940 AllTempInstructions.clear(); 2941 2942 // We have to drop all references for everything first, so there are no uses 2943 // left as we delete them. 2944 for (auto *I : TempInst) { 2945 I->dropAllReferences(); 2946 } 2947 2948 while (!TempInst.empty()) { 2949 auto *I = TempInst.pop_back_val(); 2950 I->deleteValue(); 2951 } 2952 2953 ValueToClass.clear(); 2954 ArgRecycler.clear(ExpressionAllocator); 2955 ExpressionAllocator.Reset(); 2956 CongruenceClasses.clear(); 2957 ExpressionToClass.clear(); 2958 ValueToExpression.clear(); 2959 RealToTemp.clear(); 2960 AdditionalUsers.clear(); 2961 ExpressionToPhiOfOps.clear(); 2962 TempToBlock.clear(); 2963 TempToMemory.clear(); 2964 PHINodeUses.clear(); 2965 OpSafeForPHIOfOps.clear(); 2966 ReachableBlocks.clear(); 2967 ReachableEdges.clear(); 2968 #ifndef NDEBUG 2969 ProcessedCount.clear(); 2970 #endif 2971 InstrDFS.clear(); 2972 InstructionsToErase.clear(); 2973 DFSToInstr.clear(); 2974 BlockInstRange.clear(); 2975 TouchedInstructions.clear(); 2976 MemoryAccessToClass.clear(); 2977 PredicateToUsers.clear(); 2978 MemoryToUsers.clear(); 2979 RevisitOnReachabilityChange.clear(); 2980 IntrinsicInstPred.clear(); 2981 } 2982 2983 // Assign local DFS number mapping to instructions, and leave space for Value 2984 // PHI's. 
2985 std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B, 2986 unsigned Start) { 2987 unsigned End = Start; 2988 if (MemoryAccess *MemPhi = getMemoryAccess(B)) { 2989 InstrDFS[MemPhi] = End++; 2990 DFSToInstr.emplace_back(MemPhi); 2991 } 2992 2993 // Then the real block goes next. 2994 for (auto &I : *B) { 2995 // There's no need to call isInstructionTriviallyDead more than once on 2996 // an instruction. Therefore, once we know that an instruction is dead 2997 // we change its DFS number so that it doesn't get value numbered. 2998 if (isInstructionTriviallyDead(&I, TLI)) { 2999 InstrDFS[&I] = 0; 3000 LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n"); 3001 markInstructionForDeletion(&I); 3002 continue; 3003 } 3004 if (isa<PHINode>(&I)) 3005 RevisitOnReachabilityChange[B].set(End); 3006 InstrDFS[&I] = End++; 3007 DFSToInstr.emplace_back(&I); 3008 } 3009 3010 // All of the range functions taken half-open ranges (open on the end side). 3011 // So we do not subtract one from count, because at this point it is one 3012 // greater than the last instruction. 3013 return std::make_pair(Start, End); 3014 } 3015 3016 void NewGVN::updateProcessedCount(const Value *V) { 3017 #ifndef NDEBUG 3018 if (ProcessedCount.count(V) == 0) { 3019 ProcessedCount.insert({V, 1}); 3020 } else { 3021 ++ProcessedCount[V]; 3022 assert(ProcessedCount[V] < 100 && 3023 "Seem to have processed the same Value a lot"); 3024 } 3025 #endif 3026 } 3027 3028 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes 3029 void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) { 3030 // If all the arguments are the same, the MemoryPhi has the same value as the 3031 // argument. Filter out unreachable blocks and self phis from our operands. 3032 // TODO: We could do cycle-checking on the memory phis to allow valueizing for 3033 // self-phi checking. 3034 const BasicBlock *PHIBlock = MP->getBlock(); 3035 auto Filtered = make_filter_range(MP->operands(), [&](const Use &U) { 3036 return cast<MemoryAccess>(U) != MP && 3037 !isMemoryAccessTOP(cast<MemoryAccess>(U)) && 3038 ReachableEdges.count({MP->getIncomingBlock(U), PHIBlock}); 3039 }); 3040 // If all that is left is nothing, our memoryphi is poison. We keep it as 3041 // InitialClass. Note: The only case this should happen is if we have at 3042 // least one self-argument. 3043 if (Filtered.begin() == Filtered.end()) { 3044 if (setMemoryClass(MP, TOPClass)) 3045 markMemoryUsersTouched(MP); 3046 return; 3047 } 3048 3049 // Transform the remaining operands into operand leaders. 3050 // FIXME: mapped_iterator should have a range version. 3051 auto LookupFunc = [&](const Use &U) { 3052 return lookupMemoryLeader(cast<MemoryAccess>(U)); 3053 }; 3054 auto MappedBegin = map_iterator(Filtered.begin(), LookupFunc); 3055 auto MappedEnd = map_iterator(Filtered.end(), LookupFunc); 3056 3057 // and now check if all the elements are equal. 3058 // Sadly, we can't use std::equals since these are random access iterators. 3059 const auto *AllSameValue = *MappedBegin; 3060 ++MappedBegin; 3061 bool AllEqual = std::all_of( 3062 MappedBegin, MappedEnd, 3063 [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; }); 3064 3065 if (AllEqual) 3066 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue 3067 << "\n"); 3068 else 3069 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n"); 3070 // If it's equal to something, it's in that class. 
Otherwise, it has to be in 3071 // a class where it is the leader (other things may be equivalent to it, but 3072 // it needs to start off in its own class, which means it must have been the 3073 // leader, and it can't have stopped being the leader because it was never 3074 // removed). 3075 CongruenceClass *CC = 3076 AllEqual ? getMemoryClass(AllSameValue) : ensureLeaderOfMemoryClass(MP); 3077 auto OldState = MemoryPhiState.lookup(MP); 3078 assert(OldState != MPS_Invalid && "Invalid memory phi state"); 3079 auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique; 3080 MemoryPhiState[MP] = NewState; 3081 if (setMemoryClass(MP, CC) || OldState != NewState) 3082 markMemoryUsersTouched(MP); 3083 } 3084 3085 // Value number a single instruction, symbolically evaluating, performing 3086 // congruence finding, and updating mappings. 3087 void NewGVN::valueNumberInstruction(Instruction *I) { 3088 LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n"); 3089 if (!I->isTerminator()) { 3090 const Expression *Symbolized = nullptr; 3091 SmallPtrSet<Value *, 2> Visited; 3092 if (DebugCounter::shouldExecute(VNCounter)) { 3093 auto Res = performSymbolicEvaluation(I, Visited); 3094 Symbolized = Res.Expr; 3095 addAdditionalUsers(Res, I); 3096 3097 // Make a phi of ops if necessary 3098 if (Symbolized && !isa<ConstantExpression>(Symbolized) && 3099 !isa<VariableExpression>(Symbolized) && PHINodeUses.count(I)) { 3100 auto *PHIE = makePossiblePHIOfOps(I, Visited); 3101 // If we created a phi of ops, use it. 3102 // If we couldn't create one, make sure we don't leave one lying around 3103 if (PHIE) { 3104 Symbolized = PHIE; 3105 } else if (auto *Op = RealToTemp.lookup(I)) { 3106 removePhiOfOps(I, Op); 3107 } 3108 } 3109 } else { 3110 // Mark the instruction as unused so we don't value number it again. 3111 InstrDFS[I] = 0; 3112 } 3113 // If we couldn't come up with a symbolic expression, use the unknown 3114 // expression 3115 if (Symbolized == nullptr) 3116 Symbolized = createUnknownExpression(I); 3117 performCongruenceFinding(I, Symbolized); 3118 } else { 3119 // Handle terminators that return values. All of them produce values we 3120 // don't currently understand. We don't place non-value producing 3121 // terminators in a class. 3122 if (!I->getType()->isVoidTy()) { 3123 auto *Symbolized = createUnknownExpression(I); 3124 performCongruenceFinding(I, Symbolized); 3125 } 3126 processOutgoingEdges(I, I->getParent()); 3127 } 3128 } 3129 3130 // Check if there is a path, using single or equal argument phi nodes, from 3131 // First to Second. 3132 bool NewGVN::singleReachablePHIPath( 3133 SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First, 3134 const MemoryAccess *Second) const { 3135 if (First == Second) 3136 return true; 3137 if (MSSA->isLiveOnEntryDef(First)) 3138 return false; 3139 3140 // This is not perfect, but as we're just verifying here, we can live with 3141 // the loss of precision. The real solution would be that of doing strongly 3142 // connected component finding in this routine, and it's probably not worth 3143 // the complexity for the time being. So, we just keep a set of visited 3144 // MemoryAccess and return true when we hit a cycle. 
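  // (Illustrative: a loop-header MemoryPhi whose optimized def chain leads
  // through the latch back to the same phi would revisit First here;
  // returning true simply treats the cycle as a single reachable path,
  // accepting the loss of precision described above.)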
  if (Visited.count(First))
    return true;
  Visited.insert(First);

  const auto *EndDef = First;
  for (auto *ChainDef : optimized_def_chain(First)) {
    if (ChainDef == Second)
      return true;
    if (MSSA->isLiveOnEntryDef(ChainDef))
      return false;
    EndDef = ChainDef;
  }
  auto *MP = cast<MemoryPhi>(EndDef);
  auto ReachableOperandPred = [&](const Use &U) {
    return ReachableEdges.count({MP->getIncomingBlock(U), MP->getBlock()});
  };
  auto FilteredPhiArgs =
      make_filter_range(MP->operands(), ReachableOperandPred);
  SmallVector<const Value *, 32> OperandList;
  llvm::copy(FilteredPhiArgs, std::back_inserter(OperandList));
  bool Okay = is_splat(OperandList);
  if (Okay)
    return singleReachablePHIPath(Visited, cast<MemoryAccess>(OperandList[0]),
                                  Second);
  return false;
}

// Verify that the memory equivalence table makes sense relative to the
// congruence classes. Note that this checking is not perfect, and is currently
// subject to very rare false negatives. It is only useful for
// testing/debugging.
void NewGVN::verifyMemoryCongruency() const {
#ifndef NDEBUG
  // Verify that the memory table equivalence and memory member set match.
  for (const auto *CC : CongruenceClasses) {
    if (CC == TOPClass || CC->isDead())
      continue;
    if (CC->getStoreCount() != 0) {
      assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
             "Any class with a store as a leader should have a "
             "representative stored value");
      assert(CC->getMemoryLeader() &&
             "Any congruence class with a store should have a "
             "representative access");
    }

    if (CC->getMemoryLeader())
      assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
             "Representative MemoryAccess does not appear to be reverse "
             "mapped properly");
    for (auto M : CC->memory())
      assert(MemoryAccessToClass.lookup(M) == CC &&
             "Memory member does not appear to be reverse mapped properly");
  }

  // Anything equivalent in the MemoryAccess table should be in the same
  // congruence class.

  // Filter out the unreachable and trivially dead entries, because they may
  // never have been updated if the instructions were not processed.
  auto ReachableAccessPred =
      [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
        bool Result = ReachableBlocks.count(Pair.first->getBlock());
        if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
            MemoryToDFSNum(Pair.first) == 0)
          return false;
        if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
          return !isInstructionTriviallyDead(MemDef->getMemoryInst());

        // We could have phi nodes whose operands are all trivially dead,
        // so we don't process them.
3216 if (auto *MemPHI = dyn_cast<MemoryPhi>(Pair.first)) { 3217 for (auto &U : MemPHI->incoming_values()) { 3218 if (auto *I = dyn_cast<Instruction>(&*U)) { 3219 if (!isInstructionTriviallyDead(I)) 3220 return true; 3221 } 3222 } 3223 return false; 3224 } 3225 3226 return true; 3227 }; 3228 3229 auto Filtered = make_filter_range(MemoryAccessToClass, ReachableAccessPred); 3230 for (auto KV : Filtered) { 3231 if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(KV.first)) { 3232 auto *SecondMUD = dyn_cast<MemoryUseOrDef>(KV.second->getMemoryLeader()); 3233 if (FirstMUD && SecondMUD) { 3234 SmallPtrSet<const MemoryAccess *, 8> VisitedMAS; 3235 assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) || 3236 ValueToClass.lookup(FirstMUD->getMemoryInst()) == 3237 ValueToClass.lookup(SecondMUD->getMemoryInst())) && 3238 "The instructions for these memory operations should have " 3239 "been in the same congruence class or reachable through" 3240 "a single argument phi"); 3241 } 3242 } else if (auto *FirstMP = dyn_cast<MemoryPhi>(KV.first)) { 3243 // We can only sanely verify that MemoryDefs in the operand list all have 3244 // the same class. 3245 auto ReachableOperandPred = [&](const Use &U) { 3246 return ReachableEdges.count( 3247 {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) && 3248 isa<MemoryDef>(U); 3249 3250 }; 3251 // All arguments should in the same class, ignoring unreachable arguments 3252 auto FilteredPhiArgs = 3253 make_filter_range(FirstMP->operands(), ReachableOperandPred); 3254 SmallVector<const CongruenceClass *, 16> PhiOpClasses; 3255 std::transform(FilteredPhiArgs.begin(), FilteredPhiArgs.end(), 3256 std::back_inserter(PhiOpClasses), [&](const Use &U) { 3257 const MemoryDef *MD = cast<MemoryDef>(U); 3258 return ValueToClass.lookup(MD->getMemoryInst()); 3259 }); 3260 assert(is_splat(PhiOpClasses) && 3261 "All MemoryPhi arguments should be in the same class"); 3262 } 3263 } 3264 #endif 3265 } 3266 3267 // Verify that the sparse propagation we did actually found the maximal fixpoint 3268 // We do this by storing the value to class mapping, touching all instructions, 3269 // and redoing the iteration to see if anything changed. 3270 void NewGVN::verifyIterationSettled(Function &F) { 3271 #ifndef NDEBUG 3272 LLVM_DEBUG(dbgs() << "Beginning iteration verification\n"); 3273 if (DebugCounter::isCounterSet(VNCounter)) 3274 DebugCounter::setCounterValue(VNCounter, StartingVNCounter); 3275 3276 // Note that we have to store the actual classes, as we may change existing 3277 // classes during iteration. This is because our memory iteration propagation 3278 // is not perfect, and so may waste a little work. But it should generate 3279 // exactly the same congruence classes we have now, with different IDs. 3280 std::map<const Value *, CongruenceClass> BeforeIteration; 3281 3282 for (auto &KV : ValueToClass) { 3283 if (auto *I = dyn_cast<Instruction>(KV.first)) 3284 // Skip unused/dead instructions. 3285 if (InstrToDFSNum(I) == 0) 3286 continue; 3287 BeforeIteration.insert({KV.first, *KV.second}); 3288 } 3289 3290 TouchedInstructions.set(); 3291 TouchedInstructions.reset(0); 3292 iterateTouchedInstructions(); 3293 DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>> 3294 EqualClasses; 3295 for (const auto &KV : ValueToClass) { 3296 if (auto *I = dyn_cast<Instruction>(KV.first)) 3297 // Skip unused/dead instructions. 
3298 if (InstrToDFSNum(I) == 0) 3299 continue; 3300 // We could sink these uses, but i think this adds a bit of clarity here as 3301 // to what we are comparing. 3302 auto *BeforeCC = &BeforeIteration.find(KV.first)->second; 3303 auto *AfterCC = KV.second; 3304 // Note that the classes can't change at this point, so we memoize the set 3305 // that are equal. 3306 if (!EqualClasses.count({BeforeCC, AfterCC})) { 3307 assert(BeforeCC->isEquivalentTo(AfterCC) && 3308 "Value number changed after main loop completed!"); 3309 EqualClasses.insert({BeforeCC, AfterCC}); 3310 } 3311 } 3312 #endif 3313 } 3314 3315 // Verify that for each store expression in the expression to class mapping, 3316 // only the latest appears, and multiple ones do not appear. 3317 // Because loads do not use the stored value when doing equality with stores, 3318 // if we don't erase the old store expressions from the table, a load can find 3319 // a no-longer valid StoreExpression. 3320 void NewGVN::verifyStoreExpressions() const { 3321 #ifndef NDEBUG 3322 // This is the only use of this, and it's not worth defining a complicated 3323 // densemapinfo hash/equality function for it. 3324 std::set< 3325 std::pair<const Value *, 3326 std::tuple<const Value *, const CongruenceClass *, Value *>>> 3327 StoreExpressionSet; 3328 for (const auto &KV : ExpressionToClass) { 3329 if (auto *SE = dyn_cast<StoreExpression>(KV.first)) { 3330 // Make sure a version that will conflict with loads is not already there 3331 auto Res = StoreExpressionSet.insert( 3332 {SE->getOperand(0), std::make_tuple(SE->getMemoryLeader(), KV.second, 3333 SE->getStoredValue())}); 3334 bool Okay = Res.second; 3335 // It's okay to have the same expression already in there if it is 3336 // identical in nature. 3337 // This can happen when the leader of the stored value changes over time. 3338 if (!Okay) 3339 Okay = (std::get<1>(Res.first->second) == KV.second) && 3340 (lookupOperandLeader(std::get<2>(Res.first->second)) == 3341 lookupOperandLeader(SE->getStoredValue())); 3342 assert(Okay && "Stored expression conflict exists in expression table"); 3343 auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst()); 3344 assert(ValueExpr && ValueExpr->equals(*SE) && 3345 "StoreExpression in ExpressionToClass is not latest " 3346 "StoreExpression for value"); 3347 } 3348 } 3349 #endif 3350 } 3351 3352 // This is the main value numbering loop, it iterates over the initial touched 3353 // instruction set, propagating value numbers, marking things touched, etc, 3354 // until the set of touched instructions is completely empty. 3355 void NewGVN::iterateTouchedInstructions() { 3356 unsigned int Iterations = 0; 3357 // Figure out where touchedinstructions starts 3358 int FirstInstr = TouchedInstructions.find_first(); 3359 // Nothing set, nothing to iterate, just return. 3360 if (FirstInstr == -1) 3361 return; 3362 const BasicBlock *LastBlock = getBlockForValue(InstrFromDFSNum(FirstInstr)); 3363 while (TouchedInstructions.any()) { 3364 ++Iterations; 3365 // Walk through all the instructions in all the blocks in RPO. 3366 // TODO: As we hit a new block, we should push and pop equalities into a 3367 // table lookupOperandLeader can use, to catch things PredicateInfo 3368 // might miss, like edge-only equivalences. 3369 for (unsigned InstrNum : TouchedInstructions.set_bits()) { 3370 3371 // This instruction was found to be dead. We don't bother looking 3372 // at it again. 
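      // (Dead or skipped instructions are mapped to DFS number 0 by
      // assignDFSNumbers and valueNumberInstruction, and DFSToInstr[0] is a
      // nullptr placeholder, so there is nothing to look up for this bit.)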
3373 if (InstrNum == 0) { 3374 TouchedInstructions.reset(InstrNum); 3375 continue; 3376 } 3377 3378 Value *V = InstrFromDFSNum(InstrNum); 3379 const BasicBlock *CurrBlock = getBlockForValue(V); 3380 3381 // If we hit a new block, do reachability processing. 3382 if (CurrBlock != LastBlock) { 3383 LastBlock = CurrBlock; 3384 bool BlockReachable = ReachableBlocks.count(CurrBlock); 3385 const auto &CurrInstRange = BlockInstRange.lookup(CurrBlock); 3386 3387 // If it's not reachable, erase any touched instructions and move on. 3388 if (!BlockReachable) { 3389 TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second); 3390 LLVM_DEBUG(dbgs() << "Skipping instructions in block " 3391 << getBlockName(CurrBlock) 3392 << " because it is unreachable\n"); 3393 continue; 3394 } 3395 updateProcessedCount(CurrBlock); 3396 } 3397 // Reset after processing (because we may mark ourselves as touched when 3398 // we propagate equalities). 3399 TouchedInstructions.reset(InstrNum); 3400 3401 if (auto *MP = dyn_cast<MemoryPhi>(V)) { 3402 LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n"); 3403 valueNumberMemoryPhi(MP); 3404 } else if (auto *I = dyn_cast<Instruction>(V)) { 3405 valueNumberInstruction(I); 3406 } else { 3407 llvm_unreachable("Should have been a MemoryPhi or Instruction"); 3408 } 3409 updateProcessedCount(V); 3410 } 3411 } 3412 NumGVNMaxIterations = std::max(NumGVNMaxIterations.getValue(), Iterations); 3413 } 3414 3415 // This is the main transformation entry point. 3416 bool NewGVN::runGVN() { 3417 if (DebugCounter::isCounterSet(VNCounter)) 3418 StartingVNCounter = DebugCounter::getCounterValue(VNCounter); 3419 bool Changed = false; 3420 NumFuncArgs = F.arg_size(); 3421 MSSAWalker = MSSA->getWalker(); 3422 SingletonDeadExpression = new (ExpressionAllocator) DeadExpression(); 3423 3424 // Count number of instructions for sizing of hash tables, and come 3425 // up with a global dfs numbering for instructions. 3426 unsigned ICount = 1; 3427 // Add an empty instruction to account for the fact that we start at 1 3428 DFSToInstr.emplace_back(nullptr); 3429 // Note: We want ideal RPO traversal of the blocks, which is not quite the 3430 // same as dominator tree order, particularly with regard whether backedges 3431 // get visited first or second, given a block with multiple successors. 3432 // If we visit in the wrong order, we will end up performing N times as many 3433 // iterations. 3434 // The dominator tree does guarantee that, for a given dom tree node, it's 3435 // parent must occur before it in the RPO ordering. Thus, we only need to sort 3436 // the siblings. 3437 ReversePostOrderTraversal<Function *> RPOT(&F); 3438 unsigned Counter = 0; 3439 for (auto &B : RPOT) { 3440 auto *Node = DT->getNode(B); 3441 assert(Node && "RPO and Dominator tree should have same reachability"); 3442 RPOOrdering[Node] = ++Counter; 3443 } 3444 // Sort dominator tree children arrays into RPO. 3445 for (auto &B : RPOT) { 3446 auto *Node = DT->getNode(B); 3447 if (Node->getNumChildren() > 1) 3448 llvm::sort(*Node, [&](const DomTreeNode *A, const DomTreeNode *B) { 3449 return RPOOrdering[A] < RPOOrdering[B]; 3450 }); 3451 } 3452 3453 // Now a standard depth first ordering of the domtree is equivalent to RPO. 
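  // Illustrative sketch: for a diamond CFG
  //        A
  //       / \
  //      B   C
  //       \ /
  //        D
  // the dominator tree children of A are {B, C, D}; sorting them by
  // RPOOrdering above ensures the depth-first walk below numbers B and C
  // before D, matching a reverse post-order traversal of the CFG.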
3454 for (auto DTN : depth_first(DT->getRootNode())) { 3455 BasicBlock *B = DTN->getBlock(); 3456 const auto &BlockRange = assignDFSNumbers(B, ICount); 3457 BlockInstRange.insert({B, BlockRange}); 3458 ICount += BlockRange.second - BlockRange.first; 3459 } 3460 initializeCongruenceClasses(F); 3461 3462 TouchedInstructions.resize(ICount); 3463 // Ensure we don't end up resizing the expressionToClass map, as 3464 // that can be quite expensive. At most, we have one expression per 3465 // instruction. 3466 ExpressionToClass.reserve(ICount); 3467 3468 // Initialize the touched instructions to include the entry block. 3469 const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock()); 3470 TouchedInstructions.set(InstRange.first, InstRange.second); 3471 LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock()) 3472 << " marked reachable\n"); 3473 ReachableBlocks.insert(&F.getEntryBlock()); 3474 3475 iterateTouchedInstructions(); 3476 verifyMemoryCongruency(); 3477 verifyIterationSettled(F); 3478 verifyStoreExpressions(); 3479 3480 Changed |= eliminateInstructions(F); 3481 3482 // Delete all instructions marked for deletion. 3483 for (Instruction *ToErase : InstructionsToErase) { 3484 if (!ToErase->use_empty()) 3485 ToErase->replaceAllUsesWith(PoisonValue::get(ToErase->getType())); 3486 3487 assert(ToErase->getParent() && 3488 "BB containing ToErase deleted unexpectedly!"); 3489 ToErase->eraseFromParent(); 3490 } 3491 Changed |= !InstructionsToErase.empty(); 3492 3493 // Delete all unreachable blocks. 3494 auto UnreachableBlockPred = [&](const BasicBlock &BB) { 3495 return !ReachableBlocks.count(&BB); 3496 }; 3497 3498 for (auto &BB : make_filter_range(F, UnreachableBlockPred)) { 3499 LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB) 3500 << " is unreachable\n"); 3501 deleteInstructionsInBlock(&BB); 3502 Changed = true; 3503 } 3504 3505 cleanupTables(); 3506 return Changed; 3507 } 3508 3509 struct NewGVN::ValueDFS { 3510 int DFSIn = 0; 3511 int DFSOut = 0; 3512 int LocalNum = 0; 3513 3514 // Only one of Def and U will be set. 3515 // The bool in the Def tells us whether the Def is the stored value of a 3516 // store. 3517 PointerIntPair<Value *, 1, bool> Def; 3518 Use *U = nullptr; 3519 3520 bool operator<(const ValueDFS &Other) const { 3521 // It's not enough that any given field be less than - we have sets 3522 // of fields that need to be evaluated together to give a proper ordering. 3523 // For example, if you have; 3524 // DFS (1, 3) 3525 // Val 0 3526 // DFS (1, 2) 3527 // Val 50 3528 // We want the second to be less than the first, but if we just go field 3529 // by field, we will get to Val 0 < Val 50 and say the first is less than 3530 // the second. We only want it to be less than if the DFS orders are equal. 3531 // 3532 // Each LLVM instruction only produces one value, and thus the lowest-level 3533 // differentiator that really matters for the stack (and what we use as as a 3534 // replacement) is the local dfs number. 3535 // Everything else in the structure is instruction level, and only affects 3536 // the order in which we will replace operands of a given instruction. 3537 // 3538 // For a given instruction (IE things with equal dfsin, dfsout, localnum), 3539 // the order of replacement of uses does not matter. 3540 // IE given, 3541 // a = 5 3542 // b = a + a 3543 // When you hit b, you will have two valuedfs with the same dfsin, out, and 3544 // localnum. 3545 // The .val will be the same as well. 3546 // The .u's will be different. 
    // You will replace both, and it does not matter what order you replace
    // them in (IE whether you replace operand 2, then operand 1, or operand 1,
    // then operand 2).
    // Similarly for the case of same dfsin, dfsout, localnum, but different
    // .val's
    // a = 5
    // b = 6
    // c = a + b
    // in c, we will have a valuedfs for a, and one for b, with everything the
    // same but .val and .u.
    // It does not matter what order we replace these operands in.
    // You will always end up with the same IR, and this is guaranteed.
    return std::tie(DFSIn, DFSOut, LocalNum, Def, U) <
           std::tie(Other.DFSIn, Other.DFSOut, Other.LocalNum, Other.Def,
                    Other.U);
  }
};

// This function converts the set of members for a congruence class from
// values to sets of defs and uses with associated DFS info. The total number
// of reachable uses for each value is stored in UseCounts, and instructions
// that seem dead (have no non-dead uses) are stored in ProbablyDead.
void NewGVN::convertClassToDFSOrdered(
    const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
    DenseMap<const Value *, unsigned int> &UseCounts,
    SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
  for (auto D : Dense) {
    // First add the value.
    BasicBlock *BB = getBlockForValue(D);
    // Constants are handled prior to ever calling this function, so
    // we should only be left with instructions as members.
    assert(BB && "Should have figured out a basic block for value");
    ValueDFS VDDef;
    DomTreeNode *DomNode = DT->getNode(BB);
    VDDef.DFSIn = DomNode->getDFSNumIn();
    VDDef.DFSOut = DomNode->getDFSNumOut();
    // If it's a store, use the leader of the value operand, if it's always
    // available, or the value operand. TODO: We could do dominance checks to
    // find a dominating leader, but not worth it ATM.
    if (auto *SI = dyn_cast<StoreInst>(D)) {
      auto Leader = lookupOperandLeader(SI->getValueOperand());
      if (alwaysAvailable(Leader)) {
        VDDef.Def.setPointer(Leader);
      } else {
        VDDef.Def.setPointer(SI->getValueOperand());
        VDDef.Def.setInt(true);
      }
    } else {
      VDDef.Def.setPointer(D);
    }
    assert(isa<Instruction>(D) &&
           "The dense set member should always be an instruction");
    Instruction *Def = cast<Instruction>(D);
    VDDef.LocalNum = InstrToDFSNum(D);
    DFSOrderedSet.push_back(VDDef);
    // If there is a phi node equivalent, add it
    if (auto *PN = RealToTemp.lookup(Def)) {
      auto *PHIE =
          dyn_cast_or_null<PHIExpression>(ValueToExpression.lookup(Def));
      if (PHIE) {
        VDDef.Def.setInt(false);
        VDDef.Def.setPointer(PN);
        VDDef.LocalNum = 0;
        DFSOrderedSet.push_back(VDDef);
      }
    }

    unsigned int UseCount = 0;
    // Now add the uses.
    for (auto &U : Def->uses()) {
      if (auto *I = dyn_cast<Instruction>(U.getUser())) {
        // Don't try to replace into dead uses
        if (InstructionsToErase.count(I))
          continue;
        ValueDFS VDUse;
        // Put the phi node uses in the incoming block.
        BasicBlock *IBlock;
        if (auto *P = dyn_cast<PHINode>(I)) {
          IBlock = P->getIncomingBlock(U);
          // Make phi node users appear last in the incoming block
          // they are from.
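          // (InstrDFS.size() + 1 is larger than any assigned DFS number, so
          // after sorting, these uses come after every definition in the
          // incoming block.)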
3629 VDUse.LocalNum = InstrDFS.size() + 1; 3630 } else { 3631 IBlock = getBlockForValue(I); 3632 VDUse.LocalNum = InstrToDFSNum(I); 3633 } 3634 3635 // Skip uses in unreachable blocks, as we're going 3636 // to delete them. 3637 if (!ReachableBlocks.contains(IBlock)) 3638 continue; 3639 3640 DomTreeNode *DomNode = DT->getNode(IBlock); 3641 VDUse.DFSIn = DomNode->getDFSNumIn(); 3642 VDUse.DFSOut = DomNode->getDFSNumOut(); 3643 VDUse.U = &U; 3644 ++UseCount; 3645 DFSOrderedSet.emplace_back(VDUse); 3646 } 3647 } 3648 3649 // If there are no uses, it's probably dead (but it may have side-effects, 3650 // so not definitely dead. Otherwise, store the number of uses so we can 3651 // track if it becomes dead later). 3652 if (UseCount == 0) 3653 ProbablyDead.insert(Def); 3654 else 3655 UseCounts[Def] = UseCount; 3656 } 3657 } 3658 3659 // This function converts the set of members for a congruence class from values, 3660 // to the set of defs for loads and stores, with associated DFS info. 3661 void NewGVN::convertClassToLoadsAndStores( 3662 const CongruenceClass &Dense, 3663 SmallVectorImpl<ValueDFS> &LoadsAndStores) const { 3664 for (auto D : Dense) { 3665 if (!isa<LoadInst>(D) && !isa<StoreInst>(D)) 3666 continue; 3667 3668 BasicBlock *BB = getBlockForValue(D); 3669 ValueDFS VD; 3670 DomTreeNode *DomNode = DT->getNode(BB); 3671 VD.DFSIn = DomNode->getDFSNumIn(); 3672 VD.DFSOut = DomNode->getDFSNumOut(); 3673 VD.Def.setPointer(D); 3674 3675 // If it's an instruction, use the real local dfs number. 3676 if (auto *I = dyn_cast<Instruction>(D)) 3677 VD.LocalNum = InstrToDFSNum(I); 3678 else 3679 llvm_unreachable("Should have been an instruction"); 3680 3681 LoadsAndStores.emplace_back(VD); 3682 } 3683 } 3684 3685 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { 3686 patchReplacementInstruction(I, Repl); 3687 I->replaceAllUsesWith(Repl); 3688 } 3689 3690 void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) { 3691 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB); 3692 ++NumGVNBlocksDeleted; 3693 3694 // Delete the instructions backwards, as it has a reduced likelihood of having 3695 // to update as many def-use and use-def chains. Start after the terminator. 3696 auto StartPoint = BB->rbegin(); 3697 ++StartPoint; 3698 // Note that we explicitly recalculate BB->rend() on each iteration, 3699 // as it may change when we remove the first instruction. 3700 for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) { 3701 Instruction &Inst = *I++; 3702 if (!Inst.use_empty()) 3703 Inst.replaceAllUsesWith(PoisonValue::get(Inst.getType())); 3704 if (isa<LandingPadInst>(Inst)) 3705 continue; 3706 salvageKnowledge(&Inst, AC); 3707 3708 Inst.eraseFromParent(); 3709 ++NumGVNInstrDeleted; 3710 } 3711 // Now insert something that simplifycfg will turn into an unreachable. 3712 Type *Int8Ty = Type::getInt8Ty(BB->getContext()); 3713 new StoreInst(PoisonValue::get(Int8Ty), 3714 Constant::getNullValue(Int8Ty->getPointerTo()), 3715 BB->getTerminator()); 3716 } 3717 3718 void NewGVN::markInstructionForDeletion(Instruction *I) { 3719 LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n"); 3720 InstructionsToErase.insert(I); 3721 } 3722 3723 void NewGVN::replaceInstruction(Instruction *I, Value *V) { 3724 LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n"); 3725 patchAndReplaceAllUsesWith(I, V); 3726 // We save the actual erasing to avoid invalidating memory 3727 // dependencies until we are done with everything. 
3728 markInstructionForDeletion(I); 3729 } 3730 3731 namespace { 3732 3733 // This is a stack that contains both the value and dfs info of where 3734 // that value is valid. 3735 class ValueDFSStack { 3736 public: 3737 Value *back() const { return ValueStack.back(); } 3738 std::pair<int, int> dfs_back() const { return DFSStack.back(); } 3739 3740 void push_back(Value *V, int DFSIn, int DFSOut) { 3741 ValueStack.emplace_back(V); 3742 DFSStack.emplace_back(DFSIn, DFSOut); 3743 } 3744 3745 bool empty() const { return DFSStack.empty(); } 3746 3747 bool isInScope(int DFSIn, int DFSOut) const { 3748 if (empty()) 3749 return false; 3750 return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second; 3751 } 3752 3753 void popUntilDFSScope(int DFSIn, int DFSOut) { 3754 3755 // These two should always be in sync at this point. 3756 assert(ValueStack.size() == DFSStack.size() && 3757 "Mismatch between ValueStack and DFSStack"); 3758 while ( 3759 !DFSStack.empty() && 3760 !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) { 3761 DFSStack.pop_back(); 3762 ValueStack.pop_back(); 3763 } 3764 } 3765 3766 private: 3767 SmallVector<Value *, 8> ValueStack; 3768 SmallVector<std::pair<int, int>, 8> DFSStack; 3769 }; 3770 3771 } // end anonymous namespace 3772 3773 // Given an expression, get the congruence class for it. 3774 CongruenceClass *NewGVN::getClassForExpression(const Expression *E) const { 3775 if (auto *VE = dyn_cast<VariableExpression>(E)) 3776 return ValueToClass.lookup(VE->getVariableValue()); 3777 else if (isa<DeadExpression>(E)) 3778 return TOPClass; 3779 return ExpressionToClass.lookup(E); 3780 } 3781 3782 // Given a value and a basic block we are trying to see if it is available in, 3783 // see if the value has a leader available in that block. 3784 Value *NewGVN::findPHIOfOpsLeader(const Expression *E, 3785 const Instruction *OrigInst, 3786 const BasicBlock *BB) const { 3787 // It would already be constant if we could make it constant 3788 if (auto *CE = dyn_cast<ConstantExpression>(E)) 3789 return CE->getConstantValue(); 3790 if (auto *VE = dyn_cast<VariableExpression>(E)) { 3791 auto *V = VE->getVariableValue(); 3792 if (alwaysAvailable(V) || DT->dominates(getBlockForValue(V), BB)) 3793 return VE->getVariableValue(); 3794 } 3795 3796 auto *CC = getClassForExpression(E); 3797 if (!CC) 3798 return nullptr; 3799 if (alwaysAvailable(CC->getLeader())) 3800 return CC->getLeader(); 3801 3802 for (auto Member : *CC) { 3803 auto *MemberInst = dyn_cast<Instruction>(Member); 3804 if (MemberInst == OrigInst) 3805 continue; 3806 // Anything that isn't an instruction is always available. 3807 if (!MemberInst) 3808 return Member; 3809 if (DT->dominates(getBlockForValue(MemberInst), BB)) 3810 return Member; 3811 } 3812 return nullptr; 3813 } 3814 3815 bool NewGVN::eliminateInstructions(Function &F) { 3816 // This is a non-standard eliminator. The normal way to eliminate is 3817 // to walk the dominator tree in order, keeping track of available 3818 // values, and eliminating them. However, this is mildly 3819 // pointless. It requires doing lookups on every instruction, 3820 // regardless of whether we will ever eliminate it. For 3821 // instructions part of most singleton congruence classes, we know we 3822 // will never eliminate them. 
3823 3824 // Instead, this eliminator looks at the congruence classes directly, sorts 3825 // them into a DFS ordering of the dominator tree, and then we just 3826 // perform elimination straight on the sets by walking the congruence 3827 // class member uses in order, and eliminate the ones dominated by the 3828 // last member. This is worst case O(E log E) where E = number of 3829 // instructions in a single congruence class. In theory, this is all 3830 // instructions. In practice, it is much faster, as most instructions are 3831 // either in singleton congruence classes or can't possibly be eliminated 3832 // anyway (if there are no overlapping DFS ranges in class). 3833 // When we find something not dominated, it becomes the new leader 3834 // for elimination purposes. 3835 // TODO: If we wanted to be faster, We could remove any members with no 3836 // overlapping ranges while sorting, as we will never eliminate anything 3837 // with those members, as they don't dominate anything else in our set. 3838 3839 bool AnythingReplaced = false; 3840 3841 // Since we are going to walk the domtree anyway, and we can't guarantee the 3842 // DFS numbers are updated, we compute some ourselves. 3843 DT->updateDFSNumbers(); 3844 3845 // Go through all of our phi nodes, and kill the arguments associated with 3846 // unreachable edges. 3847 auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) { 3848 for (auto &Operand : PHI->incoming_values()) 3849 if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) { 3850 LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI 3851 << " for block " 3852 << getBlockName(PHI->getIncomingBlock(Operand)) 3853 << " with poison due to it being unreachable\n"); 3854 Operand.set(PoisonValue::get(PHI->getType())); 3855 } 3856 }; 3857 // Replace unreachable phi arguments. 3858 // At this point, RevisitOnReachabilityChange only contains: 3859 // 3860 // 1. PHIs 3861 // 2. Temporaries that will convert to PHIs 3862 // 3. Operations that are affected by an unreachable edge but do not fit into 3863 // 1 or 2 (rare). 3864 // So it is a slight overshoot of what we want. We could make it exact by 3865 // using two SparseBitVectors per block. 3866 DenseMap<const BasicBlock *, unsigned> ReachablePredCount; 3867 for (auto &KV : ReachableEdges) 3868 ReachablePredCount[KV.getEnd()]++; 3869 for (auto &BBPair : RevisitOnReachabilityChange) { 3870 for (auto InstNum : BBPair.second) { 3871 auto *Inst = InstrFromDFSNum(InstNum); 3872 auto *PHI = dyn_cast<PHINode>(Inst); 3873 PHI = PHI ? PHI : dyn_cast_or_null<PHINode>(RealToTemp.lookup(Inst)); 3874 if (!PHI) 3875 continue; 3876 auto *BB = BBPair.first; 3877 if (ReachablePredCount.lookup(BB) != PHI->getNumIncomingValues()) 3878 ReplaceUnreachablePHIArgs(PHI, BB); 3879 } 3880 } 3881 3882 // Map to store the use counts 3883 DenseMap<const Value *, unsigned int> UseCounts; 3884 for (auto *CC : reverse(CongruenceClasses)) { 3885 LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() 3886 << "\n"); 3887 // Track the equivalent store info so we can decide whether to try 3888 // dead store elimination. 3889 SmallVector<ValueDFS, 8> PossibleDeadStores; 3890 SmallPtrSet<Instruction *, 8> ProbablyDead; 3891 if (CC->isDead() || CC->empty()) 3892 continue; 3893 // Everything still in the TOP class is unreachable or dead. 
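    // (Members remain in TOP only if they were never moved to another class
    // during value numbering, typically because their block never became
    // reachable or they were marked trivially dead up front and skipped.)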
3894 if (CC == TOPClass) { 3895 for (auto M : *CC) { 3896 auto *VTE = ValueToExpression.lookup(M); 3897 if (VTE && isa<DeadExpression>(VTE)) 3898 markInstructionForDeletion(cast<Instruction>(M)); 3899 assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) || 3900 InstructionsToErase.count(cast<Instruction>(M))) && 3901 "Everything in TOP should be unreachable or dead at this " 3902 "point"); 3903 } 3904 continue; 3905 } 3906 3907 assert(CC->getLeader() && "We should have had a leader"); 3908 // If this is a leader that is always available, and it's a 3909 // constant or has no equivalences, just replace everything with 3910 // it. We then update the congruence class with whatever members 3911 // are left. 3912 Value *Leader = 3913 CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader(); 3914 if (alwaysAvailable(Leader)) { 3915 CongruenceClass::MemberSet MembersLeft; 3916 for (auto M : *CC) { 3917 Value *Member = M; 3918 // Void things have no uses we can replace. 3919 if (Member == Leader || !isa<Instruction>(Member) || 3920 Member->getType()->isVoidTy()) { 3921 MembersLeft.insert(Member); 3922 continue; 3923 } 3924 LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for " 3925 << *Member << "\n"); 3926 auto *I = cast<Instruction>(Member); 3927 assert(Leader != I && "About to accidentally remove our leader"); 3928 replaceInstruction(I, Leader); 3929 AnythingReplaced = true; 3930 } 3931 CC->swap(MembersLeft); 3932 } else { 3933 // If this is a singleton, we can skip it. 3934 if (CC->size() != 1 || RealToTemp.count(Leader)) { 3935 // This is a stack because equality replacement/etc may place 3936 // constants in the middle of the member list, and we want to use 3937 // those constant values in preference to the current leader, over 3938 // the scope of those constants. 3939 ValueDFSStack EliminationStack; 3940 3941 // Convert the members to DFS ordered sets and then merge them. 3942 SmallVector<ValueDFS, 8> DFSOrderedSet; 3943 convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead); 3944 3945 // Sort the whole thing. 3946 llvm::sort(DFSOrderedSet); 3947 for (auto &VD : DFSOrderedSet) { 3948 int MemberDFSIn = VD.DFSIn; 3949 int MemberDFSOut = VD.DFSOut; 3950 Value *Def = VD.Def.getPointer(); 3951 bool FromStore = VD.Def.getInt(); 3952 Use *U = VD.U; 3953 // We ignore void things because we can't get a value from them. 3954 if (Def && Def->getType()->isVoidTy()) 3955 continue; 3956 auto *DefInst = dyn_cast_or_null<Instruction>(Def); 3957 if (DefInst && AllTempInstructions.count(DefInst)) { 3958 auto *PN = cast<PHINode>(DefInst); 3959 3960 // If this is a value phi and that's the expression we used, insert 3961 // it into the program 3962 // remove from temp instruction list. 3963 AllTempInstructions.erase(PN); 3964 auto *DefBlock = getBlockForValue(Def); 3965 LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def 3966 << " into block " 3967 << getBlockName(getBlockForValue(Def)) << "\n"); 3968 PN->insertBefore(&DefBlock->front()); 3969 Def = PN; 3970 NumGVNPHIOfOpsEliminations++; 3971 } 3972 3973 if (EliminationStack.empty()) { 3974 LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n"); 3975 } else { 3976 LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are (" 3977 << EliminationStack.dfs_back().first << "," 3978 << EliminationStack.dfs_back().second << ")\n"); 3979 } 3980 3981 LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << "," 3982 << MemberDFSOut << ")\n"); 3983 // First, we see if we are out of scope or empty. 
If so,
          // and there are equivalences, we try to replace the top of
          // stack with equivalences (if it's on the stack, it must
          // not have been eliminated yet).
          // Then we synchronize to our current scope, by
          // popping until we are back within a DFS scope that
          // dominates the current member.
          // Then, what happens depends on a few factors:
          // If the stack is now empty, we need to push.
          // If we have a constant or a local equivalence we want to
          // start using, we also push.
          // Otherwise, we walk along, processing members who are
          // dominated by this scope, and eliminate them.
          bool ShouldPush = Def && EliminationStack.empty();
          bool OutOfScope =
              !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut);

          if (OutOfScope || ShouldPush) {
            // Sync to our current scope.
            EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
            bool ShouldPush = Def && EliminationStack.empty();
            if (ShouldPush) {
              EliminationStack.push_back(Def, MemberDFSIn, MemberDFSOut);
            }
          }

          // Skip the Def's, we only want to eliminate on their uses. But mark
          // dominated defs as dead.
          if (Def) {
            // For anything in this case, what and how we value number
            // guarantees that any side-effects that would have occurred (ie
            // throwing, etc) can be proven to either still occur (because it's
            // dominated by something that has the same side-effects), or never
            // occur. Otherwise, we would not have been able to prove it value
            // equivalent to something else. For these things, we can just mark
            // it all dead. Note that this is different from the "ProbablyDead"
            // set, which may not be dominated by anything, and thus are only
            // easy to prove dead if they are also side-effect free. Note that
            // because stores are put in terms of the stored value, we skip
            // stored values here. If the stored value is really dead, it will
            // still be marked for deletion when we process it in its own class.
            if (!EliminationStack.empty() && Def != EliminationStack.back() &&
                isa<Instruction>(Def) && !FromStore)
              markInstructionForDeletion(cast<Instruction>(Def));
            continue;
          }
          // At this point, we know it is a Use we are trying to possibly
          // replace.

          assert(isa<Instruction>(U->get()) &&
                 "Current def should have been an instruction");
          assert(isa<Instruction>(U->getUser()) &&
                 "Current user should have been an instruction");

          // If the thing we are replacing into is already marked to be dead,
          // this use is dead. Note that this is true regardless of whether
          // we have anything dominating the use or not. We do this here
          // because we are already walking all the uses anyway.
          Instruction *InstUse = cast<Instruction>(U->getUser());
          if (InstructionsToErase.count(InstUse)) {
            auto &UseCount = UseCounts[U->get()];
            if (--UseCount == 0) {
              ProbablyDead.insert(cast<Instruction>(U->get()));
            }
          }

          // If we get to this point, and the stack is empty we must have a use
          // with nothing we can use to eliminate this use, so just skip it.
4051 if (EliminationStack.empty()) 4052 continue; 4053 4054 Value *DominatingLeader = EliminationStack.back(); 4055 4056 auto *II = dyn_cast<IntrinsicInst>(DominatingLeader); 4057 bool isSSACopy = II && II->getIntrinsicID() == Intrinsic::ssa_copy; 4058 if (isSSACopy) 4059 DominatingLeader = II->getOperand(0); 4060 4061 // Don't replace our existing users with ourselves. 4062 if (U->get() == DominatingLeader) 4063 continue; 4064 LLVM_DEBUG(dbgs() 4065 << "Found replacement " << *DominatingLeader << " for " 4066 << *U->get() << " in " << *(U->getUser()) << "\n"); 4067 4068 // If we replaced something in an instruction, handle the patching of 4069 // metadata. Skip this if we are replacing predicateinfo with its 4070 // original operand, as we already know we can just drop it. 4071 auto *ReplacedInst = cast<Instruction>(U->get()); 4072 auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst); 4073 if (!PI || DominatingLeader != PI->OriginalOp) 4074 patchReplacementInstruction(ReplacedInst, DominatingLeader); 4075 U->set(DominatingLeader); 4076 // This is now a use of the dominating leader, which means if the 4077 // dominating leader was dead, it's now live! 4078 auto &LeaderUseCount = UseCounts[DominatingLeader]; 4079 // It's about to be alive again. 4080 if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader)) 4081 ProbablyDead.erase(cast<Instruction>(DominatingLeader)); 4082 // For copy instructions, we use their operand as a leader, 4083 // which means we remove a user of the copy and it may become dead. 4084 if (isSSACopy) { 4085 unsigned &IIUseCount = UseCounts[II]; 4086 if (--IIUseCount == 0) 4087 ProbablyDead.insert(II); 4088 } 4089 ++LeaderUseCount; 4090 AnythingReplaced = true; 4091 } 4092 } 4093 } 4094 4095 // At this point, anything still in the ProbablyDead set is actually dead if 4096 // would be trivially dead. 4097 for (auto *I : ProbablyDead) 4098 if (wouldInstructionBeTriviallyDead(I)) 4099 markInstructionForDeletion(I); 4100 4101 // Cleanup the congruence class. 4102 CongruenceClass::MemberSet MembersLeft; 4103 for (auto *Member : *CC) 4104 if (!isa<Instruction>(Member) || 4105 !InstructionsToErase.count(cast<Instruction>(Member))) 4106 MembersLeft.insert(Member); 4107 CC->swap(MembersLeft); 4108 4109 // If we have possible dead stores to look at, try to eliminate them. 4110 if (CC->getStoreCount() > 0) { 4111 convertClassToLoadsAndStores(*CC, PossibleDeadStores); 4112 llvm::sort(PossibleDeadStores); 4113 ValueDFSStack EliminationStack; 4114 for (auto &VD : PossibleDeadStores) { 4115 int MemberDFSIn = VD.DFSIn; 4116 int MemberDFSOut = VD.DFSOut; 4117 Instruction *Member = cast<Instruction>(VD.Def.getPointer()); 4118 if (EliminationStack.empty() || 4119 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) { 4120 // Sync to our current scope. 4121 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut); 4122 if (EliminationStack.empty()) { 4123 EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut); 4124 continue; 4125 } 4126 } 4127 // We already did load elimination, so nothing to do here. 
4128 if (isa<LoadInst>(Member)) 4129 continue; 4130 assert(!EliminationStack.empty()); 4131 Instruction *Leader = cast<Instruction>(EliminationStack.back()); 4132 (void)Leader; 4133 assert(DT->dominates(Leader->getParent(), Member->getParent())); 4134 // Member is dominater by Leader, and thus dead 4135 LLVM_DEBUG(dbgs() << "Marking dead store " << *Member 4136 << " that is dominated by " << *Leader << "\n"); 4137 markInstructionForDeletion(Member); 4138 CC->erase(Member); 4139 ++NumGVNDeadStores; 4140 } 4141 } 4142 } 4143 return AnythingReplaced; 4144 } 4145 4146 // This function provides global ranking of operations so that we can place them 4147 // in a canonical order. Note that rank alone is not necessarily enough for a 4148 // complete ordering, as constants all have the same rank. However, generally, 4149 // we will simplify an operation with all constants so that it doesn't matter 4150 // what order they appear in. 4151 unsigned int NewGVN::getRank(const Value *V) const { 4152 // Prefer constants to undef to anything else 4153 // Undef is a constant, have to check it first. 4154 // Prefer poison to undef as it's less defined. 4155 // Prefer smaller constants to constantexprs 4156 // Note that the order here matters because of class inheritance 4157 if (isa<ConstantExpr>(V)) 4158 return 3; 4159 if (isa<PoisonValue>(V)) 4160 return 1; 4161 if (isa<UndefValue>(V)) 4162 return 2; 4163 if (isa<Constant>(V)) 4164 return 0; 4165 if (auto *A = dyn_cast<Argument>(V)) 4166 return 4 + A->getArgNo(); 4167 4168 // Need to shift the instruction DFS by number of arguments + 5 to account for 4169 // the constant and argument ranking above. 4170 unsigned Result = InstrToDFSNum(V); 4171 if (Result > 0) 4172 return 5 + NumFuncArgs + Result; 4173 // Unreachable or something else, just return a really large number. 4174 return ~0; 4175 } 4176 4177 // This is a function that says whether two commutative operations should 4178 // have their order swapped when canonicalizing. 4179 bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const { 4180 // Because we only care about a total ordering, and don't rewrite expressions 4181 // in this order, we order by rank, which will give a strict weak ordering to 4182 // everything but constants, and then we order by pointer address. 4183 return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B); 4184 } 4185 4186 bool NewGVN::shouldSwapOperandsForIntrinsic(const Value *A, const Value *B, 4187 const IntrinsicInst *I) const { 4188 auto LookupResult = IntrinsicInstPred.find(I); 4189 if (shouldSwapOperands(A, B)) { 4190 if (LookupResult == IntrinsicInstPred.end()) 4191 IntrinsicInstPred.insert({I, B}); 4192 else 4193 LookupResult->second = B; 4194 return true; 4195 } 4196 4197 if (LookupResult != IntrinsicInstPred.end()) { 4198 auto *SeenPredicate = LookupResult->second; 4199 if (SeenPredicate) { 4200 if (SeenPredicate == B) 4201 return true; 4202 else 4203 LookupResult->second = nullptr; 4204 } 4205 } 4206 return false; 4207 } 4208 4209 namespace { 4210 4211 class NewGVNLegacyPass : public FunctionPass { 4212 public: 4213 // Pass identification, replacement for typeid. 
4214 static char ID; 4215 4216 NewGVNLegacyPass() : FunctionPass(ID) { 4217 initializeNewGVNLegacyPassPass(*PassRegistry::getPassRegistry()); 4218 } 4219 4220 bool runOnFunction(Function &F) override; 4221 4222 private: 4223 void getAnalysisUsage(AnalysisUsage &AU) const override { 4224 AU.addRequired<AssumptionCacheTracker>(); 4225 AU.addRequired<DominatorTreeWrapperPass>(); 4226 AU.addRequired<TargetLibraryInfoWrapperPass>(); 4227 AU.addRequired<MemorySSAWrapperPass>(); 4228 AU.addRequired<AAResultsWrapperPass>(); 4229 AU.addPreserved<DominatorTreeWrapperPass>(); 4230 AU.addPreserved<GlobalsAAWrapperPass>(); 4231 } 4232 }; 4233 4234 } // end anonymous namespace 4235 4236 bool NewGVNLegacyPass::runOnFunction(Function &F) { 4237 if (skipFunction(F)) 4238 return false; 4239 return NewGVN(F, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 4240 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 4241 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 4242 &getAnalysis<AAResultsWrapperPass>().getAAResults(), 4243 &getAnalysis<MemorySSAWrapperPass>().getMSSA(), 4244 F.getParent()->getDataLayout()) 4245 .runGVN(); 4246 } 4247 4248 char NewGVNLegacyPass::ID = 0; 4249 4250 INITIALIZE_PASS_BEGIN(NewGVNLegacyPass, "newgvn", "Global Value Numbering", 4251 false, false) 4252 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 4253 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 4254 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 4255 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 4256 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 4257 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 4258 INITIALIZE_PASS_END(NewGVNLegacyPass, "newgvn", "Global Value Numbering", false, 4259 false) 4260 4261 // createGVNPass - The public interface to this file. 4262 FunctionPass *llvm::createNewGVNPass() { return new NewGVNLegacyPass(); } 4263 4264 PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) { 4265 // Apparently the order in which we get these results matter for 4266 // the old GVN (see Chandler's comment in GVN.cpp). I'll keep 4267 // the same order here, just in case. 4268 auto &AC = AM.getResult<AssumptionAnalysis>(F); 4269 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 4270 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 4271 auto &AA = AM.getResult<AAManager>(F); 4272 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 4273 bool Changed = 4274 NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout()) 4275 .runGVN(); 4276 if (!Changed) 4277 return PreservedAnalyses::all(); 4278 PreservedAnalyses PA; 4279 PA.preserve<DominatorTreeAnalysis>(); 4280 return PA; 4281 } 4282
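// Usage sketch (illustrative, not part of the pass itself): with the new pass
// manager the pass can be requested from opt as -passes=newgvn, or added to a
// function pipeline in C++ roughly as:
//
//   FunctionPassManager FPM;
//   FPM.addPass(NewGVNPass());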