//===- RDFLiveness.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Computation of the liveness information from the data-flow graph.
//
// The main functionality of this code is to compute block live-in
// information. With the live-in information in place, the placement
// of kill flags can also be recalculated.
//
// The block live-in calculation is based on the ideas from the following
// publication:
//
// Dibyendu Das, Ramakrishna Upadrasta, Benoit Dupont de Dinechin.
// "Efficient Liveness Computation Using Merge Sets and DJ-Graphs."
// ACM Transactions on Architecture and Code Optimization, Association for
// Computing Machinery, 2012, ACM TACO Special Issue on "High-Performance
// and Embedded Architectures and Compilers", 8 (4),
// <10.1145/2086696.2086706>. <hal-00647369>
//
#include "llvm/CodeGen/RDFLiveness.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/RDFGraph.h"
#include "llvm/CodeGen/RDFRegisters.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <unordered_map>
#include <utility>
#include <vector>

using namespace llvm;
using namespace rdf;

static cl::opt<unsigned> MaxRecNest("rdf-liveness-max-rec", cl::init(25),
    cl::Hidden, cl::desc("Maximum recursion level"));

namespace llvm {
namespace rdf {

raw_ostream &operator<< (raw_ostream &OS, const Print<Liveness::RefMap> &P) {
  OS << '{';
  for (const auto &I : P.Obj) {
    OS << ' ' << printReg(I.first, &P.G.getTRI()) << '{';
    for (auto J = I.second.begin(), E = I.second.end(); J != E; ) {
      OS << Print(J->first, P.G) << PrintLaneMaskOpt(J->second);
      if (++J != E)
        OS << ',';
    }
    OS << '}';
  }
  OS << " }";
  return OS;
}

} // end namespace rdf
} // end namespace llvm

// The order in the returned sequence is the order of reaching defs in the
// upward traversal: the first def is the closest to the given reference RefA,
// the next one is further up, and so on.
// The list ends at a reaching phi def, or when the reference from RefA is
// covered by the defs in the list (see FullChain).
// This function provides two modes of operation:
// (1) Returning the sequence of reaching defs for a particular reference
//     node. This sequence will terminate at the first phi node [1].
// (2) Returning a partial sequence of reaching defs, where the final goal
//     is to traverse past phi nodes to the actual defs arising from the code
//     itself.
// In mode (2), the register reference for which the search was started
// may be different from the reference node RefA, for which this call was
// made, hence the argument RefRR, which holds the original register.
// Also, some definitions may have already been encountered in a previous
// call that will influence register covering. The register references
// already defined are passed in through DefRRs.
// In mode (1), the "continuation" considerations do not apply, and the
// RefRR is the same as the register in RefA, and the set DefRRs is empty.
//
// [1] It is possible for multiple phi nodes to be included in the returned
// sequence:
//   SubA = phi ...
//   SubB = phi ...
//   ...  = SuperAB(rdef:SubA), SuperAB"(rdef:SubB)
// However, these phi nodes are independent from one another in terms of
// the data-flow.

NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
      NodeAddr<RefNode*> RefA, bool TopShadows, bool FullChain,
      const RegisterAggr &DefRRs) {
  NodeList RDefs; // Return value.
  SetVector<NodeId> DefQ;
  DenseMap<MachineInstr*, uint32_t> OrdMap;

  // Dead defs will be treated as if they were live, since they are actually
  // on the data-flow path. They cannot be ignored because even though they
  // do not generate meaningful values, they still modify registers.

  // If the reference is undefined, there is nothing to do.
  if (RefA.Addr->getFlags() & NodeAttrs::Undef)
    return RDefs;

  // The initial queue should not have reaching defs for shadows. The
  // whole point of a shadow is that it will have a reaching def that
  // is not aliased to the reaching defs of the related shadows.
  NodeId Start = RefA.Id;
  auto SNA = DFG.addr<RefNode*>(Start);
  if (NodeId RD = SNA.Addr->getReachingDef())
    DefQ.insert(RD);
  if (TopShadows) {
    for (auto S : DFG.getRelatedRefs(RefA.Addr->getOwner(DFG), RefA))
      if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
        DefQ.insert(RD);
  }

  // Collect all the reaching defs, going up until a phi node is encountered,
  // or there are no more reaching defs. From this set, the actual set of
  // reaching defs will be selected.
  // The traversal upwards must go on until a covering def is encountered.
  // It is possible that a collection of non-covering (individually) defs
  // will be sufficient, but keep going until a covering one is found.
  for (unsigned i = 0; i < DefQ.size(); ++i) {
    auto TA = DFG.addr<DefNode*>(DefQ[i]);
    if (TA.Addr->getFlags() & NodeAttrs::PhiRef)
      continue;
    // Stop at the covering/overwriting def of the initial register reference.
    RegisterRef RR = TA.Addr->getRegRef(DFG);
    if (!DFG.IsPreservingDef(TA))
      if (RegisterAggr::isCoverOf(RR, RefRR, PRI))
        continue;
    // Get the next level of reaching defs. This will include multiple
    // reaching defs for shadows.
    for (auto S : DFG.getRelatedRefs(TA.Addr->getOwner(DFG), TA))
      if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
        DefQ.insert(RD);
    // Don't visit sibling defs. They share the same reaching def (which
    // will be visited anyway), but they define something not aliased to
    // this ref.
  }

  // Return the MachineBasicBlock containing a given instruction.
  auto Block = [this] (NodeAddr<InstrNode*> IA) -> MachineBasicBlock* {
    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      return NodeAddr<StmtNode*>(IA).Addr->getCode()->getParent();
    assert(IA.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<PhiNode*> PA = IA;
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(DFG);
    return BA.Addr->getCode();
  };

  SmallSet<NodeId,32> Defs;

  // Remove all non-phi defs that are not aliased to RefRR, and separate
  // the remaining defs into buckets for containing blocks.
  std::map<NodeId, NodeAddr<InstrNode*>> Owners;
  std::map<MachineBasicBlock*, SmallVector<NodeId,32>> Blocks;
  for (NodeId N : DefQ) {
    auto TA = DFG.addr<DefNode*>(N);
    bool IsPhi = TA.Addr->getFlags() & NodeAttrs::PhiRef;
    if (!IsPhi && !PRI.alias(RefRR, TA.Addr->getRegRef(DFG)))
      continue;
    Defs.insert(TA.Id);
    NodeAddr<InstrNode*> IA = TA.Addr->getOwner(DFG);
    Owners[TA.Id] = IA;
    Blocks[Block(IA)].push_back(IA.Id);
  }

  auto Precedes = [this,&OrdMap] (NodeId A, NodeId B) {
    if (A == B)
      return false;
    NodeAddr<InstrNode*> OA = DFG.addr<InstrNode*>(A);
    NodeAddr<InstrNode*> OB = DFG.addr<InstrNode*>(B);
    bool StmtA = OA.Addr->getKind() == NodeAttrs::Stmt;
    bool StmtB = OB.Addr->getKind() == NodeAttrs::Stmt;
    if (StmtA && StmtB) {
      const MachineInstr *InA = NodeAddr<StmtNode*>(OA).Addr->getCode();
      const MachineInstr *InB = NodeAddr<StmtNode*>(OB).Addr->getCode();
      assert(InA->getParent() == InB->getParent());
      auto FA = OrdMap.find(InA);
      if (FA != OrdMap.end())
        return FA->second < OrdMap.find(InB)->second;
      const MachineBasicBlock *BB = InA->getParent();
      for (auto It = BB->begin(), E = BB->end(); It != E; ++It) {
        if (It == InA->getIterator())
          return true;
        if (It == InB->getIterator())
          return false;
      }
      llvm_unreachable("InA and InB should be in the same block");
    }
    // One of them is a phi node.
    if (!StmtA && !StmtB) {
      // Both are phis, which are unordered. Break the tie by id numbers.
      return A < B;
    }
    // Only one of them is a phi. Phis always precede statements.
    return !StmtA;
  };

  auto GetOrder = [&OrdMap] (MachineBasicBlock &B) {
    uint32_t Pos = 0;
    for (MachineInstr &In : B)
      OrdMap.insert({&In, ++Pos});
  };

  // For each block, sort the nodes in it.
  std::vector<MachineBasicBlock*> TmpBB;
  for (auto &Bucket : Blocks) {
    TmpBB.push_back(Bucket.first);
    if (Bucket.second.size() > 2)
      GetOrder(*Bucket.first);
    llvm::sort(Bucket.second, Precedes);
  }

  // Sort the blocks with respect to dominance.
  llvm::sort(TmpBB,
             [this](auto A, auto B) { return MDT.properlyDominates(A, B); });

  std::vector<NodeId> TmpInst;
  for (MachineBasicBlock *MBB : llvm::reverse(TmpBB)) {
    auto &Bucket = Blocks[MBB];
    TmpInst.insert(TmpInst.end(), Bucket.rbegin(), Bucket.rend());
  }

  // The vector is a list of instructions, so that defs coming from
  // the same instruction don't need to be artificially ordered.
  // Then, when computing the initial segment, and iterating over an
  // instruction, pick the defs that contribute to the covering (i.e. are
  // not covered by previously added defs). Check the defs individually,
  // i.e. first check whether each def is covered or not (without adding
  // them to the tracking set), and then add all the selected ones.

  // The reason for this is the following example:
  //   *d1<A>, *d2<B>, ...  Assume A and B are aliased (can happen in phi
  //                        nodes).
  //   *d3<C>               If A \incl BuC, and B \incl AuC, then *d2 would be
  //                        covered if we added A first, and A would be covered
  //                        if we added B first.
  // In this example we want both A and B, because we don't want to give
  // either one priority over the other, since they belong to the same
  // statement.

  RegisterAggr RRs(DefRRs);

  auto DefInSet = [&Defs] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getKind() == NodeAttrs::Def &&
           Defs.count(TA.Id);
  };

  for (NodeId T : TmpInst) {
    if (!FullChain && RRs.hasCoverOf(RefRR))
      break;
    auto TA = DFG.addr<InstrNode*>(T);
    bool IsPhi = DFG.IsCode<NodeAttrs::Phi>(TA);
    NodeList Ds;
    for (NodeAddr<DefNode*> DA : TA.Addr->members_if(DefInSet, DFG)) {
      RegisterRef QR = DA.Addr->getRegRef(DFG);
      // Add phi defs even if they are covered by subsequent defs. This is
      // for cases where the reached use is not covered by any of the defs
      // encountered so far: the phi def is needed to expose the liveness
      // of that use to the entry of the block.
      // Example:
      //   phi d1<R3>(,d2,), ...   Phi def d1 is covered by d2.
      //   d2<R3>(d1,,u3), ...
      //   ..., u3<D1>(d2)         This use needs to be live on entry.
      if (FullChain || IsPhi || !RRs.hasCoverOf(QR))
        Ds.push_back(DA);
    }
    llvm::append_range(RDefs, Ds);
    for (NodeAddr<DefNode*> DA : Ds) {
      // When collecting a full chain of definitions, do not consider phi
      // defs to actually define a register.
      uint16_t Flags = DA.Addr->getFlags();
      if (!FullChain || !(Flags & NodeAttrs::PhiRef))
        if (!(Flags & NodeAttrs::Preserving)) // Don't care about Undef here.
          RRs.insert(DA.Addr->getRegRef(DFG));
    }
  }

  auto DeadP = [](const NodeAddr<DefNode*> DA) -> bool {
    return DA.Addr->getFlags() & NodeAttrs::Dead;
  };
  llvm::erase_if(RDefs, DeadP);

  return RDefs;
}

std::pair<NodeSet,bool>
Liveness::getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
      NodeSet &Visited, const NodeSet &Defs) {
  return getAllReachingDefsRecImpl(RefRR, RefA, Visited, Defs, 0, MaxRecNest);
}

std::pair<NodeSet,bool>
Liveness::getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
      NodeSet &Visited, const NodeSet &Defs, unsigned Nest, unsigned MaxNest) {
  if (Nest > MaxNest)
    return { NodeSet(), false };
  // Collect all defined registers. Do not consider phis to be defining
  // anything, only collect "real" definitions.
  RegisterAggr DefRRs(PRI);
  for (NodeId D : Defs) {
    const auto DA = DFG.addr<const DefNode*>(D);
    if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
      DefRRs.insert(DA.Addr->getRegRef(DFG));
  }

  NodeList RDs = getAllReachingDefs(RefRR, RefA, false, true, DefRRs);
  if (RDs.empty())
    return { Defs, true };

  // Make a copy of the preexisting definitions and add the newly found ones.
  NodeSet TmpDefs = Defs;
  for (NodeAddr<NodeBase*> R : RDs)
    TmpDefs.insert(R.Id);

  NodeSet Result = Defs;

  for (NodeAddr<DefNode*> DA : RDs) {
    Result.insert(DA.Id);
    if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
      continue;
    NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG);
    if (!Visited.insert(PA.Id).second)
      continue;
    // Go over all phi uses and get the reaching defs for each use.
    for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
      const auto &T = getAllReachingDefsRecImpl(RefRR, U, Visited, TmpDefs,
                                                Nest+1, MaxNest);
      if (!T.second)
        return { T.first, false };
      Result.insert(T.first.begin(), T.first.end());
    }
  }

  return { Result, true };
}

/// Find the nearest ref node aliased to RefRR, going upwards in the data
/// flow, starting from the instruction immediately preceding IA.
NodeAddr<RefNode*> Liveness::getNearestAliasedRef(RegisterRef RefRR,
      NodeAddr<InstrNode*> IA) {
  NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
  NodeList Ins = BA.Addr->members(DFG);
  NodeId FindId = IA.Id;
  auto E = Ins.rend();
  auto B = std::find_if(Ins.rbegin(), E,
                        [FindId] (const NodeAddr<InstrNode*> T) {
                          return T.Id == FindId;
                        });
  // Do not scan IA (which is what B would point to).
  if (B != E)
    ++B;

  do {
    // Process the range of instructions from B to E.
    for (NodeAddr<InstrNode*> I : make_range(B, E)) {
      NodeList Refs = I.Addr->members(DFG);
      NodeAddr<RefNode*> Clob, Use;
      // Scan all the refs in I aliased to RefRR, and return the one that
      // is the closest to the output of I, i.e. def > clobber > use.
      for (NodeAddr<RefNode*> R : Refs) {
        if (!PRI.alias(R.Addr->getRegRef(DFG), RefRR))
          continue;
        if (DFG.IsDef(R)) {
          // If it's a non-clobbering def, just return it.
          if (!(R.Addr->getFlags() & NodeAttrs::Clobbering))
            return R;
          Clob = R;
        } else {
          Use = R;
        }
      }
      if (Clob.Id != 0)
        return Clob;
      if (Use.Id != 0)
        return Use;
    }

    // Go up to the immediate dominator, if any.
    MachineBasicBlock *BB = BA.Addr->getCode();
    BA = NodeAddr<BlockNode*>();
    if (MachineDomTreeNode *N = MDT.getNode(BB)) {
      if ((N = N->getIDom()))
        BA = DFG.findBlock(N->getBlock());
    }
    if (!BA.Id)
      break;

    Ins = BA.Addr->members(DFG);
    B = Ins.rbegin();
    E = Ins.rend();
  } while (true);

  return NodeAddr<RefNode*>();
}

NodeSet Liveness::getAllReachedUses(RegisterRef RefRR,
      NodeAddr<DefNode*> DefA, const RegisterAggr &DefRRs) {
  NodeSet Uses;

  // If the original register is already covered by all the intervening
  // defs, no more uses can be reached.
  if (DefRRs.hasCoverOf(RefRR))
    return Uses;

  // Add all directly reached uses.
  // If the def is dead, it does not provide a value for any use.
  bool IsDead = DefA.Addr->getFlags() & NodeAttrs::Dead;
  NodeId U = !IsDead ? DefA.Addr->getReachedUse() : 0;
  while (U != 0) {
    auto UA = DFG.addr<UseNode*>(U);
    if (!(UA.Addr->getFlags() & NodeAttrs::Undef)) {
      RegisterRef UR = UA.Addr->getRegRef(DFG);
      if (PRI.alias(RefRR, UR) && !DefRRs.hasCoverOf(UR))
        Uses.insert(U);
    }
    U = UA.Addr->getSibling();
  }

  // Traverse all reached defs. This time dead defs cannot be ignored.
  for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
    auto DA = DFG.addr<DefNode*>(D);
    NextD = DA.Addr->getSibling();
    RegisterRef DR = DA.Addr->getRegRef(DFG);
    // If this def is already covered, it cannot reach anything new.
    // Similarly, skip it if it is not aliased to the interesting register.
    if (DefRRs.hasCoverOf(DR) || !PRI.alias(RefRR, DR))
      continue;
    NodeSet T;
    if (DFG.IsPreservingDef(DA)) {
      // If it is a preserving def, do not update the set of intervening defs.
      T = getAllReachedUses(RefRR, DA, DefRRs);
    } else {
      RegisterAggr NewDefRRs = DefRRs;
      NewDefRRs.insert(DR);
      T = getAllReachedUses(RefRR, DA, NewDefRRs);
    }
    Uses.insert(T.begin(), T.end());
  }
  return Uses;
}

void Liveness::computePhiInfo() {
  RealUseMap.clear();

  NodeList Phis;
  NodeAddr<FuncNode*> FA = DFG.getFunc();
  NodeList Blocks = FA.Addr->members(DFG);
  for (NodeAddr<BlockNode*> BA : Blocks) {
    auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
    llvm::append_range(Phis, Ps);
  }

  // phi use -> (map: reaching phi -> set of registers defined in between)
  std::map<NodeId,std::map<NodeId,RegisterAggr>> PhiUp;
  std::vector<NodeId> PhiUQ;  // Work list of phis for upward propagation.
  std::unordered_map<NodeId,RegisterAggr> PhiDRs;  // Phi -> registers defined by it.

  // Go over all phis.
  for (NodeAddr<PhiNode*> PhiA : Phis) {
    // Go over all defs and collect the reached uses that are non-phi uses
    // (i.e. the "real uses").
    RefMap &RealUses = RealUseMap[PhiA.Id];
    NodeList PhiRefs = PhiA.Addr->members(DFG);

    // Have a work queue of defs whose reached uses need to be found.
    // For each def, add to the queue all reached (non-phi) defs.
    SetVector<NodeId> DefQ;
    NodeSet PhiDefs;
    RegisterAggr DRs(PRI);
    for (NodeAddr<RefNode*> R : PhiRefs) {
      if (!DFG.IsRef<NodeAttrs::Def>(R))
        continue;
      DRs.insert(R.Addr->getRegRef(DFG));
      DefQ.insert(R.Id);
      PhiDefs.insert(R.Id);
    }
    PhiDRs.insert(std::make_pair(PhiA.Id, DRs));

    // Collect the super-set of all possible reached uses. This set will
    // contain all uses reached from this phi, either directly from the
    // phi defs, or (recursively) via non-phi defs reached by the phi defs.
    // This set of uses will later be trimmed to only contain those uses
    // that are actually reached by the phi defs.
    for (unsigned i = 0; i < DefQ.size(); ++i) {
      NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]);
      // Visit all reached uses. Phi defs should not really have the "dead"
      // flag set, but check it anyway for consistency.
      bool IsDead = DA.Addr->getFlags() & NodeAttrs::Dead;
      NodeId UN = !IsDead ? DA.Addr->getReachedUse() : 0;
      while (UN != 0) {
        NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN);
        uint16_t F = A.Addr->getFlags();
        if ((F & (NodeAttrs::Undef | NodeAttrs::PhiRef)) == 0) {
          RegisterRef R = A.Addr->getRegRef(DFG);
          RealUses[R.Reg].insert({A.Id,R.Mask});
        }
        UN = A.Addr->getSibling();
      }
      // Visit all reached defs, and add them to the queue. These defs may
      // override some of the uses collected here, but that will be handled
      // later.
      NodeId DN = DA.Addr->getReachedDef();
      while (DN != 0) {
        NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN);
        for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) {
          uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags();
          // Must traverse the reached-def chain. Consider:
          //   def(D0) -> def(R0) -> def(R0) -> use(D0)
          // The reachable use of D0 passes through a def of R0.
          if (!(Flags & NodeAttrs::PhiRef))
            DefQ.insert(T.Id);
        }
        DN = A.Addr->getSibling();
      }
    }
    // Filter out those uses that appear to be reachable, but really
    // are not. For example:
    //
    //   R1:0 =          d1
    //        = R1:0     u2     Reached by d1.
    //   R0   =          d3
    //        = R1:0     u4     Still reached by d1: indirectly through
    //                          the def d3.
    //   R1   =          d5
    //        = R1:0     u6     Not reached by d1 (covered collectively
    //                          by d3 and d5), but following reached
    //                          defs and uses from d1 will lead here.
    for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
      // For each reached register UI->first, there is a set UI->second, of
      // uses of it. For each such use, check if it is reached by this phi,
      // i.e. check if the set of its reaching uses intersects the set of
      // this phi's defs.
      NodeRefSet Uses = UI->second;
      UI->second.clear();
      for (std::pair<NodeId,LaneBitmask> I : Uses) {
        auto UA = DFG.addr<UseNode*>(I.first);
        // Undef flag is checked above.
        assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
        RegisterRef R(UI->first, I.second);
        // Calculate the exposed part of the reached use.
        RegisterAggr Covered(PRI);
        for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
          if (PhiDefs.count(DA.Id))
            break;
          Covered.insert(DA.Addr->getRegRef(DFG));
        }
        if (RegisterRef RC = Covered.clearIn(R)) {
          // We are updating the map for register UI->first, so we need
          // to map RC to be expressed in terms of that register.
          RegisterRef S = PRI.mapTo(RC, UI->first);
          UI->second.insert({I.first, S.Mask});
        }
      }
      UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
    }

    // If this phi reaches some "real" uses, add it to the queue for upward
    // propagation.
    if (!RealUses.empty())
      PhiUQ.push_back(PhiA.Id);

    // Go over all phi uses and check if the reaching def is another phi.
    // Collect the phis that are among the reaching defs of these uses.
    // While traversing the list of reaching defs for each phi use, accumulate
    // the set of registers defined between this phi (PhiA) and the owner phi
    // of the reaching def.
    NodeSet SeenUses;

    for (auto I : PhiRefs) {
      if (!DFG.IsRef<NodeAttrs::Use>(I) || SeenUses.count(I.Id))
        continue;
      NodeAddr<PhiUseNode*> PUA = I;
      if (PUA.Addr->getReachingDef() == 0)
        continue;

      RegisterRef UR = PUA.Addr->getRegRef(DFG);
      NodeList Ds = getAllReachingDefs(UR, PUA, true, false, NoRegs);
      RegisterAggr DefRRs(PRI);

      for (NodeAddr<DefNode*> D : Ds) {
        if (D.Addr->getFlags() & NodeAttrs::PhiRef) {
          NodeId RP = D.Addr->getOwner(DFG).Id;
          std::map<NodeId,RegisterAggr> &M = PhiUp[PUA.Id];
          auto F = M.find(RP);
          if (F == M.end())
            M.insert(std::make_pair(RP, DefRRs));
          else
            F->second.insert(DefRRs);
        }
        DefRRs.insert(D.Addr->getRegRef(DFG));
      }

      for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PhiA, PUA))
        SeenUses.insert(T.Id);
    }
  }

  if (Trace) {
    dbgs() << "Phi-up-to-phi map with intervening defs:\n";
    for (auto I : PhiUp) {
      dbgs() << "phi " << Print(I.first, DFG) << " -> {";
      for (auto R : I.second)
        dbgs() << ' ' << Print(R.first, DFG) << Print(R.second, DFG);
      dbgs() << " }\n";
    }
  }

  // Propagate the reached registers up in the phi chain.
  //
  // The following type of situation needs careful handling:
  //
  //   phi d1<R1:0>  (1)
  //        |
  //   ... d2<R1>
  //        |
  //   phi u3<R1:0>  (2)
  //        |
  //   ... u4<R1>
  //
  // The phi node (2) defines a register pair R1:0, and reaches a "real"
  // use u4 of just R1. The same phi node is also known to reach (upwards)
  // the phi node (1). However, the use u4 is not reached by phi (1),
  // because of the intervening definition d2 of R1. The data flow between
  // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0.
  //
  // When propagating uses up the phi chains, get all the reaching defs
  // for a given phi use, and traverse the list until the propagated ref
  // is covered, or until reaching the final phi. Only assume that the
  // reference reaches the phi in the latter case.

  // The operation "clearIn" can be expensive. For a given set of intervening
  // defs, cache the result of subtracting these defs from a given register
  // ref.
  using SubMap = std::unordered_map<RegisterRef, RegisterRef>;
  std::unordered_map<RegisterAggr, SubMap> Subs;
  auto ClearIn = [] (RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) {
    if (Mid.empty())
      return RR;
    auto F = SM.find(RR);
    if (F != SM.end())
      return F->second;
    RegisterRef S = Mid.clearIn(RR);
    SM.insert({RR, S});
    return S;
  };

  // Go over all phis.
  for (unsigned i = 0; i < PhiUQ.size(); ++i) {
    auto PA = DFG.addr<PhiNode*>(PhiUQ[i]);
    NodeList PUs = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG);
    RefMap &RUM = RealUseMap[PA.Id];

    for (NodeAddr<UseNode*> UA : PUs) {
      std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id];
      RegisterRef UR = UA.Addr->getRegRef(DFG);
      for (const std::pair<const NodeId, RegisterAggr> &P : PUM) {
        bool Changed = false;
        const RegisterAggr &MidDefs = P.second;
        // Collect the set PropUp of uses that are reached by the current
        // phi PA, and are not covered by any intervening def between the
        // currently visited use UA and the upward phi P.

        if (MidDefs.hasCoverOf(UR))
          continue;
        SubMap &SM = Subs[MidDefs];

        // General algorithm:
        //   for each (R,U) : U is use node of R, U is reached by PA
        //     if MidDefs does not cover (R,U)
        //       then add (R-MidDefs,U) to RealUseMap[P]
        //
        for (const std::pair<const RegisterId, NodeRefSet> &T : RUM) {
          RegisterRef R(T.first);
          // The current phi (PA) could be a phi for a regmask. It could
          // reach a whole variety of uses that are not related to the
          // specific upward phi (P.first).
          const RegisterAggr &DRs = PhiDRs.at(P.first);
          if (!DRs.hasAliasOf(R))
            continue;
          R = PRI.mapTo(DRs.intersectWith(R), T.first);
          for (std::pair<NodeId,LaneBitmask> V : T.second) {
            LaneBitmask M = R.Mask & V.second;
            if (M.none())
              continue;
            if (RegisterRef SS = ClearIn(RegisterRef(R.Reg, M), MidDefs, SM)) {
              NodeRefSet &RS = RealUseMap[P.first][SS.Reg];
              Changed |= RS.insert({V.first,SS.Mask}).second;
            }
          }
        }

        if (Changed)
          PhiUQ.push_back(P.first);
      }
    }
  }

  if (Trace) {
    dbgs() << "Real use map:\n";
    for (auto I : RealUseMap) {
      dbgs() << "phi " << Print(I.first, DFG);
      NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first);
      NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG);
      if (!Ds.empty()) {
        RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG);
        dbgs() << '<' << Print(RR, DFG) << '>';
      } else {
        dbgs() << "<noreg>";
      }
      dbgs() << " -> " << Print(I.second, DFG) << '\n';
    }
  }
}

void Liveness::computeLiveIns() {
  // Populate the node-to-block map. This speeds up the calculations
  // significantly.
  NBMap.clear();
  for (NodeAddr<BlockNode*> BA : DFG.getFunc().Addr->members(DFG)) {
    MachineBasicBlock *BB = BA.Addr->getCode();
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
      for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG))
        NBMap.insert(std::make_pair(RA.Id, BB));
      NBMap.insert(std::make_pair(IA.Id, BB));
    }
  }

  MachineFunction &MF = DFG.getMF();

  // Compute IDF first, then the inverse.
  decltype(IIDF) IDF;
  for (MachineBasicBlock &B : MF) {
    auto F1 = MDF.find(&B);
    if (F1 == MDF.end())
      continue;
    SetVector<MachineBasicBlock*> IDFB(F1->second.begin(), F1->second.end());
    for (unsigned i = 0; i < IDFB.size(); ++i) {
      auto F2 = MDF.find(IDFB[i]);
      if (F2 != MDF.end())
        IDFB.insert(F2->second.begin(), F2->second.end());
    }
    // Add B to the IDF(B). This will put B in the IIDF(B).
    IDFB.insert(&B);
    IDF[&B].insert(IDFB.begin(), IDFB.end());
  }

  for (auto I : IDF)
    for (auto *S : I.second)
      IIDF[S].insert(I.first);

  computePhiInfo();

  NodeAddr<FuncNode*> FA = DFG.getFunc();
  NodeList Blocks = FA.Addr->members(DFG);

  // Build the phi live-on-entry map.
  for (NodeAddr<BlockNode*> BA : Blocks) {
    MachineBasicBlock *MB = BA.Addr->getCode();
    RefMap &LON = PhiLON[MB];
    for (auto P : BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG))
      for (const RefMap::value_type &S : RealUseMap[P.Id])
        LON[S.first].insert(S.second.begin(), S.second.end());
  }

  if (Trace) {
    dbgs() << "Phi live-on-entry map:\n";
    for (auto &I : PhiLON)
      dbgs() << "block #" << I.first->getNumber() << " -> "
             << Print(I.second, DFG) << '\n';
  }

  // Build the phi live-on-exit map. Each phi node has some set of reached
  // "real" uses. Propagate this set backwards into the block predecessors
  // through the reaching defs of the corresponding phi uses.
  for (NodeAddr<BlockNode*> BA : Blocks) {
    NodeList Phis = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
    for (NodeAddr<PhiNode*> PA : Phis) {
      RefMap &RUs = RealUseMap[PA.Id];
      if (RUs.empty())
        continue;

      NodeSet SeenUses;
      for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
        if (!SeenUses.insert(U.Id).second)
          continue;
        NodeAddr<PhiUseNode*> PUA = U;
        if (PUA.Addr->getReachingDef() == 0)
          continue;

        // Each phi has some set (possibly empty) of reached "real" uses,
        // that is, uses that are part of the compiled program. Such a use
        // may be located in some farther block, but following a chain of
        // reaching defs will eventually lead to this phi.
        // Any chain of reaching defs may fork at a phi node, but there
        // will be a path upwards that will lead to this phi. Now, this
        // chain will need to fork at this phi, since some of the reached
        // uses may have definitions joining in from multiple predecessors.
        // For each reached "real" use, identify the set of reaching defs
        // coming from each predecessor P, and add them to PhiLOX[P].
        //
        auto PrA = DFG.addr<BlockNode*>(PUA.Addr->getPredecessor());
        RefMap &LOX = PhiLOX[PrA.Addr->getCode()];

        for (const std::pair<const RegisterId, NodeRefSet> &RS : RUs) {
          // We need to visit each individual use.
          for (std::pair<NodeId,LaneBitmask> P : RS.second) {
            // Create a register ref corresponding to the use, and find
            // all reaching defs starting from the phi use, and treating
            // all related shadows as a single use cluster.
            RegisterRef S(RS.first, P.second);
            NodeList Ds = getAllReachingDefs(S, PUA, true, false, NoRegs);
            for (NodeAddr<DefNode*> D : Ds) {
              // Calculate the mask corresponding to the visited def.
              RegisterAggr TA(PRI);
              TA.insert(D.Addr->getRegRef(DFG)).intersect(S);
              LaneBitmask TM = TA.makeRegRef().Mask;
              LOX[S.Reg].insert({D.Id, TM});
            }
          }
        }

        for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PA, PUA))
          SeenUses.insert(T.Id);
      } // for U : phi uses
    } // for P : Phis
  } // for B : Blocks

  if (Trace) {
    dbgs() << "Phi live-on-exit map:\n";
    for (auto &I : PhiLOX)
      dbgs() << "block #" << I.first->getNumber() << " -> "
             << Print(I.second, DFG) << '\n';
  }

  RefMap LiveIn;
  traverse(&MF.front(), LiveIn);

  // Add function live-ins to the live-in set of the function entry block.
  LiveMap[&MF.front()].insert(DFG.getLiveIns());

  if (Trace) {
    // Dump the liveness map
    for (MachineBasicBlock &B : MF) {
      std::vector<RegisterRef> LV;
      for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
        LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask));
      llvm::sort(LV);
      dbgs() << printMBBReference(B) << "\t rec = {";
      for (auto I : LV)
        dbgs() << ' ' << Print(I, DFG);
      dbgs() << " }\n";
      //dbgs() << "\tcomp = " << Print(LiveMap[&B], DFG) << '\n';

      LV.clear();
      const RegisterAggr &LG = LiveMap[&B];
      for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
        LV.push_back(*I);
      llvm::sort(LV);
      dbgs() << "\tcomp = {";
      for (auto I : LV)
        dbgs() << ' ' << Print(I, DFG);
      dbgs() << " }\n";
    }
  }
}

void Liveness::resetLiveIns() {
  for (auto &B : DFG.getMF()) {
    // Remove all live-ins.
    std::vector<unsigned> T;
    for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
      T.push_back(LI.PhysReg);
    for (auto I : T)
      B.removeLiveIn(I);
    // Add the newly computed live-ins.
    const RegisterAggr &LiveIns = LiveMap[&B];
    for (const RegisterRef R : make_range(LiveIns.rr_begin(), LiveIns.rr_end()))
      B.addLiveIn({MCPhysReg(R.Reg), R.Mask});
  }
}

void Liveness::resetKills() {
  for (auto &B : DFG.getMF())
    resetKills(&B);
}

void Liveness::resetKills(MachineBasicBlock *B) {
  auto CopyLiveIns = [this] (MachineBasicBlock *B, BitVector &LV) -> void {
    for (auto I : B->liveins()) {
      MCSubRegIndexIterator S(I.PhysReg, &TRI);
      if (!S.isValid()) {
        LV.set(I.PhysReg);
        continue;
      }
      do {
        LaneBitmask M = TRI.getSubRegIndexLaneMask(S.getSubRegIndex());
        if ((M & I.LaneMask).any())
          LV.set(S.getSubReg());
        ++S;
      } while (S.isValid());
    }
  };

  BitVector LiveIn(TRI.getNumRegs()), Live(TRI.getNumRegs());
  CopyLiveIns(B, LiveIn);
  for (auto *SI : B->successors())
    CopyLiveIns(SI, Live);

  // Scan the block backwards, maintaining the set of registers live after
  // the current instruction. A use gets a kill flag when no aliased
  // register remains live below it.
  for (MachineInstr &MI : llvm::reverse(*B)) {
    if (MI.isDebugInstr())
      continue;

    MI.clearKillInfo();
    for (auto &Op : MI.operands()) {
      // An implicit def of a super-register may not necessarily start a
      // live range of it, since an implicit use could be used to keep parts
      // of it live. Instead of analyzing the implicit operands, ignore
      // implicit defs.
      if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
        continue;
      Register R = Op.getReg();
      if (!R.isPhysical())
        continue;
      for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
        Live.reset(*SR);
    }
    for (auto &Op : MI.operands()) {
      if (!Op.isReg() || !Op.isUse() || Op.isUndef())
        continue;
      Register R = Op.getReg();
      if (!R.isPhysical())
        continue;
      bool IsLive = false;
      for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
        if (!Live[*AR])
          continue;
        IsLive = true;
        break;
      }
      if (!IsLive)
        Op.setIsKill(true);
      for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
        Live.set(*SR);
    }
  }
}

// Helper function to obtain the basic block containing the reaching def
// of the given use.
MachineBasicBlock *Liveness::getBlockWithRef(NodeId RN) const {
  auto F = NBMap.find(RN);
  if (F != NBMap.end())
    return F->second;
  llvm_unreachable("Node id not in map");
}

void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
  // The LiveIn map, for each (physical) register, contains the set of live
  // reaching defs of that register that are live on entry to the associated
  // block.

  // The summary of the traversal algorithm:
  //
  // R is live-in in B, if there exists a U(R), such that rdef(R) dom B
  // and (U \in IDF(B) or B dom U).
  //
  // for (C : children) {
  //   LU = {}
  //   traverse(C, LU)
  //   LiveUses += LU
  // }
  //
  // LiveUses -= Defs(B);
  // LiveUses += UpwardExposedUses(B);
  // for (C : IIDF[B])
  //   for (U : LiveUses)
  //     if (Rdef(U) dom C)
  //       C.addLiveIn(U)
  //

  // Go up the dominator tree (depth-first).
  MachineDomTreeNode *N = MDT.getNode(B);
  for (auto *I : *N) {
    RefMap L;
    MachineBasicBlock *SB = I->getBlock();
    traverse(SB, L);

    for (auto S : L)
      LiveIn[S.first].insert(S.second.begin(), S.second.end());
  }

  if (Trace) {
    dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
           << " after recursion into: {";
    for (auto *I : *N)
      dbgs() << ' ' << I->getBlock()->getNumber();
    dbgs() << " }\n";
    dbgs() << "  LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print(LiveMap[B], DFG) << '\n';
  }

  // Add reaching defs of phi uses that are live on exit from this block.
  RefMap &PUs = PhiLOX[B];
  for (auto &S : PUs)
    LiveIn[S.first].insert(S.second.begin(), S.second.end());

  if (Trace) {
    dbgs() << "after LOX\n";
    dbgs() << "  LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print(LiveMap[B], DFG) << '\n';
  }

  // The LiveIn map at this point has all defs that are live-on-exit from B,
  // as if they were live-on-entry to B. First, we need to filter out all
  // defs that are present in this block. Then we will add reaching defs of
  // all upward-exposed uses.

  // To filter out the defs, first make a copy of LiveIn, and then re-populate
  // LiveIn with the defs that should remain.
  RefMap LiveInCopy = LiveIn;
  LiveIn.clear();

  for (const std::pair<const RegisterId, NodeRefSet> &LE : LiveInCopy) {
    RegisterRef LRef(LE.first);
    NodeRefSet &NewDefs = LiveIn[LRef.Reg]; // To be filled.
    const NodeRefSet &OldDefs = LE.second;
    for (NodeRef OR : OldDefs) {
      // R is a def node that was live-on-exit
      auto DA = DFG.addr<DefNode*>(OR.first);
      NodeAddr<InstrNode*> IA = DA.Addr->getOwner(DFG);
      NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
      if (B != BA.Addr->getCode()) {
        // Defs from a different block need to be preserved. Defs from this
        // block will need to be processed further, except for phi defs, the
        // liveness of which is handled through the PhiLON/PhiLOX maps.
        NewDefs.insert(OR);
        continue;
      }

      // Defs from this block need to stop the liveness from being
      // propagated upwards. This only applies to non-preserving defs,
      // and to the parts of the register actually covered by those defs.
      // (Note that phi defs should always be preserving.)
      RegisterAggr RRs(PRI);
      LRef.Mask = OR.second;

      if (!DFG.IsPreservingDef(DA)) {
        assert(!(IA.Addr->getFlags() & NodeAttrs::Phi));
        // DA is a non-phi def that is live-on-exit from this block, and
        // that is also located in this block. LRef is a register ref
        // whose use this def reaches. If DA covers LRef, then no part
        // of LRef is exposed upwards.
        if (RRs.insert(DA.Addr->getRegRef(DFG)).hasCoverOf(LRef))
          continue;
      }

      // DA itself was not sufficient to cover LRef. In general, it is
      // the last in a chain of aliased defs before the exit from this block.
      // There could be other defs in this block that are a part of that
      // chain. Check that now: accumulate the registers from these defs,
      // and if they all together cover LRef, it is not live-on-entry.
      for (NodeAddr<DefNode*> TA : getAllReachingDefs(DA)) {
        // DefNode -> InstrNode -> BlockNode.
        NodeAddr<InstrNode*> ITA = TA.Addr->getOwner(DFG);
        NodeAddr<BlockNode*> BTA = ITA.Addr->getOwner(DFG);
        // Reaching defs are ordered in the upward direction.
        if (BTA.Addr->getCode() != B) {
          // We have reached past the beginning of B, and the accumulated
          // registers are not covering LRef. The first def from the
          // upward chain will be live.
          // Subtract all accumulated defs (RRs) from LRef.
          RegisterRef T = RRs.clearIn(LRef);
          assert(T);
          NewDefs.insert({TA.Id,T.Mask});
          break;
        }

        // TA is in B. Only add this def to the accumulated cover if it is
        // not preserving.
        if (!(TA.Addr->getFlags() & NodeAttrs::Preserving))
          RRs.insert(TA.Addr->getRegRef(DFG));
        // If this is enough to cover LRef, then stop.
        if (RRs.hasCoverOf(LRef))
          break;
      }
    }
  }

  emptify(LiveIn);

  if (Trace) {
    dbgs() << "after defs in block\n";
    dbgs() << "  LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print(LiveMap[B], DFG) << '\n';
  }

  // Scan the block for upward-exposed uses and add them to the tracking set.
  for (auto I : DFG.getFunc().Addr->findBlock(B, DFG).Addr->members(DFG)) {
    NodeAddr<InstrNode*> IA = I;
    if (IA.Addr->getKind() != NodeAttrs::Stmt)
      continue;
    for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
      if (UA.Addr->getFlags() & NodeAttrs::Undef)
        continue;
      RegisterRef RR = UA.Addr->getRegRef(DFG);
      for (NodeAddr<DefNode*> D : getAllReachingDefs(UA))
        if (getBlockWithRef(D.Id) != B)
          LiveIn[RR.Reg].insert({D.Id,RR.Mask});
    }
  }

  if (Trace) {
    dbgs() << "after uses in block\n";
    dbgs() << "  LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print(LiveMap[B], DFG) << '\n';
  }

  // Phi uses should not be propagated up the dominator tree, since they
  // are not dominated by their corresponding reaching defs.
  RegisterAggr &Local = LiveMap[B];
  RefMap &LON = PhiLON[B];
  for (auto &R : LON) {
    LaneBitmask M;
    for (auto P : R.second)
      M |= P.second;
    Local.insert(RegisterRef(R.first,M));
  }

  if (Trace) {
    dbgs() << "after phi uses in block\n";
    dbgs() << "  LiveIn: " << Print(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print(Local, DFG) << '\n';
  }

  for (auto *C : IIDF[B]) {
    RegisterAggr &LiveC = LiveMap[C];
    for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn)
      for (auto R : S.second)
        if (MDT.properlyDominates(getBlockWithRef(R.first), C))
          LiveC.insert(RegisterRef(S.first, R.second));
  }
}

void Liveness::emptify(RefMap &M) {
  for (auto I = M.begin(), E = M.end(); I != E; )
    I = I->second.empty() ? M.erase(I) : std::next(I);
}