//===- RDFGraph.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Target-independent, SSA-based data flow graph for register data flow (RDF).
//
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RDFGraph.h"
#include "llvm/CodeGen/RDFRegisters.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;
using namespace rdf;

// Printing functions. Have them here first, so that the rest of the code
// can use them.
namespace llvm {
namespace rdf {

raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
  if (!P.Mask.all())
    OS << ':' << PrintLaneMask(P.Mask);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
  auto &TRI = P.G.getTRI();
  if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
    OS << TRI.getName(P.Obj.Reg);
  else
    OS << '#' << P.Obj.Reg;
  OS << PrintLaneMaskOpt(P.Obj.Mask);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
  auto NA = P.G.addr<NodeBase*>(P.Obj);
  uint16_t Attrs = NA.Addr->getAttrs();
  uint16_t Kind = NodeAttrs::kind(Attrs);
  uint16_t Flags = NodeAttrs::flags(Attrs);
  switch (NodeAttrs::type(Attrs)) {
    case NodeAttrs::Code:
      switch (Kind) {
        case NodeAttrs::Func:   OS << 'f'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        case NodeAttrs::Stmt:   OS << 's'; break;
        case NodeAttrs::Phi:    OS << 'p'; break;
        default:                OS << "c?"; break;
      }
      break;
    case NodeAttrs::Ref:
      if (Flags & NodeAttrs::Undef)
        OS << '/';
      if (Flags & NodeAttrs::Dead)
        OS << '\\';
      if (Flags & NodeAttrs::Preserving)
        OS << '+';
      if (Flags & NodeAttrs::Clobbering)
        OS << '~';
      switch (Kind) {
        case NodeAttrs::Use:    OS << 'u'; break;
        case NodeAttrs::Def:    OS << 'd'; break;
        case NodeAttrs::Block:  OS << 'b'; break;
        default:                OS << "r?"; break;
      }
      break;
    default:
      OS << '?';
      break;
  }
  OS << P.Obj;
  if (Flags & NodeAttrs::Shadow)
    OS << '"';
  return OS;
}
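// Illustrative examples of the notation produced by the printing operators
// in this section (all ids are made up): "d42<R0>(40,,43):" is a def node 42
// of register R0 with reaching def 40, no reached def, reached use 43 and no
// sibling; "u45<R0>(40):" is a use node reached by def 40. A '"' after the
// id marks a shadow reference, and '!' after the register marks a fixed one.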
static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
      const DataFlowGraph &G) {
  OS << Print<NodeId>(RA.Id, G) << '<'
     << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
  if (RA.Addr->getFlags() & NodeAttrs::Fixed)
    OS << '!';
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getReachedUse())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<PhiUseNode*>> &P) {
  printRefHeader(OS, P.Obj, P.G);
  OS << '(';
  if (NodeId N = P.Obj.Addr->getReachingDef())
    OS << Print<NodeId>(N, P.G);
  OS << ',';
  if (NodeId N = P.Obj.Addr->getPredecessor())
    OS << Print<NodeId>(N, P.G);
  OS << "):";
  if (NodeId N = P.Obj.Addr->getSibling())
    OS << Print<NodeId>(N, P.G);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Def:
      OS << PrintNode<DefNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Use:
      if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
        OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
      else
        OS << PrintNode<UseNode*>(P.Obj, P.G);
      break;
  }
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print<NodeId>(I.Id, P.G);
    if (--N)
      OS << ' ';
  }
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
  unsigned N = P.Obj.size();
  for (auto I : P.Obj) {
    OS << Print<NodeId>(I, P.G);
    if (--N)
      OS << ' ';
  }
  return OS;
}

namespace {

  template <typename T>
  struct PrintListV {
    PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}

    using Type = T;
    const NodeList &List;
    const DataFlowGraph &G;
  };

  template <typename T>
  raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
    unsigned N = P.List.size();
    for (NodeAddr<T> A : P.List) {
      OS << PrintNode<T>(A, P.G);
      if (--N)
        OS << ", ";
    }
    return OS;
  }

} // end anonymous namespace

raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
  OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi ["
     << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}

raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<StmtNode *>> &P) {
  const MachineInstr &MI = *P.Obj.Addr->getCode();
  unsigned Opc = MI.getOpcode();
  OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
  // Print the target for calls and branches (for readability).
  if (MI.isCall() || MI.isBranch()) {
    MachineInstr::const_mop_iterator T =
          llvm::find_if(MI.operands(),
                        [] (const MachineOperand &Op) -> bool {
                          return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
                        });
    if (T != MI.operands_end()) {
      OS << ' ';
      if (T->isMBB())
        OS << printMBBReference(*T->getMBB());
      else if (T->isGlobal())
        OS << T->getGlobal()->getName();
      else if (T->isSymbol())
        OS << T->getSymbolName();
    }
  }
  OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<InstrNode*>> &P) {
  switch (P.Obj.Addr->getKind()) {
    case NodeAttrs::Phi:
      OS << PrintNode<PhiNode*>(P.Obj, P.G);
      break;
    case NodeAttrs::Stmt:
      OS << PrintNode<StmtNode*>(P.Obj, P.G);
      break;
    default:
      OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G);
      break;
  }
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<NodeAddr<BlockNode*>> &P) {
  MachineBasicBlock *BB = P.Obj.Addr->getCode();
  unsigned NP = BB->pred_size();
  std::vector<int> Ns;
  auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
    unsigned N = Ns.size();
    for (int I : Ns) {
      OS << "%bb." << I;
      if (--N)
        OS << ", ";
    }
  };

  OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
     << " --- preds(" << NP << "): ";
  for (MachineBasicBlock *B : BB->predecessors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);

  unsigned NS = BB->succ_size();
  OS << " succs(" << NS << "): ";
  Ns.clear();
  for (MachineBasicBlock *B : BB->successors())
    Ns.push_back(B->getNumber());
  PrintBBs(Ns);
  OS << '\n';

  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<InstrNode*>(I, P.G) << '\n';
  return OS;
}

raw_ostream &operator<<(raw_ostream &OS, const Print<NodeAddr<FuncNode *>> &P) {
  OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: "
     << P.Obj.Addr->getCode()->getName() << '\n';
  for (auto I : P.Obj.Addr->members(P.G))
    OS << PrintNode<BlockNode*>(I, P.G) << '\n';
  OS << "]\n";
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
  OS << '{';
  for (auto I : P.Obj)
    OS << ' ' << Print<RegisterRef>(I, P.G);
  OS << " }";
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
  P.Obj.print(OS);
  return OS;
}

raw_ostream &operator<< (raw_ostream &OS,
      const Print<DataFlowGraph::DefStack> &P) {
  for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
    OS << Print<NodeId>(I->Id, P.G)
       << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
    I.down();
    if (I != E)
      OS << ' ';
  }
  return OS;
}

} // end namespace rdf
} // end namespace llvm

// Node allocation functions.
//
// Node allocator is like a slab memory allocator: it allocates blocks of
// memory in sizes that are multiples of the size of a node. Each block has
// the same size. Nodes are allocated from the currently active block, and
// when it becomes full, a new one is created.
// The mapping scheme between a node id and its location (the block, and the
// offset within that block) is described in the header file.
//
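// A rough sketch of that id encoding (the authoritative definition lives in
// RDFGraph.h): a node id packs the pair (block index, index within block),
// along the lines of
//   NodeId makeId(uint32_t Block, uint32_t Index) {
//     return (Block << BitsPerIndex) | Index;  // adjusted so that 0 stays
//   }                                          // reserved as the null id
// with the low BitsPerIndex bits holding the node index, which is why the
// assert in startNewBlock() below checks that the block count still fits in
// the remaining bits of NodeId.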
void NodeAllocator::startNewBlock() {
  void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize);
  char *P = static_cast<char*>(T);
  Blocks.push_back(P);
  // Check if the block index is still within the allowed range, i.e. less
  // than 2^N, where N is the number of bits in NodeId for the block index.
  // BitsPerIndex is the number of bits per node index.
  assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
         "Out of bits for block index");
  ActiveEnd = P;
}

bool NodeAllocator::needNewBlock() {
  if (Blocks.empty())
    return true;

  char *ActiveBegin = Blocks.back();
  uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize;
  return Index >= NodesPerBlock;
}

NodeAddr<NodeBase*> NodeAllocator::New() {
  if (needNewBlock())
    startNewBlock();

  uint32_t ActiveB = Blocks.size()-1;
  uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize;
  NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd),
                             makeId(ActiveB, Index) };
  ActiveEnd += NodeMemSize;
  return NA;
}

NodeId NodeAllocator::id(const NodeBase *P) const {
  uintptr_t A = reinterpret_cast<uintptr_t>(P);
  for (unsigned i = 0, n = Blocks.size(); i != n; ++i) {
    uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]);
    if (A < B || A >= B + NodesPerBlock*NodeMemSize)
      continue;
    uint32_t Idx = (A-B)/NodeMemSize;
    return makeId(i, Idx);
  }
  llvm_unreachable("Invalid node address");
}

void NodeAllocator::clear() {
  MemPool.Reset();
  Blocks.clear();
  ActiveEnd = nullptr;
}

// Insert node NA after "this" in the circular chain.
void NodeBase::append(NodeAddr<NodeBase*> NA) {
  NodeId Nx = Next;
  // If NA is already "next", do nothing.
  if (Next != NA.Id) {
    Next = NA.Id;
    NA.Addr->Next = Nx;
  }
}

// Fundamental node manipulator functions.

// Obtain the register reference from a reference node.
RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
    return G.unpack(Ref.PR);
  assert(Ref.Op != nullptr);
  return G.makeRegRef(*Ref.Op);
}

// Set the register reference in the reference node directly (for references
// in phi nodes).
void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
  Ref.PR = G.pack(RR);
}

// Set the register reference in the reference node based on a machine
// operand (for references in statement nodes).
void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
  assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
  assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
  (void)G;
  Ref.Op = Op;
}

// Get the owner of a given reference node.
NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());

  while (NA.Addr != this) {
    if (NA.Addr->getType() == NodeAttrs::Code)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}
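// Illustrative layout of a member chain (all ids are made up): a code node
// c10 with members r11, r12, r13 is threaded as
//   c10: FirstM = 11, LastM = 13
//   r11: Next = 12,   r12: Next = 13,   r13: Next = 10
// i.e. the last member's Next leads back to the owning code node, so
// getOwner above simply follows Next until it reaches a node of Code type.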
// Connect the def node to the reaching def node.
void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedDef();
  DA.Addr->setReachedDef(Self);
}

// Connect the use node to the reaching def node.
void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
  Ref.RD = DA.Id;
  Ref.Sib = DA.Addr->getReachedUse();
  DA.Addr->setReachedUse(Self);
}

// Get the first member of the code node.
NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
  if (Code.FirstM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.FirstM);
}

// Get the last member of the code node.
NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
  if (Code.LastM == 0)
    return NodeAddr<NodeBase*>();
  return G.addr<NodeBase*>(Code.LastM);
}

// Add node NA at the end of the member list of the given code node.
void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> ML = getLastMember(G);
  if (ML.Id != 0) {
    ML.Addr->append(NA);
  } else {
    Code.FirstM = NA.Id;
    NodeId Self = G.id(this);
    NA.Addr->setNext(Self);
  }
  Code.LastM = NA.Id;
}

// Add node NA after member node MA in the given code node.
void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
      const DataFlowGraph &G) {
  MA.Addr->append(NA);
  if (Code.LastM == MA.Id)
    Code.LastM = NA.Id;
}

// Remove member node NA from the given code node.
void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> MA = getFirstMember(G);
  assert(MA.Id != 0);

  // Special handling if the member to remove is the first member.
  if (MA.Id == NA.Id) {
    if (Code.LastM == MA.Id) {
      // If it is the only member, set both first and last to 0.
      Code.FirstM = Code.LastM = 0;
    } else {
      // Otherwise, advance the first member.
      Code.FirstM = MA.Addr->getNext();
    }
    return;
  }

  while (MA.Addr != this) {
    NodeId MX = MA.Addr->getNext();
    if (MX == NA.Id) {
      MA.Addr->setNext(NA.Addr->getNext());
      // If the member to remove happens to be the last one, update the
      // LastM indicator.
      if (Code.LastM == NA.Id)
        Code.LastM = MA.Id;
      return;
    }
    MA = G.addr<NodeBase*>(MX);
  }
  llvm_unreachable("No such member");
}

// Return the list of all members of the code node.
NodeList CodeNode::members(const DataFlowGraph &G) const {
  static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
  return members_if(True, G);
}

// Return the owner of the given instr node.
NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
  NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());

  while (NA.Addr != this) {
    assert(NA.Addr->getType() == NodeAttrs::Code);
    if (NA.Addr->getKind() == NodeAttrs::Block)
      return NA;
    NA = G.addr<NodeBase*>(NA.Addr->getNext());
  }
  llvm_unreachable("No owner in circular list");
}
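// Illustrative effect of the DefNode/UseNode::linkToDef functions above
// (ids are made up): linking uses u21 and then u22 to a def d20 gives
//   d20: ReachedUse = 22,   u22: RD = 20, Sib = 21,   u21: RD = 20, Sib = 0
// i.e. the most recently linked reference becomes the head of the reached
// chain, and the chain itself is threaded through the Sib fields.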
// Add the phi node PA to the given block node.
void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
  NodeAddr<NodeBase*> M = getFirstMember(G);
  if (M.Id == 0) {
    addMember(PA, G);
    return;
  }

  assert(M.Addr->getType() == NodeAttrs::Code);
  if (M.Addr->getKind() == NodeAttrs::Stmt) {
    // If the first member of the block is a statement, insert the phi as
    // the first member.
    Code.FirstM = PA.Id;
    PA.Addr->setNext(M.Id);
  } else {
    // If the first member is a phi, find the last phi, and append PA to it.
    assert(M.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<NodeBase*> MN = M;
    do {
      M = MN;
      MN = G.addr<NodeBase*>(M.Addr->getNext());
      assert(MN.Addr->getType() == NodeAttrs::Code);
    } while (MN.Addr->getKind() == NodeAttrs::Phi);

    // M is the last phi.
    addMemberAfter(M, PA, G);
  }
}

// Find the block node corresponding to the machine basic block BB in the
// given func node.
NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
      const DataFlowGraph &G) const {
  auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
    return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
  };
  NodeList Ms = members_if(EqBB, G);
  if (!Ms.empty())
    return Ms[0];
  return NodeAddr<BlockNode*>();
}

// Get the block node for the entry block in the given function.
NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
  MachineBasicBlock *EntryB = &getCode()->front();
  return findBlock(EntryB, G);
}

// Target operand information.
//

// For a given instruction, check if there are any bits of RR that can remain
// unchanged across this def.
bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
      const {
  return TII.isPredicated(In);
}

// Check if the definition of RR produces an unspecified value.
bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
      const {
  const MachineOperand &Op = In.getOperand(OpNum);
  if (Op.isRegMask())
    return true;
  assert(Op.isReg());
  if (In.isCall())
    if (Op.isDef() && Op.isDead())
      return true;
  return false;
}

// Check if the given instruction specifically requires the register in
// operand OpNum (i.e. whether the operand is a fixed register).
bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
      const {
  if (In.isCall() || In.isReturn() || In.isInlineAsm())
    return true;
  // Check for a tail call.
  if (In.isBranch())
    for (const MachineOperand &O : In.operands())
      if (O.isGlobal() || O.isSymbol())
        return true;

  const MCInstrDesc &D = In.getDesc();
  if (!D.getImplicitDefs() && !D.getImplicitUses())
    return false;
  const MachineOperand &Op = In.getOperand(OpNum);
  // If there is a sub-register, treat the operand as non-fixed. Currently,
  // fixed registers are those that are listed in the descriptor as implicit
  // uses or defs, and those lists do not allow sub-registers.
  if (Op.getSubReg() != 0)
    return false;
  Register Reg = Op.getReg();
  const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
                                     : D.getImplicitUses();
  if (!ImpR)
    return false;
  while (*ImpR)
    if (*ImpR++ == Reg)
      return true;
  return false;
}

//
// The data flow graph construction.
//

DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
      const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
      const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
    : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi),
      LiveIns(PRI) {
}

// The implementation of the definition stack.
// Each register reference has its own definition stack. In particular,
// the register references "Reg" and "Reg:subreg" will each have their
// own definition stack.

// Construct a stack iterator.
DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
      bool Top) : DS(S) {
  if (!Top) {
    // Initialize to bottom.
    Pos = 0;
    return;
  }
  // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
  Pos = DS.Stack.size();
  while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
    Pos--;
}

// Return the size of the stack, not counting block delimiters.
unsigned DataFlowGraph::DefStack::size() const {
  unsigned S = 0;
  for (auto I = top(), E = bottom(); I != E; I.down())
    S++;
  return S;
}

// Remove the top entry from the stack. Remove all intervening delimiters
// so that after this, the stack is either empty, or the top of the stack
// is a non-delimiter.
void DataFlowGraph::DefStack::pop() {
  assert(!empty());
  unsigned P = nextDown(Stack.size());
  Stack.resize(P);
}

// Push a delimiter for block node N on the stack.
void DataFlowGraph::DefStack::start_block(NodeId N) {
  assert(N != 0);
  Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
}

// Remove all nodes from the top of the stack, until the delimiter for
// block node N is encountered. Remove the delimiter as well. In effect,
// this will remove from the stack all definitions from block N.
void DataFlowGraph::DefStack::clear_block(NodeId N) {
  assert(N != 0);
  unsigned P = Stack.size();
  while (P > 0) {
    bool Found = isDelimiter(Stack[P-1], N);
    P--;
    if (Found)
      break;
  }
  // This will also remove the delimiter, if found.
  Stack.resize(P);
}

// Move the stack iterator up by one.
unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
  // Get the next valid position after P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  unsigned SS = Stack.size();
  bool IsDelim;
  assert(P < SS);
  do {
    P++;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P < SS && IsDelim);
  assert(!IsDelim);
  return P;
}

// Move the stack iterator down by one.
unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
  // Get the preceding valid position before P (skipping all delimiters).
  // The input position P does not have to point to a non-delimiter.
  assert(P > 0 && P <= Stack.size());
  bool IsDelim = isDelimiter(Stack[P-1]);
  do {
    if (--P == 0)
      break;
    IsDelim = isDelimiter(Stack[P-1]);
  } while (P > 0 && IsDelim);
  assert(!IsDelim);
  return P;
}
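// Illustrative stack contents (all ids are made up): after pushing defs d5
// and d6 while processing block b1, and then entering its dominator-tree
// child b2 with a def d9, the stack for some register could be
//   bottom:  d5  d6  [delimiter b2]  d9  :top
// The iterators above skip the delimiters, so traversals such as size() or
// linkRefUp see only d5, d6 and d9; clear_block(b2) later removes d9
// together with its delimiter.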
// Register information.

RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
  RegisterSet LR;
  const Function &F = MF.getFunction();
  const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
                                            : nullptr;
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  if (RegisterId R = TLI.getExceptionPointerRegister(PF))
    LR.insert(RegisterRef(R));
  if (!isFuncletEHPersonality(classifyEHPersonality(PF))) {
    if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
      LR.insert(RegisterRef(R));
  }
  return LR;
}

// Node management functions.

// Get the pointer to the node with the id N.
NodeBase *DataFlowGraph::ptr(NodeId N) const {
  if (N == 0)
    return nullptr;
  return Memory.ptr(N);
}

// Get the id of the node at the address P.
NodeId DataFlowGraph::id(const NodeBase *P) const {
  if (P == nullptr)
    return 0;
  return Memory.id(P);
}

// Allocate a new node and set the attributes to Attrs.
NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) {
  NodeAddr<NodeBase*> P = Memory.New();
  P.Addr->init();
  P.Addr->setAttrs(Attrs);
  return P;
}

// Make a copy of the given node B, except for the data-flow links, which
// are set to 0.
NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
  NodeAddr<NodeBase*> NA = newNode(0);
  memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
  // Ref nodes need to have the data-flow links reset.
  if (NA.Addr->getType() == NodeAttrs::Ref) {
    NodeAddr<RefNode*> RA = NA;
    RA.Addr->setReachingDef(0);
    RA.Addr->setSibling(0);
    if (NA.Addr->getKind() == NodeAttrs::Def) {
      NodeAddr<DefNode*> DA = NA;
      DA.Addr->setReachedDef(0);
      DA.Addr->setReachedUse(0);
    }
  }
  return NA;
}

// Allocation routines for specific node types/kinds.

NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
      MachineOperand &Op, uint16_t Flags) {
  NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  UA.Addr->setRegRef(&Op, *this);
  return UA;
}

NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
      RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
  NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
  assert(Flags & NodeAttrs::PhiRef);
  PUA.Addr->setRegRef(RR, *this);
  PUA.Addr->setPredecessor(PredB.Id);
  return PUA;
}

NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
      MachineOperand &Op, uint16_t Flags) {
  NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  DA.Addr->setRegRef(&Op, *this);
  return DA;
}

NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
      RegisterRef RR, uint16_t Flags) {
  NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
  assert(Flags & NodeAttrs::PhiRef);
  DA.Addr->setRegRef(RR, *this);
  return DA;
}

NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
  NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
  Owner.Addr->addPhi(PA, *this);
  return PA;
}

NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
      MachineInstr *MI) {
  NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
  SA.Addr->setCode(MI);
  Owner.Addr->addMember(SA, *this);
  return SA;
}

NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
      MachineBasicBlock *BB) {
  NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
  BA.Addr->setCode(BB);
  Owner.Addr->addMember(BA, *this);
  return BA;
}

NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
  NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
  FA.Addr->setCode(MF);
  return FA;
}

// Build the data flow graph.
void DataFlowGraph::build(unsigned Options) {
  reset();
  Func = newFunc(&MF);

  if (MF.empty())
    return;

  for (MachineBasicBlock &B : MF) {
    NodeAddr<BlockNode*> BA = newBlock(Func, &B);
    BlockNodes.insert(std::make_pair(&B, BA));
    for (MachineInstr &I : B) {
      if (I.isDebugInstr())
        continue;
      buildStmt(BA, I);
    }
  }

  NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
  NodeList Blocks = Func.Addr->members(*this);

  // Collect information about block references.
  RegisterSet AllRefs;
  for (NodeAddr<BlockNode*> BA : Blocks)
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
      for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
        AllRefs.insert(RA.Addr->getRegRef(*this));

  // Collect function live-ins and entry block live-ins.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineBasicBlock &EntryB = *EA.Addr->getCode();
  assert(EntryB.pred_empty() && "Function entry block has predecessors");
  for (std::pair<unsigned,unsigned> P : MRI.liveins())
    LiveIns.insert(RegisterRef(P.first));
  if (MRI.tracksLiveness()) {
    for (auto I : EntryB.liveins())
      LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask));
  }

  // Add function-entry phi nodes for the live-in registers.
  //for (std::pair<RegisterId,LaneBitmask> P : LiveIns) {
  for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I) {
    RegisterRef RR = *I;
    NodeAddr<PhiNode*> PA = newPhi(EA);
    uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
    NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
    PA.Addr->addMember(DA, *this);
  }

  // Add phis for landing pads.
  // Landing pads, unlike usual basic blocks, are not entered through
  // branches in the program, or fall-throughs from other blocks. They
  // are entered from the exception handling runtime, and the target's ABI
  // may define certain registers as defined on entry to such a block.
  RegisterSet EHRegs = getLandingPadLiveIns();
  if (!EHRegs.empty()) {
    for (NodeAddr<BlockNode*> BA : Blocks) {
      const MachineBasicBlock &B = *BA.Addr->getCode();
      if (!B.isEHPad())
        continue;

      // Prepare a list of NodeIds of the block's predecessors.
      NodeList Preds;
      for (MachineBasicBlock *PB : B.predecessors())
        Preds.push_back(findBlock(PB));

      // Build phi nodes for each live-in.
      for (RegisterRef RR : EHRegs) {
        NodeAddr<PhiNode*> PA = newPhi(BA);
        uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
        // Add def:
        NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
        PA.Addr->addMember(DA, *this);
        // Add uses (no reaching defs for phi uses):
        for (NodeAddr<BlockNode*> PBA : Preds) {
          NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
          PA.Addr->addMember(PUA, *this);
        }
      }
    }
  }

  // Build a map "PhiM" which will contain, for each block, the set
  // of references that will require phi definitions in that block.
  BlockRefsMap PhiM;
  for (NodeAddr<BlockNode*> BA : Blocks)
    recordDefsForDF(PhiM, BA);
  for (NodeAddr<BlockNode*> BA : Blocks)
    buildPhis(PhiM, AllRefs, BA);

  // Link all the refs. This will recursively traverse the dominator tree.
  DefStackMap DM;
  linkBlockRefs(DM, EA);

  // Finally, remove all unused phi nodes.
  if (!(Options & BuildOptions::KeepDeadPhis))
    removeUnusedPhis();
}

RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
  assert(PhysicalRegisterInfo::isRegMaskId(Reg) ||
         Register::isPhysicalRegister(Reg));
  assert(Reg != 0);
  if (Sub != 0)
    Reg = TRI.getSubReg(Reg, Sub);
  return RegisterRef(Reg);
}

RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const {
  assert(Op.isReg() || Op.isRegMask());
  if (Op.isReg())
    return makeRegRef(Op.getReg(), Op.getSubReg());
  return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll());
}

RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const {
  if (AR.Reg == BR.Reg) {
    LaneBitmask M = AR.Mask & BR.Mask;
    return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef();
  }
  // This isn't strictly correct, because the overlap may happen in the
  // part masked out.
  if (PRI.alias(AR, BR))
    return AR;
  return RegisterRef();
}

// For each stack in the map DefM, push the delimiter for block B on it.
void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
  // Push block delimiters.
  for (auto &P : DefM)
    P.second.start_block(B);
}

// Remove all definitions coming from block B from each stack in DefM.
void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
  // Pop all defs from this block from the definition stack. Defs that were
  // added to the map during the traversal of instructions will not have a
  // delimiter, but for those, the whole stack will be emptied.
  for (auto &P : DefM)
    P.second.clear_block(B);

  // Finally, remove empty stacks from the map.
  for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
    NextI = std::next(I);
    // This preserves the validity of iterators other than I.
    if (I->second.empty())
      DefM.erase(I);
  }
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  pushClobbers(IA, DefM);
  pushDefs(IA, DefM);
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
  std::set<RegisterId> Defined;

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.
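  // For example (illustrative; the register names are hypothetical): if an
  // instruction has two unrelated clobbering defs of the non-overlapping
  // subregisters S0 and S1 of a double register D0, both defs end up on the
  // stack for D0 (via the alias sets below), in an unspecified order, while
  // each group of related defs contributes only a single entry.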

  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    if (Visited.count(DA.Id))
      continue;
    if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering))
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);

    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkRefUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    Defined.insert(RR.Reg);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      if (!Defined.count(A))
        DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}

// Push all definitions from the instruction node IA to an appropriate
// stack in DefM.
void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
  NodeSet Visited;
#ifndef NDEBUG
  std::set<RegisterId> Defined;
#endif

  // The important objectives of this function are:
  // - to be able to handle instructions both while the graph is being
  //   constructed, and after the graph has been constructed, and
  // - maintain proper ordering of definitions on the stack for each
  //   register reference:
  //   - if there are two or more related defs in IA (i.e. coming from
  //     the same machine operand), then only push one def on the stack,
  //   - if there are multiple unrelated defs of non-overlapping
  //     subregisters of S, then the stack for S will have both (in an
  //     unspecified order), but the order does not matter from the data-
  //     -flow perspective.

  for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
    if (Visited.count(DA.Id))
      continue;
    if (DA.Addr->getFlags() & NodeAttrs::Clobbering)
      continue;

    NodeList Rel = getRelatedRefs(IA, DA);
    NodeAddr<DefNode*> PDA = Rel.front();
    RegisterRef RR = PDA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Assert if the register is defined in two or more unrelated defs.
    // This could happen if there are two or more def operands defining it.
    if (!Defined.insert(RR.Reg).second) {
      MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
      dbgs() << "Multiple definitions of register: "
             << Print<RegisterRef>(RR, *this) << " in\n  " << *MI << "in "
             << printMBBReference(*MI->getParent()) << '\n';
      llvm_unreachable(nullptr);
    }
#endif
    // Push the definition on the stack for the register and all aliases.
    // The def stack traversal in linkRefUp will check the exact aliasing.
    DefM[RR.Reg].push(DA);
    for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
      // Check that we don't push the same def twice.
      assert(A != RR.Reg);
      DefM[A].push(DA);
    }
    // Mark all the related defs as visited.
    for (NodeAddr<NodeBase*> T : Rel)
      Visited.insert(T.Id);
  }
}

// Return the list of all reference nodes related to RA, including RA itself.
// See "getNextRelated" for the meaning of a "related reference".
NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  NodeList Refs;
  NodeId Start = RA.Id;
  do {
    Refs.push_back(RA);
    RA = getNextRelated(IA, RA);
  } while (RA.Id != 0 && RA.Id != Start);
  return Refs;
}

// Clear all information in the graph.
void DataFlowGraph::reset() {
  Memory.clear();
  BlockNodes.clear();
  Func = NodeAddr<FuncNode*>();
}

// Return the next reference node in the instruction node IA that is related
// to RA. Conceptually, two reference nodes are related if they refer to the
// same instance of a register access, but differ in flags or other minor
// characteristics. Specific examples of related nodes are shadow reference
// nodes.
// Return the equivalent of nullptr if there are no more related references.
NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);

  auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
    if (TA.Addr->getKind() != RA.Addr->getKind())
      return false;
    if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
      return false;
    return true;
  };
  auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    return Related(TA) &&
           &RA.Addr->getOp() == &TA.Addr->getOp();
  };
  auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
    if (!Related(TA))
      return false;
    if (TA.Addr->getKind() != NodeAttrs::Use)
      return true;
    // For phi uses, compare predecessor blocks.
    const NodeAddr<const PhiUseNode*> TUA = TA;
    const NodeAddr<const PhiUseNode*> RUA = RA;
    return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
  };

  RegisterRef RR = RA.Addr->getRegRef(*this);
  if (IA.Addr->getKind() == NodeAttrs::Stmt)
    return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
  return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
}

// Find the next node related to RA in IA that satisfies condition P.
// If such a node was found, return a pair where the second element is the
// located node. If such a node does not exist, return a pair where the
// first element is the element after which such a node should be inserted,
// and the second element is a null-address.
template <typename Predicate>
std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>>
DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA,
      Predicate P) const {
  assert(IA.Id != 0 && RA.Id != 0);

  NodeAddr<RefNode*> NA;
  NodeId Start = RA.Id;
  while (true) {
    NA = getNextRelated(IA, RA);
    if (NA.Id == 0 || NA.Id == Start)
      break;
    if (P(NA))
      break;
    RA = NA;
  }

  if (NA.Id != 0 && NA.Id != Start)
    return std::make_pair(RA, NA);
  return std::make_pair(RA, NodeAddr<RefNode*>());
}
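// A note on the "shadow" references handled below: when a single reference
// needs more than one reaching def to cover its register (e.g. a use of a
// 64-bit register reached separately by defs of its two 32-bit halves),
// extra copies of the reference are created and marked with the Shadow
// flag, one per additional reaching def. The registers in this example are
// hypothetical; the linking itself happens in linkRefUp further below.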
// Get the next shadow node in IA corresponding to RA, and optionally create
// such a node if it does not exist.
NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA, bool Create) {
  assert(IA.Id != 0 && RA.Id != 0);

  uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getFlags() == Flags;
  };
  auto Loc = locateNextRef(IA, RA, IsShadow);
  if (Loc.second.Id != 0 || !Create)
    return Loc.second;

  // Create a copy of RA and mark it as shadow.
  NodeAddr<RefNode*> NA = cloneNode(RA);
  NA.Addr->setFlags(Flags | NodeAttrs::Shadow);
  IA.Addr->addMemberAfter(Loc.first, NA, *this);
  return NA;
}

// Get the next shadow node in IA corresponding to RA. Return null-address
// if such a node does not exist.
NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
      NodeAddr<RefNode*> RA) const {
  assert(IA.Id != 0 && RA.Id != 0);
  uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
  auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getFlags() == Flags;
  };
  return locateNextRef(IA, RA, IsShadow).second;
}

// Create a new statement node in the block node BA that corresponds to
// the machine instruction MI.
void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
  NodeAddr<StmtNode*> SA = newStmt(BA, &In);

  auto isCall = [] (const MachineInstr &In) -> bool {
    if (In.isCall())
      return true;
    // Is tail call?
    if (In.isBranch()) {
      for (const MachineOperand &Op : In.operands())
        if (Op.isGlobal() || Op.isSymbol())
          return true;
      // Assume indirect branches are calls. This is for the purpose of
      // keeping implicit operands, and so it won't hurt on intra-function
      // indirect branches.
      if (In.isIndirectBranch())
        return true;
    }
    return false;
  };

  auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool {
    // This instruction defines DR. Check if there is a use operand that
    // would make DR live on entry to the instruction.
    for (const MachineOperand &Op : In.operands()) {
      if (!Op.isReg() || Op.getReg() == 0 || !Op.isUse() || Op.isUndef())
        continue;
      RegisterRef UR = makeRegRef(Op);
      if (PRI.alias(DR, UR))
        return false;
    }
    return true;
  };

  bool IsCall = isCall(In);
  unsigned NumOps = In.getNumOperands();

  // Avoid duplicate implicit defs. This will not detect cases of implicit
  // defs that define registers that overlap, but it is not clear how to
  // interpret that in the absence of explicit defs. Overlapping explicit
  // defs are likely illegal already.
  BitVector DoneDefs(TRI.getNumRegs());
  // Process explicit defs first.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R))
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, makeRegRef(Op)))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead())
      Flags |= NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    assert(!DoneDefs.test(R));
    DoneDefs.set(R);
  }

  // Process reg-masks (as clobbers).
  BitVector DoneClobbers(TRI.getNumRegs());
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isRegMask())
      continue;
    uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed |
                     NodeAttrs::Dead;
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    // Record all clobbered registers in DoneDefs.
    const uint32_t *RM = Op.getRegMask();
    for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i)
      if (!(RM[i/32] & (1u << (i%32))))
        DoneClobbers.set(i);
  }

  // Process implicit defs, skipping those that have already been added
  // as explicit.
  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R) || DoneDefs.test(R))
      continue;
    RegisterRef RR = makeRegRef(Op);
    uint16_t Flags = NodeAttrs::None;
    if (TOI.isPreserving(In, OpN)) {
      Flags |= NodeAttrs::Preserving;
      // If the def is preserving, check if it is also undefined.
      if (isDefUndef(In, RR))
        Flags |= NodeAttrs::Undef;
    }
    if (TOI.isClobbering(In, OpN))
      Flags |= NodeAttrs::Clobbering;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    if (IsCall && Op.isDead()) {
      if (DoneClobbers.test(R))
        continue;
      Flags |= NodeAttrs::Dead;
    }
    NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
    SA.Addr->addMember(DA, *this);
    DoneDefs.set(R);
  }

  for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
    MachineOperand &Op = In.getOperand(OpN);
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!R || !Register::isPhysicalRegister(R))
      continue;
    uint16_t Flags = NodeAttrs::None;
    if (Op.isUndef())
      Flags |= NodeAttrs::Undef;
    if (TOI.isFixedReg(In, OpN))
      Flags |= NodeAttrs::Fixed;
    NodeAddr<UseNode*> UA = newUse(SA, Op, Flags);
    SA.Addr->addMember(UA, *this);
  }
}

// Scan all defs in the block node BA and record in PhiM the locations of
// phi nodes corresponding to these defs.
void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM,
      NodeAddr<BlockNode*> BA) {
  // Check all defs from block BA and record them in each block in BA's
  // iterated dominance frontier. This information will later be used to
  // create phi nodes.
  MachineBasicBlock *BB = BA.Addr->getCode();
  assert(BB);
  auto DFLoc = MDF.find(BB);
  if (DFLoc == MDF.end() || DFLoc->second.empty())
    return;

  // Traverse all instructions in the block and collect the set of all
  // defined references. For each reference there will be a phi created
  // in the block's iterated dominance frontier.
  // This is done to make sure that each defined reference gets only one
  // phi node, even if it is defined multiple times.
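  // Illustrative example (hypothetical CFG): if this block defines R and
  // its dominance frontier is {B4}, a phi for R is recorded for B4; if
  // B4's own frontier contributes {B6}, the iteration below records a phi
  // for R in B6 as well.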
  RegisterSet Defs;
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
    for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
      Defs.insert(RA.Addr->getRegRef(*this));

  // Calculate the iterated dominance frontier of BB.
  const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
  SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
  for (unsigned i = 0; i < IDF.size(); ++i) {
    auto F = MDF.find(IDF[i]);
    if (F != MDF.end())
      IDF.insert(F->second.begin(), F->second.end());
  }

  // Finally, add the set of defs to each block in the iterated dominance
  // frontier.
  for (auto DB : IDF) {
    NodeAddr<BlockNode*> DBA = findBlock(DB);
    PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
  }
}

// Given the locations of phi nodes in the map PhiM, create the phi nodes
// that are located in the block node BA.
void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
      NodeAddr<BlockNode*> BA) {
  // Check if this block has any DF defs, i.e. if there are any defs
  // that have this block in their iterated dominance frontier.
  auto HasDF = PhiM.find(BA.Id);
  if (HasDF == PhiM.end() || HasDF->second.empty())
    return;

  // First, remove all R in Refs such that there exists T in Refs that
  // covers R. In other words, only leave those refs that are not covered
  // by another ref (i.e. are maximal with respect to covering).

  auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
    for (RegisterRef I : RRs)
      if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI))
        RR = I;
    return RR;
  };

  RegisterSet MaxDF;
  for (RegisterRef I : HasDF->second)
    MaxDF.insert(MaxCoverIn(I, HasDF->second));

  std::vector<RegisterRef> MaxRefs;
  for (RegisterRef I : MaxDF)
    MaxRefs.push_back(MaxCoverIn(I, AllRefs));

  // Now, for each R in MaxRefs, get the alias closure of R. If the closure
  // only has R in it, create a phi with a def for R. Otherwise, create a
  // phi, and add a def for each S in the closure.

  // Sort the refs so that the phis will be created in a deterministic order.
  llvm::sort(MaxRefs);
  // Remove duplicates.
  auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
  MaxRefs.erase(NewEnd, MaxRefs.end());

  auto Aliased = [this,&MaxRefs](RegisterRef RR,
                   std::vector<unsigned> &Closure) -> bool {
    for (unsigned I : Closure)
      if (PRI.alias(RR, MaxRefs[I]))
        return true;
    return false;
  };

  // Prepare a list of NodeIds of the block's predecessors.
  NodeList Preds;
  const MachineBasicBlock *MBB = BA.Addr->getCode();
  for (MachineBasicBlock *PB : MBB->predecessors())
    Preds.push_back(findBlock(PB));

  while (!MaxRefs.empty()) {
    // Put the first element in the closure, and then add all subsequent
    // elements from MaxRefs to it, if they alias at least one element
    // already in the closure.
    // ClosureIdx: vector of indices in MaxRefs of members of the closure.
    std::vector<unsigned> ClosureIdx = { 0 };
    for (unsigned i = 1; i != MaxRefs.size(); ++i)
      if (Aliased(MaxRefs[i], ClosureIdx))
        ClosureIdx.push_back(i);

    // Build a phi for the closure.
    unsigned CS = ClosureIdx.size();
    NodeAddr<PhiNode*> PA = newPhi(BA);

    // Add defs.
    for (unsigned X = 0; X != CS; ++X) {
      RegisterRef RR = MaxRefs[ClosureIdx[X]];
      uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
      NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
      PA.Addr->addMember(DA, *this);
    }
    // Add phi uses.
    for (NodeAddr<BlockNode*> PBA : Preds) {
      for (unsigned X = 0; X != CS; ++X) {
        RegisterRef RR = MaxRefs[ClosureIdx[X]];
        NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
        PA.Addr->addMember(PUA, *this);
      }
    }

    // Erase from MaxRefs all elements in the closure.
    auto Begin = MaxRefs.begin();
    for (unsigned Idx : llvm::reverse(ClosureIdx))
      MaxRefs.erase(Begin + Idx);
  }
}

// Remove any unneeded phi nodes that were created during the build process.
void DataFlowGraph::removeUnusedPhis() {
  // This will remove unused phis, i.e. phis where each def does not reach
  // any uses or other defs. This will not detect or remove circular phi
  // chains that are otherwise dead. Unused/dead phis are created during
  // the build process and this function is intended to remove these cases
  // that are easily determinable to be unnecessary.

  SetVector<NodeId> PhiQ;
  for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
    for (auto P : BA.Addr->members_if(IsPhi, *this))
      PhiQ.insert(P.Id);
  }

  static auto HasUsedDef = [](NodeList &Ms) -> bool {
    for (NodeAddr<NodeBase*> M : Ms) {
      if (M.Addr->getKind() != NodeAttrs::Def)
        continue;
      NodeAddr<DefNode*> DA = M;
      if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
        return true;
    }
    return false;
  };

  // Any phi, if it is removed, may affect other phis (make them dead).
  // For each removed phi, collect the potentially affected phis and add
  // them back to the queue.
  while (!PhiQ.empty()) {
    auto PA = addr<PhiNode*>(PhiQ[0]);
    PhiQ.remove(PA.Id);
    NodeList Refs = PA.Addr->members(*this);
    if (HasUsedDef(Refs))
      continue;
    for (NodeAddr<RefNode*> RA : Refs) {
      if (NodeId RD = RA.Addr->getReachingDef()) {
        auto RDA = addr<DefNode*>(RD);
        NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
        if (IsPhi(OA))
          PhiQ.insert(OA.Id);
      }
      if (RA.Addr->isDef())
        unlinkDef(RA, true);
      else
        unlinkUse(RA, true);
    }
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
    BA.Addr->removeMember(PA, *this);
  }
}

// For a given reference node TA in an instruction node IA, connect the
// reaching def of TA to the appropriate def node. Create any shadow nodes
// as appropriate.
template <typename T>
void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
      DefStack &DS) {
  if (DS.empty())
    return;
  RegisterRef RR = TA.Addr->getRegRef(*this);
  NodeAddr<T> TAP;

  // References from the def stack that have been examined so far.
  RegisterAggr Defs(PRI);

  for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
    RegisterRef QR = I->Addr->getRegRef(*this);

    // Skip all defs that are aliased to any of the defs that we have already
    // seen. If this completes a cover of RR, stop the stack traversal.
    bool Alias = Defs.hasAliasOf(QR);
    bool Cover = Defs.insert(QR).hasCoverOf(RR);
    if (Alias) {
      if (Cover)
        break;
      continue;
    }

    // The reaching def.
    NodeAddr<DefNode*> RDA = *I;

    // Pick the reached node.
    if (TAP.Id == 0) {
      TAP = TA;
    } else {
      // Mark the existing ref as "shadow" and create a new shadow.
      TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
      TAP = getNextShadow(IA, TAP, true);
    }

    // Create the link.
    TAP.Addr->linkToDef(TAP.Id, RDA);

    if (Cover)
      break;
  }
}

// Create data-flow links for all reference nodes in the statement node SA.
template <typename Predicate>
void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA,
      Predicate P) {
#ifndef NDEBUG
  RegisterSet Defs;
#endif

  // Link all nodes (upwards in the data-flow) with their reaching defs.
  for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) {
    uint16_t Kind = RA.Addr->getKind();
    assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
    RegisterRef RR = RA.Addr->getRegRef(*this);
#ifndef NDEBUG
    // Do not expect multiple defs of the same reference.
    assert(Kind != NodeAttrs::Def || !Defs.count(RR));
    Defs.insert(RR);
#endif

    auto F = DefM.find(RR.Reg);
    if (F == DefM.end())
      continue;
    DefStack &DS = F->second;
    if (Kind == NodeAttrs::Use)
      linkRefUp<UseNode*>(SA, RA, DS);
    else if (Kind == NodeAttrs::Def)
      linkRefUp<DefNode*>(SA, RA, DS);
    else
      llvm_unreachable("Unexpected node in instruction");
  }
}

// Create data-flow links for all instructions in the block node BA. This
// will include updating any phi nodes in BA.
void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
  // Push block delimiters.
  markBlock(BA.Id, DefM);

  auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };
  auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool {
    return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering);
  };

  assert(BA.Addr && "block node address is needed to create a data-flow link");
  // For each non-phi instruction in the block, link all the defs and uses
  // to their reaching defs. For any member of the block (including phis),
  // push the defs on the corresponding stacks.
  for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
    // Ignore phi nodes here. They will be linked part by part from the
    // predecessors.
    if (IA.Addr->getKind() == NodeAttrs::Stmt) {
      linkStmtRefs(DefM, IA, IsUse);
      linkStmtRefs(DefM, IA, IsClobber);
    }

    // Push the definitions on the stack.
    pushClobbers(IA, DefM);

    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      linkStmtRefs(DefM, IA, IsNoClobber);

    pushDefs(IA, DefM);
  }

  // Recursively process all children in the dominator tree.
  MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
  for (auto I : *N) {
    MachineBasicBlock *SB = I->getBlock();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    linkBlockRefs(DefM, SBA);
  }

  // Link the phi uses from the successor blocks.
  auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
    if (NA.Addr->getKind() != NodeAttrs::Use)
      return false;
    assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
    NodeAddr<PhiUseNode*> PUA = NA;
    return PUA.Addr->getPredecessor() == BA.Id;
  };

  RegisterSet EHLiveIns = getLandingPadLiveIns();
  MachineBasicBlock *MBB = BA.Addr->getCode();

  for (MachineBasicBlock *SB : MBB->successors()) {
    bool IsEHPad = SB->isEHPad();
    NodeAddr<BlockNode*> SBA = findBlock(SB);
    for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
      // Do not link phi uses for landing pad live-ins.
      if (IsEHPad) {
        // Find what register this phi is for.
        NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
        assert(RA.Id != 0);
        if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
          continue;
      }
      // Go over each phi use associated with MBB, and link it.
      for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
        NodeAddr<PhiUseNode*> PUA = U;
        RegisterRef RR = PUA.Addr->getRegRef(*this);
        linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
      }
    }
  }

  // Pop all defs from this block from the definition stacks.
  releaseBlock(BA.Id, DefM);
}

// Remove the use node UA from any data-flow and structural links.
void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
  NodeId RD = UA.Addr->getReachingDef();
  NodeId Sib = UA.Addr->getSibling();

  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
  if (TA.Id == UA.Id) {
    RDA.Addr->setReachedUse(Sib);
    return;
  }

  while (TA.Id != 0) {
    NodeId S = TA.Addr->getSibling();
    if (S == UA.Id) {
      TA.Addr->setSibling(UA.Addr->getSibling());
      return;
    }
    TA = addr<UseNode*>(S);
  }
}

// Remove the def node DA from any data-flow and structural links.
void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
  //
  //         RD
  //         | reached
  //         | def
  //         :
  //         .
  //        +----+
  // ... -- | DA | -- ... -- 0  : sibling chain of DA
  //        +----+
  //         |  | reached
  //         |  : def
  //         |  .
  //         | ...  : Siblings (defs)
  //         |
  //         : reached
  //         . use
  //        ...  : sibling chain of reached uses

  NodeId RD = DA.Addr->getReachingDef();

  // Visit all siblings of the reached def and reset their reaching defs.
  // Also, defs reached by DA are now "promoted" to being reached by RD,
  // so all of them will need to be spliced into the sibling chain where
  // DA belongs.
  auto getAllNodes = [this] (NodeId N) -> NodeList {
    NodeList Res;
    while (N) {
      auto RA = addr<RefNode*>(N);
      // Keep the nodes in the exact sibling order.
      Res.push_back(RA);
      N = RA.Addr->getSibling();
    }
    return Res;
  };
  NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
  NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());

  if (RD == 0) {
    for (NodeAddr<RefNode*> I : ReachedDefs)
      I.Addr->setSibling(0);
    for (NodeAddr<RefNode*> I : ReachedUses)
      I.Addr->setSibling(0);
  }
  for (NodeAddr<DefNode*> I : ReachedDefs)
    I.Addr->setReachingDef(RD);
  for (NodeAddr<UseNode*> I : ReachedUses)
    I.Addr->setReachingDef(RD);

  NodeId Sib = DA.Addr->getSibling();
  if (RD == 0) {
    assert(Sib == 0);
    return;
  }

  // Update the reaching def node and remove DA from the sibling list.
  auto RDA = addr<DefNode*>(RD);
  auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
  if (TA.Id == DA.Id) {
    // If DA is the first reached def, just update the RD's reached def
    // to the DA's sibling.
    RDA.Addr->setReachedDef(Sib);
  } else {
    // Otherwise, traverse the sibling list of the reached defs and remove
    // DA from it.
    while (TA.Id != 0) {
      NodeId S = TA.Addr->getSibling();
      if (S == DA.Id) {
        TA.Addr->setSibling(Sib);
        break;
      }
      TA = addr<DefNode*>(S);
    }
  }

  // Splice the DA's reached defs into the RDA's reached def chain.
  if (!ReachedDefs.empty()) {
    auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
    Last.Addr->setSibling(RDA.Addr->getReachedDef());
    RDA.Addr->setReachedDef(ReachedDefs.front().Id);
  }
  // Splice the DA's reached uses into the RDA's reached use chain.
  if (!ReachedUses.empty()) {
    auto Last = NodeAddr<UseNode*>(ReachedUses.back());
    Last.Addr->setSibling(RDA.Addr->getReachedUse());
    RDA.Addr->setReachedUse(ReachedUses.front().Id);
  }
}