1 //===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file This implements the ScheduleDAGInstrs class, which implements 10 /// re-scheduling of MachineInstrs. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/CodeGen/ScheduleDAGInstrs.h" 15 16 #include "llvm/ADT/IntEqClasses.h" 17 #include "llvm/ADT/MapVector.h" 18 #include "llvm/ADT/SmallVector.h" 19 #include "llvm/ADT/SparseSet.h" 20 #include "llvm/ADT/iterator_range.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/ValueTracking.h" 23 #include "llvm/CodeGen/LiveIntervals.h" 24 #include "llvm/CodeGen/LivePhysRegs.h" 25 #include "llvm/CodeGen/MachineBasicBlock.h" 26 #include "llvm/CodeGen/MachineFrameInfo.h" 27 #include "llvm/CodeGen/MachineFunction.h" 28 #include "llvm/CodeGen/MachineInstr.h" 29 #include "llvm/CodeGen/MachineInstrBundle.h" 30 #include "llvm/CodeGen/MachineMemOperand.h" 31 #include "llvm/CodeGen/MachineOperand.h" 32 #include "llvm/CodeGen/MachineRegisterInfo.h" 33 #include "llvm/CodeGen/PseudoSourceValue.h" 34 #include "llvm/CodeGen/RegisterPressure.h" 35 #include "llvm/CodeGen/ScheduleDAG.h" 36 #include "llvm/CodeGen/ScheduleDFS.h" 37 #include "llvm/CodeGen/SlotIndexes.h" 38 #include "llvm/CodeGen/TargetRegisterInfo.h" 39 #include "llvm/CodeGen/TargetSubtargetInfo.h" 40 #include "llvm/Config/llvm-config.h" 41 #include "llvm/IR/Constants.h" 42 #include "llvm/IR/Function.h" 43 #include "llvm/IR/Type.h" 44 #include "llvm/IR/Value.h" 45 #include "llvm/MC/LaneBitmask.h" 46 #include "llvm/MC/MCRegisterInfo.h" 47 #include "llvm/Support/Casting.h" 48 #include "llvm/Support/CommandLine.h" 49 #include "llvm/Support/Compiler.h" 50 #include "llvm/Support/Debug.h" 51 #include "llvm/Support/ErrorHandling.h" 52 #include "llvm/Support/Format.h" 53 #include "llvm/Support/raw_ostream.h" 54 #include <algorithm> 55 #include <cassert> 56 #include <iterator> 57 #include <utility> 58 #include <vector> 59 60 using namespace llvm; 61 62 #define DEBUG_TYPE "machine-scheduler" 63 64 static cl::opt<bool> 65 EnableAASchedMI("enable-aa-sched-mi", cl::Hidden, 66 cl::desc("Enable use of AA during MI DAG construction")); 67 68 static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, 69 cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction")); 70 71 // Note: the two options below might be used in tuning compile time vs 72 // output quality. Setting HugeRegion so large that it will never be 73 // reached means best-effort, but may be slow. 74 75 // When Stores and Loads maps (or NonAliasStores and NonAliasLoads) 76 // together hold this many SUs, a reduction of maps will be done. 77 static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden, 78 cl::init(1000), cl::desc("The limit to use while constructing the DAG " 79 "prior to scheduling, at which point a trade-off " 80 "is made to avoid excessive compile time.")); 81 82 static cl::opt<unsigned> ReductionSize( 83 "dag-maps-reduction-size", cl::Hidden, 84 cl::desc("A huge scheduling region will have maps reduced by this many " 85 "nodes at a time. 
Defaults to HugeRegion / 2."));

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> SchedPrintCycles(
    "sched-print-cycles", cl::Hidden, cl::init(false),
    cl::desc("Report top/bottom cycles when dumping SUnit instances"));
#endif

static unsigned getReductionSize() {
  // Always reduce a huge region with half of the elements, except
  // when the user sets this number explicitly.
  if (ReductionSize.getNumOccurrences() == 0)
    return HugeRegion / 2;
  return ReductionSize;
}

static void dumpSUList(const ScheduleDAGInstrs::SUList &L) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "{ ";
  for (const SUnit *SU : L) {
    dbgs() << "SU(" << SU->NodeNum << ")";
    if (SU != L.back())
      dbgs() << ", ";
  }
  dbgs() << "}\n";
#endif
}

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool RemoveKillFlags)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
      RemoveKillFlags(RemoveKillFlags),
      UnknownValue(UndefValue::get(
          Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) {
  DbgValues.clear();

  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(&ST);
}

/// If this machine instr has memory reference information and it can be
/// tracked to a normal reference to a known object, return the Value
/// for that object. This function returns false if the memory location is
/// unknown or may alias anything.
static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo &MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto AllMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      // TODO: Figure out whether isAtomic is really necessary (see D57601).
      if (MMO->isVolatile() || MMO->isAtomic())
        return false;

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique
        // PseudoSourceValue objects. Two PseudoSourceValues might refer to
        // the same or overlapping locations. The client code calling this
        // function assumes this is not the case. So return a conservative
        // answer of no known object.
        if (MFI.hasTailCall())
          return false;

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        // such aliases.
        if (PSV->isAliased(&MFI))
          return false;

        bool MayAlias = PSV->mayAlias(&MFI);
        Objects.emplace_back(PSV, MayAlias);
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        if (!getUnderlyingObjectsForCodeGen(V, Objs))
          return false;

        for (Value *V : Objs) {
          assert(isIdentifiedObject(V));
          Objects.emplace_back(V, true);
        }
      } else
        return false;
    }
    return true;
  };

  if (!AllMMOsOkay()) {
    Objects.clear();
    return false;
  }

  return true;
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
186 BB = nullptr; 187 } 188 189 void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb, 190 MachineBasicBlock::iterator begin, 191 MachineBasicBlock::iterator end, 192 unsigned regioninstrs) { 193 assert(bb == BB && "startBlock should set BB"); 194 RegionBegin = begin; 195 RegionEnd = end; 196 NumRegionInstrs = regioninstrs; 197 } 198 199 void ScheduleDAGInstrs::exitRegion() { 200 // Nothing to do. 201 } 202 203 void ScheduleDAGInstrs::addSchedBarrierDeps() { 204 MachineInstr *ExitMI = 205 RegionEnd != BB->end() 206 ? &*skipDebugInstructionsBackward(RegionEnd, RegionBegin) 207 : nullptr; 208 ExitSU.setInstr(ExitMI); 209 // Add dependencies on the defs and uses of the instruction. 210 if (ExitMI) { 211 for (const MachineOperand &MO : ExitMI->all_uses()) { 212 Register Reg = MO.getReg(); 213 if (Reg.isPhysical()) { 214 for (MCRegUnit Unit : TRI->regunits(Reg)) 215 Uses.insert(PhysRegSUOper(&ExitSU, -1, Unit)); 216 } else if (Reg.isVirtual() && MO.readsReg()) { 217 addVRegUseDeps(&ExitSU, MO.getOperandNo()); 218 } 219 } 220 } 221 if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) { 222 // For others, e.g. fallthrough, conditional branch, assume the exit 223 // uses all the registers that are livein to the successor blocks. 224 for (const MachineBasicBlock *Succ : BB->successors()) { 225 for (const auto &LI : Succ->liveins()) { 226 for (MCRegUnitMaskIterator U(LI.PhysReg, TRI); U.isValid(); ++U) { 227 auto [Unit, Mask] = *U; 228 if ((Mask & LI.LaneMask).any() && !Uses.contains(Unit)) 229 Uses.insert(PhysRegSUOper(&ExitSU, -1, Unit)); 230 } 231 } 232 } 233 } 234 } 235 236 /// MO is an operand of SU's instruction that defines a physical register. Adds 237 /// data dependencies from SU to any uses of the physical register. 238 void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { 239 const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx); 240 assert(MO.isDef() && "expect physreg def"); 241 Register Reg = MO.getReg(); 242 243 // Ask the target if address-backscheduling is desirable, and if so how much. 244 const TargetSubtargetInfo &ST = MF.getSubtarget(); 245 246 // Only use any non-zero latency for real defs/uses, in contrast to 247 // "fake" operands added by regalloc. 248 const MCInstrDesc &DefMIDesc = SU->getInstr()->getDesc(); 249 bool ImplicitPseudoDef = (OperIdx >= DefMIDesc.getNumOperands() && 250 !DefMIDesc.hasImplicitDefOfPhysReg(Reg)); 251 for (MCRegUnit Unit : TRI->regunits(Reg)) { 252 for (RegUnit2SUnitsMap::iterator I = Uses.find(Unit); I != Uses.end(); 253 ++I) { 254 SUnit *UseSU = I->SU; 255 if (UseSU == SU) 256 continue; 257 258 // Adjust the dependence latency using operand def/use information, 259 // then allow the target to perform its own adjustments. 260 MachineInstr *UseInstr = nullptr; 261 int UseOpIdx = I->OpIdx; 262 bool ImplicitPseudoUse = false; 263 SDep Dep; 264 if (UseOpIdx < 0) { 265 Dep = SDep(SU, SDep::Artificial); 266 } else { 267 // Set the hasPhysRegDefs only for physreg defs that have a use within 268 // the scheduling region. 
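        // The data edge built below gets its latency from the scheduling
        // model unless either side is a regalloc-added implicit operand
        // (ImplicitPseudoDef/ImplicitPseudoUse), in which case the latency is
        // forced to 0 before the target gets a chance to adjust it.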
        SU->hasPhysRegDefs = true;

        UseInstr = UseSU->getInstr();
        Register UseReg = UseInstr->getOperand(UseOpIdx).getReg();
        const MCInstrDesc &UseMIDesc = UseInstr->getDesc();
        ImplicitPseudoUse = UseOpIdx >= ((int)UseMIDesc.getNumOperands()) &&
                            !UseMIDesc.hasImplicitUseOfPhysReg(UseReg);

        Dep = SDep(SU, SDep::Data, UseReg);
      }
      if (!ImplicitPseudoDef && !ImplicitPseudoUse) {
        Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
                                                        UseInstr, UseOpIdx));
      } else {
        Dep.setLatency(0);
      }
      ST.adjustSchedDependency(SU, OperIdx, UseSU, UseOpIdx, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// Adds register dependencies (data, anti, and output) from this SUnit
/// to following instructions in the same scheduling region that depend on the
/// physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();
  // We do not need to track any dependencies for constant registers.
  if (MRI.isConstantPhysReg(Reg))
    return;

  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  // there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegUnit Unit : TRI->regunits(Reg)) {
    for (RegUnit2SUnitsMap::iterator I = Defs.find(Unit); I != Defs.end();
         ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      MachineInstr *DefInstr = DefSU->getInstr();
      MachineOperand &DefMO = DefInstr->getOperand(I->OpIdx);
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() || !DefMO.isDead())) {
        SDep Dep(SU, Kind, DefMO.getReg());
        if (Kind != SDep::Anti) {
          Dep.setLatency(
              SchedModel.computeOutputLatency(MI, OperIdx, DefInstr));
        }
        ST.adjustSchedDependency(SU, OperIdx, DefSU, I->OpIdx, Dep);
        DefSU->addPred(Dep);
      }
    }
  }

  if (MO.isUse()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    for (MCRegUnit Unit : TRI->regunits(Reg))
      Uses.insert(PhysRegSUOper(SU, OperIdx, Unit));
    if (RemoveKillFlags)
      MO.setIsKill(false);
  } else {
    addPhysRegDataDeps(SU, OperIdx);

    // Clear previous uses and defs of this register and its subregisters.
    for (MCRegUnit Unit : TRI->regunits(Reg)) {
      Uses.eraseAll(Unit);
      if (!MO.isDead())
        Defs.eraseAll(Unit);
    }

    if (MO.isDead() && SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
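      // The loop below walks each regunit's DefList from the back and erases
      // the call entries it finds, stopping at the first non-call def (or the
      // head of the list); the current call is then re-inserted below as the
      // single representative.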
357 for (MCRegUnit Unit : TRI->regunits(Reg)) { 358 RegUnit2SUnitsMap::RangePair P = Defs.equal_range(Unit); 359 RegUnit2SUnitsMap::iterator B = P.first; 360 RegUnit2SUnitsMap::iterator I = P.second; 361 for (bool isBegin = I == B; !isBegin; /* empty */) { 362 isBegin = (--I) == B; 363 if (!I->SU->isCall) 364 break; 365 I = Defs.erase(I); 366 } 367 } 368 } 369 370 // Defs are pushed in the order they are visited and never reordered. 371 for (MCRegUnit Unit : TRI->regunits(Reg)) 372 Defs.insert(PhysRegSUOper(SU, OperIdx, Unit)); 373 } 374 } 375 376 LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const 377 { 378 Register Reg = MO.getReg(); 379 // No point in tracking lanemasks if we don't have interesting subregisters. 380 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 381 if (!RC.HasDisjunctSubRegs) 382 return LaneBitmask::getAll(); 383 384 unsigned SubReg = MO.getSubReg(); 385 if (SubReg == 0) 386 return RC.getLaneMask(); 387 return TRI->getSubRegIndexLaneMask(SubReg); 388 } 389 390 bool ScheduleDAGInstrs::deadDefHasNoUse(const MachineOperand &MO) { 391 auto RegUse = CurrentVRegUses.find(MO.getReg()); 392 if (RegUse == CurrentVRegUses.end()) 393 return true; 394 return (RegUse->LaneMask & getLaneMaskForMO(MO)).none(); 395 } 396 397 /// Adds register output and data dependencies from this SUnit to instructions 398 /// that occur later in the same scheduling region if they read from or write to 399 /// the virtual register defined at OperIdx. 400 /// 401 /// TODO: Hoist loop induction variable increments. This has to be 402 /// reevaluated. Generally, IV scheduling should be done before coalescing. 403 void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { 404 MachineInstr *MI = SU->getInstr(); 405 MachineOperand &MO = MI->getOperand(OperIdx); 406 Register Reg = MO.getReg(); 407 408 LaneBitmask DefLaneMask; 409 LaneBitmask KillLaneMask; 410 if (TrackLaneMasks) { 411 bool IsKill = MO.getSubReg() == 0 || MO.isUndef(); 412 DefLaneMask = getLaneMaskForMO(MO); 413 // If we have a <read-undef> flag, none of the lane values comes from an 414 // earlier instruction. 415 KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask; 416 417 if (MO.getSubReg() != 0 && MO.isUndef()) { 418 // There may be other subregister defs on the same instruction of the same 419 // register in later operands. The lanes of other defs will now be live 420 // after this instruction, so these should not be treated as killed by the 421 // instruction even though they appear to be killed in this one operand. 422 for (const MachineOperand &OtherMO : 423 llvm::drop_begin(MI->operands(), OperIdx + 1)) 424 if (OtherMO.isReg() && OtherMO.isDef() && OtherMO.getReg() == Reg) 425 KillLaneMask &= ~getLaneMaskForMO(OtherMO); 426 } 427 428 // Clear undef flag, we'll re-add it later once we know which subregister 429 // Def is first. 430 MO.setIsUndef(false); 431 } else { 432 DefLaneMask = LaneBitmask::getAll(); 433 KillLaneMask = LaneBitmask::getAll(); 434 } 435 436 if (MO.isDead()) { 437 assert(deadDefHasNoUse(MO) && "Dead defs should have no uses"); 438 } else { 439 // Add data dependence to all uses we found so far. 440 const TargetSubtargetInfo &ST = MF.getSubtarget(); 441 for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg), 442 E = CurrentVRegUses.end(); I != E; /*empty*/) { 443 LaneBitmask LaneMask = I->LaneMask; 444 // Ignore uses of other lanes. 
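      // A use stays in CurrentVRegUses until defs have covered all of its
      // lanes (KillLaneMask); data edges are only added for the lanes this
      // operand actually writes (DefLaneMask).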
      if ((LaneMask & KillLaneMask).none()) {
        ++I;
        continue;
      }

      if ((LaneMask & DefLaneMask).any()) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
                                                        I->OperandIndex));
        ST.adjustSchedDependency(SU, OperIdx, UseSU, I->OperandIndex, Dep);
        UseSU->addPred(Dep);
      }

      LaneMask &= ~KillLaneMask;
      // If we found a Def for all lanes of this use, remove it from the list.
      if (LaneMask.any()) {
        I->LaneMask = LaneMask;
        ++I;
      } else
        I = CurrentVRegUses.erase(I);
    }
  }

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask).none())
      continue;
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representation tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access parts
    // of the reg we care about the full one.
    if (DefSU == SU)
      continue;
    SDep Dep(SU, SDep::Output, Reg);
    Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
    DefSU->addPred(Dep);

    // Update current definition. This can get tricky if the def covered a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    V2SU.SU = SU;
    V2SU.LaneMask = OverlapMask;
    if (NonOverlapMask.any())
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU));
  }
  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  if (LaneMask.any())
    CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
}

/// Adds a register data dependency if the instruction that defines the
/// virtual register used at OperIdx is mapped to an SUnit. Add a register
/// antidependency from this SUnit to instructions that occur later in the same
/// scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  assert(!MI->isDebugOrPseudoInstr());

  const MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ?
getLaneMaskForMO(MO) 531 : LaneBitmask::getAll(); 532 CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU)); 533 534 // Add antidependences to the following defs of the vreg. 535 for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg), 536 CurrentVRegDefs.end())) { 537 // Ignore defs for unrelated lanes. 538 LaneBitmask PrevDefLaneMask = V2SU.LaneMask; 539 if ((PrevDefLaneMask & LaneMask).none()) 540 continue; 541 if (V2SU.SU == SU) 542 continue; 543 544 V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg)); 545 } 546 } 547 548 /// Returns true if MI is an instruction we are unable to reason about 549 /// (like a call or something with unmodeled side effects). 550 static inline bool isGlobalMemoryObject(MachineInstr *MI) { 551 return MI->isCall() || MI->hasUnmodeledSideEffects() || 552 (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad()); 553 } 554 555 void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb, 556 unsigned Latency) { 557 if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) { 558 SDep Dep(SUa, SDep::MayAliasMem); 559 Dep.setLatency(Latency); 560 SUb->addPred(Dep); 561 } 562 } 563 564 /// Creates an SUnit for each real instruction, numbered in top-down 565 /// topological order. The instruction order A < B, implies that no edge exists 566 /// from B to A. 567 /// 568 /// Map each real instruction to its SUnit. 569 /// 570 /// After initSUnits, the SUnits vector cannot be resized and the scheduler may 571 /// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs 572 /// instead of pointers. 573 /// 574 /// MachineScheduler relies on initSUnits numbering the nodes by their order in 575 /// the original instruction list. 576 void ScheduleDAGInstrs::initSUnits() { 577 // We'll be allocating one SUnit for each real instruction in the region, 578 // which is contained within a basic block. 579 SUnits.reserve(NumRegionInstrs); 580 581 for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) { 582 if (MI.isDebugOrPseudoInstr()) 583 continue; 584 585 SUnit *SU = newSUnit(&MI); 586 MISUnitMap[&MI] = SU; 587 588 SU->isCall = MI.isCall(); 589 SU->isCommutable = MI.isCommutable(); 590 591 // Assign the Latency field of SU using target-provided information. 592 SU->Latency = SchedModel.computeInstrLatency(SU->getInstr()); 593 594 // If this SUnit uses a reserved or unbuffered resource, mark it as such. 595 // 596 // Reserved resources block an instruction from issuing and stall the 597 // entire pipeline. These are identified by BufferSize=0. 598 // 599 // Unbuffered resources prevent execution of subsequent instructions that 600 // require the same resources. This is used for in-order execution pipelines 601 // within an out-of-order core. These are identified by BufferSize=1. 602 if (SchedModel.hasInstrSchedModel()) { 603 const MCSchedClassDesc *SC = getSchedClass(SU); 604 for (const MCWriteProcResEntry &PRE : 605 make_range(SchedModel.getWriteProcResBegin(SC), 606 SchedModel.getWriteProcResEnd(SC))) { 607 switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) { 608 case 0: 609 SU->hasReservedResource = true; 610 break; 611 case 1: 612 SU->isUnbuffered = true; 613 break; 614 default: 615 break; 616 } 617 } 618 } 619 } 620 } 621 622 class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> { 623 /// Current total number of SUs in map. 624 unsigned NumNodes = 0; 625 626 /// 1 for loads, 0 for stores. 
(see comment in SUList) 627 unsigned TrueMemOrderLatency; 628 629 public: 630 Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {} 631 632 /// To keep NumNodes up to date, insert() is used instead of 633 /// this operator w/ push_back(). 634 ValueType &operator[](const SUList &Key) { 635 llvm_unreachable("Don't use. Use insert() instead."); }; 636 637 /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling 638 /// reduce(). 639 void inline insert(SUnit *SU, ValueType V) { 640 MapVector::operator[](V).push_back(SU); 641 NumNodes++; 642 } 643 644 /// Clears the list of SUs mapped to V. 645 void inline clearList(ValueType V) { 646 iterator Itr = find(V); 647 if (Itr != end()) { 648 assert(NumNodes >= Itr->second.size()); 649 NumNodes -= Itr->second.size(); 650 651 Itr->second.clear(); 652 } 653 } 654 655 /// Clears map from all contents. 656 void clear() { 657 MapVector<ValueType, SUList>::clear(); 658 NumNodes = 0; 659 } 660 661 unsigned inline size() const { return NumNodes; } 662 663 /// Counts the number of SUs in this map after a reduction. 664 void reComputeSize() { 665 NumNodes = 0; 666 for (auto &I : *this) 667 NumNodes += I.second.size(); 668 } 669 670 unsigned inline getTrueMemOrderLatency() const { 671 return TrueMemOrderLatency; 672 } 673 674 void dump(); 675 }; 676 677 void ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 678 Value2SUsMap &Val2SUsMap) { 679 for (auto &I : Val2SUsMap) 680 addChainDependencies(SU, I.second, 681 Val2SUsMap.getTrueMemOrderLatency()); 682 } 683 684 void ScheduleDAGInstrs::addChainDependencies(SUnit *SU, 685 Value2SUsMap &Val2SUsMap, 686 ValueType V) { 687 Value2SUsMap::iterator Itr = Val2SUsMap.find(V); 688 if (Itr != Val2SUsMap.end()) 689 addChainDependencies(SU, Itr->second, 690 Val2SUsMap.getTrueMemOrderLatency()); 691 } 692 693 void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) { 694 assert(BarrierChain != nullptr); 695 696 for (auto &[V, SUs] : map) { 697 (void)V; 698 for (auto *SU : SUs) 699 SU->addPredBarrier(BarrierChain); 700 } 701 map.clear(); 702 } 703 704 void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) { 705 assert(BarrierChain != nullptr); 706 707 // Go through all lists of SUs. 708 for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) { 709 Value2SUsMap::iterator CurrItr = I++; 710 SUList &sus = CurrItr->second; 711 SUList::iterator SUItr = sus.begin(), SUEE = sus.end(); 712 for (; SUItr != SUEE; ++SUItr) { 713 // Stop on BarrierChain or any instruction above it. 714 if ((*SUItr)->NodeNum <= BarrierChain->NodeNum) 715 break; 716 717 (*SUItr)->addPredBarrier(BarrierChain); 718 } 719 720 // Remove also the BarrierChain from list if present. 721 if (SUItr != SUEE && *SUItr == BarrierChain) 722 SUItr++; 723 724 // Remove all SUs that are now successors of BarrierChain. 725 if (SUItr != sus.begin()) 726 sus.erase(sus.begin(), SUItr); 727 } 728 729 // Remove all entries with empty su lists. 730 map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) { 731 return (mapEntry.second.empty()); }); 732 733 // Recompute the size of the map (NumNodes). 734 map.reComputeSize(); 735 } 736 737 void ScheduleDAGInstrs::buildSchedGraph(AAResults *AA, 738 RegPressureTracker *RPTracker, 739 PressureDiffs *PDiffs, 740 LiveIntervals *LIS, 741 bool TrackLaneMasks) { 742 const TargetSubtargetInfo &ST = MF.getSubtarget(); 743 bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI 744 : ST.useAA(); 745 AAForDep = UseAA ? 
    AA : nullptr;

  BarrierChain = nullptr;

  this->TrackLaneMasks = TrackLaneMasks;
  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Each MI's memory operand(s) is analyzed to produce a list of underlying
  // objects. The SU is then inserted in the SUList(s) mapped from the
  // Value(s). Each Value thus gets mapped to lists of SUs depending
  // on it, stores and loads kept separately. Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);

  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and therefore have their own 'NonAlias' domain. E.g.
  // spill / reload instructions never alias LLVM IR Values. It would
  // be nice to assume that this type of memory access always has
  // proper memory operand modelling, and is therefore never
  // unanalyzable, but this is conservatively not done.
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);

  // Track all instructions that may raise floating-point exceptions.
  // These do not depend on one another (or normal loads or stores), but
  // must not be rescheduled across global barriers. Note that we don't
  // really need a "map" here since we don't track those MIs by value;
  // using the same Value2SUsMap data type here is simply a matter of
  // convenience.
  Value2SUsMap FPExceptions;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
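  // DBG_VALUEs do not get SUnits; DbgMI remembers the DBG_VALUE seen most
  // recently while walking upwards, and it is paired in DbgValues with the
  // instruction immediately above it so the scheduler can put it back next to
  // that instruction after reordering. A DBG_VALUE at the very top of the
  // region ends up in FirstDbgValue instead.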
808 MachineInstr *DbgMI = nullptr; 809 for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin; 810 MII != MIE; --MII) { 811 MachineInstr &MI = *std::prev(MII); 812 if (DbgMI) { 813 DbgValues.emplace_back(DbgMI, &MI); 814 DbgMI = nullptr; 815 } 816 817 if (MI.isDebugValue() || MI.isDebugPHI()) { 818 DbgMI = &MI; 819 continue; 820 } 821 822 if (MI.isDebugLabel() || MI.isDebugRef() || MI.isPseudoProbe()) 823 continue; 824 825 SUnit *SU = MISUnitMap[&MI]; 826 assert(SU && "No SUnit mapped to this MI"); 827 828 if (RPTracker) { 829 RegisterOperands RegOpers; 830 RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false); 831 if (TrackLaneMasks) { 832 SlotIndex SlotIdx = LIS->getInstructionIndex(MI); 833 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx); 834 } 835 if (PDiffs != nullptr) 836 PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI); 837 838 if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI) 839 RPTracker->recedeSkipDebugValues(); 840 assert(&*RPTracker->getPos() == &MI && "RPTracker in sync"); 841 RPTracker->recede(RegOpers); 842 } 843 844 assert( 845 (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) && 846 "Cannot schedule terminators or labels!"); 847 848 // Add register-based dependencies (data, anti, and output). 849 // For some instructions (calls, returns, inline-asm, etc.) there can 850 // be explicit uses and implicit defs, in which case the use will appear 851 // on the operand list before the def. Do two passes over the operand 852 // list to make sure that defs are processed before any uses. 853 bool HasVRegDef = false; 854 for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) { 855 const MachineOperand &MO = MI.getOperand(j); 856 if (!MO.isReg() || !MO.isDef()) 857 continue; 858 Register Reg = MO.getReg(); 859 if (Reg.isPhysical()) { 860 addPhysRegDeps(SU, j); 861 } else if (Reg.isVirtual()) { 862 HasVRegDef = true; 863 addVRegDefDeps(SU, j); 864 } 865 } 866 // Now process all uses. 867 for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) { 868 const MachineOperand &MO = MI.getOperand(j); 869 // Only look at use operands. 870 // We do not need to check for MO.readsReg() here because subsequent 871 // subregister defs will get output dependence edges and need no 872 // additional use dependencies. 873 if (!MO.isReg() || !MO.isUse()) 874 continue; 875 Register Reg = MO.getReg(); 876 if (Reg.isPhysical()) { 877 addPhysRegDeps(SU, j); 878 } else if (Reg.isVirtual() && MO.readsReg()) { 879 addVRegUseDeps(SU, j); 880 } 881 } 882 883 // If we haven't seen any uses in this scheduling region, create a 884 // dependence edge to ExitSU to model the live-out latency. This is required 885 // for vreg defs with no in-region use, and prefetches with no vreg def. 886 // 887 // FIXME: NumDataSuccs would be more precise than NumSuccs here. This 888 // check currently relies on being called before adding chain deps. 889 if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) { 890 SDep Dep(SU, SDep::Artificial); 891 Dep.setLatency(SU->Latency - 1); 892 ExitSU.addPred(Dep); 893 } 894 895 // Add memory dependencies (Note: isStoreToStackSlot and 896 // isLoadFromStackSLot are not usable after stack slots are lowered to 897 // actual addresses). 898 899 // This is a barrier event that acts as a pivotal node in the DAG. 900 if (isGlobalMemoryObject(&MI)) { 901 902 // Become the barrier chain. 
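      // The region is walked bottom-up, so a previously recorded BarrierChain
      // lies below this instruction; chain it to the new barrier first, then
      // let this SU take over as the anchor for everything seen later (above).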
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);
      BarrierChain = SU;

      LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);
      addBarrierChain(FPExceptions);

      continue;
    }

    // Instructions that may raise FP exceptions may not be moved
    // across any global barriers.
    if (MI.mayRaiseFPException()) {
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);

      FPExceptions.insert(SU, UnknownValue);

      if (FPExceptions.size() >= HugeRegion) {
        LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n";);
        Value2SUsMap empty;
        reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize());
      }
    }

    // If it's not a store or a variant load, we're done.
    if (!MI.mayStore() &&
        !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad()))
      continue;

    // Always add a dependency edge to BarrierChain if present.
    if (BarrierChain)
      BarrierChain->addPredBarrier(SU);

    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    // SU depends on.
    UnderlyingObjectsVector Objs;
    bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs,
                                                  MF.getDataLayout());

    if (MI.mayStore()) {
      if (!ObjsFound) {
        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
      } else {
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);
        }
        // Update the store map after all chains have been added to avoid
        // adding a self-loop edge if multiple underlying objects are present.
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Map this store to V.
          (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V);
        }
        // The store may have dependencies to unanalyzable loads and
        // stores.
        addChainDependencies(SU, Loads, UnknownValue);
        addChainDependencies(SU, Stores, UnknownValue);
      }
    } else { // SU is a load.
      if (!ObjsFound) {
        // An unknown load depends on all stores.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);

        Loads.insert(SU, UnknownValue);
      } else {
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add precise dependencies against all previously seen stores
          // mapping to the same Value(s).
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);

          // Map this load to V.
          (ThisMayAlias ?
Loads : NonAliasLoads).insert(SU, V); 1004 } 1005 // The load may have dependencies to unanalyzable stores. 1006 addChainDependencies(SU, Stores, UnknownValue); 1007 } 1008 } 1009 1010 // Reduce maps if they grow huge. 1011 if (Stores.size() + Loads.size() >= HugeRegion) { 1012 LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";); 1013 reduceHugeMemNodeMaps(Stores, Loads, getReductionSize()); 1014 } 1015 if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) { 1016 LLVM_DEBUG( 1017 dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";); 1018 reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize()); 1019 } 1020 } 1021 1022 if (DbgMI) 1023 FirstDbgValue = DbgMI; 1024 1025 Defs.clear(); 1026 Uses.clear(); 1027 CurrentVRegDefs.clear(); 1028 CurrentVRegUses.clear(); 1029 1030 Topo.MarkDirty(); 1031 } 1032 1033 raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue* PSV) { 1034 PSV->printCustom(OS); 1035 return OS; 1036 } 1037 1038 void ScheduleDAGInstrs::Value2SUsMap::dump() { 1039 for (const auto &[ValType, SUs] : *this) { 1040 if (isa<const Value *>(ValType)) { 1041 const Value *V = cast<const Value *>(ValType); 1042 if (isa<UndefValue>(V)) 1043 dbgs() << "Unknown"; 1044 else 1045 V->printAsOperand(dbgs()); 1046 } else if (isa<const PseudoSourceValue *>(ValType)) 1047 dbgs() << cast<const PseudoSourceValue *>(ValType); 1048 else 1049 llvm_unreachable("Unknown Value type."); 1050 1051 dbgs() << " : "; 1052 dumpSUList(SUs); 1053 } 1054 } 1055 1056 void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores, 1057 Value2SUsMap &loads, unsigned N) { 1058 LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump(); 1059 dbgs() << "Loading SUnits:\n"; loads.dump()); 1060 1061 // Insert all SU's NodeNums into a vector and sort it. 1062 std::vector<unsigned> NodeNums; 1063 NodeNums.reserve(stores.size() + loads.size()); 1064 for (const auto &[V, SUs] : stores) { 1065 (void)V; 1066 for (const auto *SU : SUs) 1067 NodeNums.push_back(SU->NodeNum); 1068 } 1069 for (const auto &[V, SUs] : loads) { 1070 (void)V; 1071 for (const auto *SU : SUs) 1072 NodeNums.push_back(SU->NodeNum); 1073 } 1074 llvm::sort(NodeNums); 1075 1076 // The N last elements in NodeNums will be removed, and the SU with 1077 // the lowest NodeNum of them will become the new BarrierChain to 1078 // let the not yet seen SUs have a dependency to the removed SUs. 1079 assert(N <= NodeNums.size()); 1080 SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)]; 1081 if (BarrierChain) { 1082 // The aliasing and non-aliasing maps reduce independently of each 1083 // other, but share a common BarrierChain. Check if the 1084 // newBarrierChain is above the former one. If it is not, it may 1085 // introduce a loop to use newBarrierChain, so keep the old one. 
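    // NodeNums follow the original top-down instruction order, so a strictly
    // smaller NodeNum means newBarrierChain sits above the current one.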
    if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
      BarrierChain->addPredBarrier(newBarrierChain);
      BarrierChain = newBarrierChain;
      LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
    }
    else
      LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
  }
  else
    BarrierChain = newBarrierChain;

  insertBarrierChain(stores);
  insertBarrierChain(loads);

  LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());
}

static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
                        MachineInstr &MI, bool addToLiveRegs) {
  for (MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;

    // Things that are available after the instruction are killed by it.
    bool IsKill = LiveRegs.available(MRI, Reg);
    MO.setIsKill(IsKill);
    if (addToLiveRegs)
      LiveRegs.addReg(Reg);
  }
}

void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
  LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');

  LiveRegs.init(*TRI);
  LiveRegs.addLiveOuts(MBB);

  // Examine block from end to start...
  for (MachineInstr &MI : llvm::reverse(MBB)) {
    if (MI.isDebugOrPseudoInstr())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark the register and all its subregs, since
    // they are completely defined.
    for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
      const MachineOperand &MO = *O;
      if (MO.isReg()) {
        if (!MO.isDef())
          continue;
        Register Reg = MO.getReg();
        if (!Reg)
          continue;
        LiveRegs.removeReg(Reg);
      } else if (MO.isRegMask()) {
        LiveRegs.removeRegsInMask(MO);
      }
    }

    // If there is a bundle header, fix it up first.
    if (!MI.isBundled()) {
      toggleKills(MRI, LiveRegs, MI, true);
    } else {
      MachineBasicBlock::instr_iterator Bundle = MI.getIterator();
      if (MI.isBundle())
        toggleKills(MRI, LiveRegs, MI, false);

      // Some targets make the (questionable) assumption that the instructions
      // inside the bundle are ordered and consequently only the last use of
      // a register inside the bundle can kill it.
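      // Walk to the last instruction of the bundle, then visit the bundled
      // instructions bottom-up so the last reader of each register is the one
      // that receives the kill flag.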
      MachineBasicBlock::instr_iterator I = std::next(Bundle);
      while (I->isBundledWithSucc())
        ++I;
      do {
        if (!I->isDebugOrPseudoInstr())
          toggleKills(MRI, LiveRegs, *I, true);
        --I;
      } while (I != Bundle);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dumpNodeName(SU);
  if (SchedPrintCycles)
    dbgs() << " [TopReadyCycle = " << SU.TopReadyCycle
           << ", BottomReadyCycle = " << SU.BotReadyCycle << "]";
  dbgs() << ": ";
  SU.getInstr()->dump();
#endif
}

void ScheduleDAGInstrs::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits)
    dumpNodeAll(SU);
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*IsStandalone=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

bool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPredQueued(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

namespace llvm {

/// Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit *, const SUnit*>> ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;      ///< Parent node (member of the parent subtree).
    unsigned SubInstrCount = 0; ///< Instr count in this tree only, not
                                /// children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Returns true if this node has been visited by the DFS traversal.
1266 /// 1267 /// During visitPostorderNode the Node's SubtreeID is assigned to the Node 1268 /// ID. Later, SubtreeID is updated but remains valid. 1269 bool isVisited(const SUnit *SU) const { 1270 return R.DFSNodeData[SU->NodeNum].SubtreeID 1271 != SchedDFSResult::InvalidSubtreeID; 1272 } 1273 1274 /// Initializes this node's instruction count. We don't need to flag the node 1275 /// visited until visitPostorder because the DAG cannot have cycles. 1276 void visitPreorder(const SUnit *SU) { 1277 R.DFSNodeData[SU->NodeNum].InstrCount = 1278 SU->getInstr()->isTransient() ? 0 : 1; 1279 } 1280 1281 /// Called once for each node after all predecessors are visited. Revisit this 1282 /// node's predecessors and potentially join them now that we know the ILP of 1283 /// the other predecessors. 1284 void visitPostorderNode(const SUnit *SU) { 1285 // Mark this node as the root of a subtree. It may be joined with its 1286 // successors later. 1287 R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum; 1288 RootData RData(SU->NodeNum); 1289 RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1; 1290 1291 // If any predecessors are still in their own subtree, they either cannot be 1292 // joined or are large enough to remain separate. If this parent node's 1293 // total instruction count is not greater than a child subtree by at least 1294 // the subtree limit, then try to join it now since splitting subtrees is 1295 // only useful if multiple high-pressure paths are possible. 1296 unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount; 1297 for (const SDep &PredDep : SU->Preds) { 1298 if (PredDep.getKind() != SDep::Data) 1299 continue; 1300 unsigned PredNum = PredDep.getSUnit()->NodeNum; 1301 if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit) 1302 joinPredSubtree(PredDep, SU, /*CheckLimit=*/false); 1303 1304 // Either link or merge the TreeData entry from the child to the parent. 1305 if (R.DFSNodeData[PredNum].SubtreeID == PredNum) { 1306 // If the predecessor's parent is invalid, this is a tree edge and the 1307 // current node is the parent. 1308 if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID) 1309 RootSet[PredNum].ParentNodeID = SU->NodeNum; 1310 } 1311 else if (RootSet.count(PredNum)) { 1312 // The predecessor is not a root, but is still in the root set. This 1313 // must be the new parent that it was just joined to. Note that 1314 // RootSet[PredNum].ParentNodeID may either be invalid or may still be 1315 // set to the original parent. 1316 RData.SubInstrCount += RootSet[PredNum].SubInstrCount; 1317 RootSet.erase(PredNum); 1318 } 1319 } 1320 RootSet[SU->NodeNum] = RData; 1321 } 1322 1323 /// Called once for each tree edge after calling visitPostOrderNode on 1324 /// the predecessor. Increment the parent node's instruction count and 1325 /// preemptively join this subtree to its parent's if it is small enough. 1326 void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { 1327 R.DFSNodeData[Succ->NodeNum].InstrCount 1328 += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount; 1329 joinPredSubtree(PredDep, Succ); 1330 } 1331 1332 /// Adds a connection for cross edges. 1333 void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) { 1334 ConnectionPairs.emplace_back(PredDep.getSUnit(), Succ); 1335 } 1336 1337 /// Sets each node's subtree ID to the representative ID and record 1338 /// connections between trees. 
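  /// After SubtreeClasses.compress(), every equivalence class of joined nodes
  /// becomes one subtree; RootSet supplies each tree's instruction count and
  /// parent link, and the recorded cross edges become SubtreeConnections.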
1339 void finalize() { 1340 SubtreeClasses.compress(); 1341 R.DFSTreeData.resize(SubtreeClasses.getNumClasses()); 1342 assert(SubtreeClasses.getNumClasses() == RootSet.size() 1343 && "number of roots should match trees"); 1344 for (const RootData &Root : RootSet) { 1345 unsigned TreeID = SubtreeClasses[Root.NodeID]; 1346 if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID) 1347 R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID]; 1348 R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount; 1349 // Note that SubInstrCount may be greater than InstrCount if we joined 1350 // subtrees across a cross edge. InstrCount will be attributed to the 1351 // original parent, while SubInstrCount will be attributed to the joined 1352 // parent. 1353 } 1354 R.SubtreeConnections.resize(SubtreeClasses.getNumClasses()); 1355 R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses()); 1356 LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n"); 1357 for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) { 1358 R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx]; 1359 LLVM_DEBUG(dbgs() << " SU(" << Idx << ") in tree " 1360 << R.DFSNodeData[Idx].SubtreeID << '\n'); 1361 } 1362 for (const auto &[Pred, Succ] : ConnectionPairs) { 1363 unsigned PredTree = SubtreeClasses[Pred->NodeNum]; 1364 unsigned SuccTree = SubtreeClasses[Succ->NodeNum]; 1365 if (PredTree == SuccTree) 1366 continue; 1367 unsigned Depth = Pred->getDepth(); 1368 addConnection(PredTree, SuccTree, Depth); 1369 addConnection(SuccTree, PredTree, Depth); 1370 } 1371 } 1372 1373 protected: 1374 /// Joins the predecessor subtree with the successor that is its DFS parent. 1375 /// Applies some heuristics before joining. 1376 bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ, 1377 bool CheckLimit = true) { 1378 assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges"); 1379 1380 // Check if the predecessor is already joined. 1381 const SUnit *PredSU = PredDep.getSUnit(); 1382 unsigned PredNum = PredSU->NodeNum; 1383 if (R.DFSNodeData[PredNum].SubtreeID != PredNum) 1384 return false; 1385 1386 // Four is the magic number of successors before a node is considered a 1387 // pinch point. 1388 unsigned NumDataSucs = 0; 1389 for (const SDep &SuccDep : PredSU->Succs) { 1390 if (SuccDep.getKind() == SDep::Data) { 1391 if (++NumDataSucs >= 4) 1392 return false; 1393 } 1394 } 1395 if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit) 1396 return false; 1397 R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum; 1398 SubtreeClasses.join(Succ->NodeNum, PredNum); 1399 return true; 1400 } 1401 1402 /// Called by finalize() to record a connection between trees. 1403 void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) { 1404 if (!Depth) 1405 return; 1406 1407 do { 1408 SmallVectorImpl<SchedDFSResult::Connection> &Connections = 1409 R.SubtreeConnections[FromTree]; 1410 for (SchedDFSResult::Connection &C : Connections) { 1411 if (C.TreeID == ToTree) { 1412 C.Level = std::max(C.Level, Depth); 1413 return; 1414 } 1415 } 1416 Connections.push_back(SchedDFSResult::Connection(ToTree, Depth)); 1417 FromTree = R.DFSTreeData[FromTree].ParentTreeID; 1418 } while (FromTree != SchedDFSResult::InvalidSubtreeID); 1419 } 1420 }; 1421 1422 } // end namespace llvm 1423 1424 namespace { 1425 1426 /// Manage the stack used by a reverse depth-first search over the DAG. 
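/// Each stack entry pairs a node with an iterator over its remaining
/// predecessor edges, so SchedDFSResult::compute() below can resume a node's
/// edge scan exactly where it left off after backtracking from a child.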
1427 class SchedDAGReverseDFS { 1428 std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack; 1429 1430 public: 1431 bool isComplete() const { return DFSStack.empty(); } 1432 1433 void follow(const SUnit *SU) { 1434 DFSStack.emplace_back(SU, SU->Preds.begin()); 1435 } 1436 void advance() { ++DFSStack.back().second; } 1437 1438 const SDep *backtrack() { 1439 DFSStack.pop_back(); 1440 return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second); 1441 } 1442 1443 const SUnit *getCurr() const { return DFSStack.back().first; } 1444 1445 SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; } 1446 1447 SUnit::const_pred_iterator getPredEnd() const { 1448 return getCurr()->Preds.end(); 1449 } 1450 }; 1451 1452 } // end anonymous namespace 1453 1454 static bool hasDataSucc(const SUnit *SU) { 1455 for (const SDep &SuccDep : SU->Succs) { 1456 if (SuccDep.getKind() == SDep::Data && 1457 !SuccDep.getSUnit()->isBoundaryNode()) 1458 return true; 1459 } 1460 return false; 1461 } 1462 1463 /// Computes an ILP metric for all nodes in the subDAG reachable via depth-first 1464 /// search from this root. 1465 void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) { 1466 if (!IsBottomUp) 1467 llvm_unreachable("Top-down ILP metric is unimplemented"); 1468 1469 SchedDFSImpl Impl(*this); 1470 for (const SUnit &SU : SUnits) { 1471 if (Impl.isVisited(&SU) || hasDataSucc(&SU)) 1472 continue; 1473 1474 SchedDAGReverseDFS DFS; 1475 Impl.visitPreorder(&SU); 1476 DFS.follow(&SU); 1477 while (true) { 1478 // Traverse the leftmost path as far as possible. 1479 while (DFS.getPred() != DFS.getPredEnd()) { 1480 const SDep &PredDep = *DFS.getPred(); 1481 DFS.advance(); 1482 // Ignore non-data edges. 1483 if (PredDep.getKind() != SDep::Data 1484 || PredDep.getSUnit()->isBoundaryNode()) { 1485 continue; 1486 } 1487 // An already visited edge is a cross edge, assuming an acyclic DAG. 1488 if (Impl.isVisited(PredDep.getSUnit())) { 1489 Impl.visitCrossEdge(PredDep, DFS.getCurr()); 1490 continue; 1491 } 1492 Impl.visitPreorder(PredDep.getSUnit()); 1493 DFS.follow(PredDep.getSUnit()); 1494 } 1495 // Visit the top of the stack in postorder and backtrack. 1496 const SUnit *Child = DFS.getCurr(); 1497 const SDep *PredDep = DFS.backtrack(); 1498 Impl.visitPostorderNode(Child); 1499 if (PredDep) 1500 Impl.visitPostorderEdge(*PredDep, DFS.getCurr()); 1501 if (DFS.isComplete()) 1502 break; 1503 } 1504 } 1505 Impl.finalize(); 1506 } 1507 1508 /// The root of the given SubtreeID was just scheduled. For all subtrees 1509 /// connected to this tree, record the depth of the connection so that the 1510 /// nearest connected subtrees can be prioritized. 
1511 void SchedDFSResult::scheduleTree(unsigned SubtreeID) { 1512 for (const Connection &C : SubtreeConnections[SubtreeID]) { 1513 SubtreeConnectLevels[C.TreeID] = 1514 std::max(SubtreeConnectLevels[C.TreeID], C.Level); 1515 LLVM_DEBUG(dbgs() << " Tree: " << C.TreeID << " @" 1516 << SubtreeConnectLevels[C.TreeID] << '\n'); 1517 } 1518 } 1519 1520 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1521 LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const { 1522 OS << InstrCount << " / " << Length << " = "; 1523 if (!Length) 1524 OS << "BADILP"; 1525 else 1526 OS << format("%g", ((double)InstrCount / Length)); 1527 } 1528 1529 LLVM_DUMP_METHOD void ILPValue::dump() const { 1530 dbgs() << *this << '\n'; 1531 } 1532 1533 namespace llvm { 1534 1535 LLVM_ATTRIBUTE_UNUSED 1536 raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) { 1537 Val.print(OS); 1538 return OS; 1539 } 1540 1541 } // end namespace llvm 1542 1543 #endif 1544