//===--------------------- Instruction.cpp ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines abstractions used by the Pipeline to model register reads,
// register writes and instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/MCA/Instruction.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace mca {

void WriteState::writeStartEvent(unsigned IID, MCPhysReg RegID,
                                 unsigned Cycles) {
  CRD.IID = IID;
  CRD.RegID = RegID;
  CRD.Cycles = Cycles;
  DependentWriteCyclesLeft = Cycles;
  DependentWrite = nullptr;
}

void ReadState::writeStartEvent(unsigned IID, MCPhysReg RegID,
                                unsigned Cycles) {
  assert(DependentWrites);
  assert(CyclesLeft == UNKNOWN_CYCLES);

  // This read may be dependent on more than one write. This typically occurs
  // when a definition is the result of multiple writes where at least one
  // write does a partial register update.
  // The HW is forced to do some extra bookkeeping to keep track of all the
  // dependent writes, and implement a merging scheme for the partial writes.
  --DependentWrites;
  if (TotalCycles < Cycles) {
    CRD.IID = IID;
    CRD.RegID = RegID;
    CRD.Cycles = Cycles;
    TotalCycles = Cycles;
  }

  if (!DependentWrites) {
    CyclesLeft = TotalCycles;
    IsReady = !CyclesLeft;
  }
}

void WriteState::onInstructionIssued(unsigned IID) {
  assert(CyclesLeft == UNKNOWN_CYCLES);
  // Update the number of cycles left based on the WriteDescriptor info.
  CyclesLeft = getLatency();

  // Now that the time left before write-back is known, notify
  // all the users.
  for (const std::pair<ReadState *, int> &User : Users) {
    ReadState *RS = User.first;
    unsigned ReadCycles = std::max(0, CyclesLeft - User.second);
    RS->writeStartEvent(IID, RegisterID, ReadCycles);
  }

  // Notify any writes that are in a false dependency with this write.
  if (PartialWrite)
    PartialWrite->writeStartEvent(IID, RegisterID, CyclesLeft);
}

void WriteState::addUser(unsigned IID, ReadState *User, int ReadAdvance) {
  // If CyclesLeft is not UNKNOWN_CYCLES, then we don't need to
  // update the list of users. We can just notify the user with
  // the actual number of cycles left (which may be zero).
  if (CyclesLeft != UNKNOWN_CYCLES) {
    unsigned ReadCycles = std::max(0, CyclesLeft - ReadAdvance);
    User->writeStartEvent(IID, RegisterID, ReadCycles);
    return;
  }

  Users.emplace_back(User, ReadAdvance);
}

void WriteState::addUser(unsigned IID, WriteState *User) {
  if (CyclesLeft != UNKNOWN_CYCLES) {
    User->writeStartEvent(IID, RegisterID, std::max(0, CyclesLeft));
    return;
  }

  assert(!PartialWrite && "PartialWrite already set!");
  PartialWrite = User;
  User->setDependentWrite(this);
}

void WriteState::cycleEvent() {
  // Note: CyclesLeft can be a negative number. It is an error to
  // make it an unsigned quantity because users of this write may
  // specify a negative ReadAdvance.
  if (CyclesLeft != UNKNOWN_CYCLES)
    CyclesLeft--;

  if (DependentWriteCyclesLeft)
    DependentWriteCyclesLeft--;
}

void ReadState::cycleEvent() {
  // Update the total number of cycles.
  if (DependentWrites && TotalCycles) {
    --TotalCycles;
    return;
  }

  // Bail out immediately if we don't know how many cycles are left.
  if (CyclesLeft == UNKNOWN_CYCLES)
    return;

  if (CyclesLeft) {
    --CyclesLeft;
    IsReady = !CyclesLeft;
  }
}

#ifndef NDEBUG
void WriteState::dump() const {
  dbgs() << "{ OpIdx=" << WD->OpIndex << ", Lat=" << getLatency() << ", RegID "
         << getRegisterID() << ", Cycles Left=" << getCyclesLeft() << " }";
}
#endif

const CriticalDependency &Instruction::computeCriticalRegDep() {
  if (CriticalRegDep.Cycles)
    return CriticalRegDep;

  // Find the register dependency with the highest latency among the defs and
  // the uses of this instruction.
  unsigned MaxLatency = 0;
  for (const WriteState &WS : getDefs()) {
    const CriticalDependency &WriteCRD = WS.getCriticalRegDep();
    if (WriteCRD.Cycles > MaxLatency) {
      MaxLatency = WriteCRD.Cycles;
      CriticalRegDep = WriteCRD;
    }
  }

  for (const ReadState &RS : getUses()) {
    const CriticalDependency &ReadCRD = RS.getCriticalRegDep();
    if (ReadCRD.Cycles > MaxLatency) {
      MaxLatency = ReadCRD.Cycles;
      CriticalRegDep = ReadCRD;
    }
  }

  return CriticalRegDep;
}

void Instruction::dispatch(unsigned RCUToken) {
  assert(Stage == IS_INVALID);
  Stage = IS_DISPATCHED;
  RCUTokenID = RCUToken;

  // Check if input operands are already available.
  if (updateDispatched())
    updatePending();
}

void Instruction::execute(unsigned IID) {
  assert(Stage == IS_READY);
  Stage = IS_EXECUTING;

  // Set the cycles left before the write-back stage.
  CyclesLeft = getLatency();

  for (WriteState &WS : getDefs())
    WS.onInstructionIssued(IID);

  // Transition to the "executed" stage if this is a zero-latency instruction.
  if (!CyclesLeft)
    Stage = IS_EXECUTED;
}

void Instruction::forceExecuted() {
  assert(Stage == IS_READY && "Invalid internal state!");
  CyclesLeft = 0;
  Stage = IS_EXECUTED;
}

bool Instruction::updatePending() {
  assert(isPending() && "Unexpected instruction stage found!");

  if (!all_of(getUses(), [](const ReadState &Use) { return Use.isReady(); }))
    return false;

  // A partial register write cannot complete before a dependent write.
  if (!all_of(getDefs(), [](const WriteState &Def) { return Def.isReady(); }))
    return false;

  Stage = IS_READY;
  return true;
}

bool Instruction::updateDispatched() {
  assert(isDispatched() && "Unexpected instruction stage found!");

  if (!all_of(getUses(), [](const ReadState &Use) {
        return Use.isPending() || Use.isReady();
      }))
    return false;

  // A partial register write cannot complete before a dependent write.
  if (!all_of(getDefs(),
              [](const WriteState &Def) { return !Def.getDependentWrite(); }))
    return false;

  Stage = IS_PENDING;
  return true;
}

void Instruction::update() {
  if (isDispatched())
    updateDispatched();
  if (isPending())
    updatePending();
}

void Instruction::cycleEvent() {
  if (isReady())
    return;

  if (isDispatched() || isPending()) {
    for (ReadState &Use : getUses())
      Use.cycleEvent();

    for (WriteState &Def : getDefs())
      Def.cycleEvent();

    update();
    return;
  }

  assert(isExecuting() && "Instruction not in-flight?");
  assert(CyclesLeft && "Instruction already executed?");
  for (WriteState &Def : getDefs())
    Def.cycleEvent();
  CyclesLeft--;
  if (!CyclesLeft)
    Stage = IS_EXECUTED;
}

} // namespace mca
} // namespace llvm