//===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Insert wait instructions for memory reads and writes.
///
/// Memory reads and writes are issued asynchronously, so we need to insert
/// S_WAITCNT instructions when we want to access any of their results or
/// overwrite any register that's used asynchronously.
///
/// TODO: This pass currently keeps one timeline per hardware counter. A more
/// finely-grained approach that keeps one timeline per event type could
/// sometimes get away with generating weaker s_waitcnt instructions. For
/// example, when both SMEM and LDS are in flight and we need to wait for
/// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
/// but the pass will currently generate a conservative lgkmcnt(0) because
/// multiple event types are in flight.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/TargetParser.h"
using namespace llvm;

#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

namespace {

template <typename EnumT>
class enum_iterator
    : public iterator_facade_base<enum_iterator<EnumT>,
                                  std::forward_iterator_tag, const EnumT> {
  EnumT Value;
public:
  enum_iterator() = default;
  enum_iterator(EnumT Value) : Value(Value) {}

  enum_iterator &operator++() {
    Value = static_cast<EnumT>(Value + 1);
    return *this;
  }

  bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }

  EnumT operator*() const { return Value; }
};

// Class of object that encapsulates latest instruction counter score
// associated with the operand. Used for determining whether
// an s_waitcnt instruction needs to be emitted.
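// An illustrative sketch of the scoring scheme (all values hypothetical):
// each memory event bumps the upper bound of its counter's bracket, and the
// score recorded on an operand identifies the event that last touched it.
// With ScoreLB(VM_CNT) = 3 and ScoreUB(VM_CNT) = 7, four VMEM operations are
// outstanding; an operand whose score is 5 becomes safe to use once the
// hardware counter drops to UB - score = 7 - 5, i.e. after an
// s_waitcnt vmcnt(2).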
#define CNT_MASK(t) (1u << (t))

enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };

iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
  return make_range(enum_iterator<InstCounterType>(VM_CNT),
                    enum_iterator<InstCounterType>(NUM_INST_CNTS));
}

using RegInterval = std::pair<int, int>;

struct {
  unsigned VmcntMax;
  unsigned ExpcntMax;
  unsigned LgkmcntMax;
  unsigned VscntMax;
} HardwareLimits;

struct {
  unsigned VGPR0;
  unsigned VGPRL;
  unsigned SGPR0;
  unsigned SGPRL;
} RegisterEncoding;

enum WaitEventType {
  VMEM_ACCESS,       // vector-memory read & write
  VMEM_READ_ACCESS,  // vector-memory read
  VMEM_WRITE_ACCESS, // vector-memory write
  LDS_ACCESS,        // lds read & write
  GDS_ACCESS,        // gds read & write
  SQ_MESSAGE,        // send message
  SMEM_ACCESS,       // scalar-memory read & write
  EXP_GPR_LOCK,      // export holding on its data src
  GDS_GPR_LOCK,      // GDS holding on its data and addr src
  EXP_POS_ACCESS,    // write to export position
  EXP_PARAM_ACCESS,  // write to export parameter
  VMW_GPR_LOCK,      // vector-memory write holding on its data src
  NUM_WAIT_EVENTS,
};

static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
    (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
    (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
        (1 << SQ_MESSAGE),
    (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
        (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
    (1 << VMEM_WRITE_ACCESS)};

// The mapping is:
//  0                .. SQ_MAX_PGM_VGPRS-1               real VGPRs
//  SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
//  NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
// We reserve a fixed number of VGPR slots in the scoring tables for
// special tokens like SCMEM_LDS (needed for buffer load to LDS).
enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
  SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
  NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
  EXTRA_VGPR_LDS = 0,     // This is a placeholder the Shader algorithm uses.
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
};

// Enumerate different types of result-returning VMEM operations. Although
// s_waitcnt orders them all with a single vmcnt counter, in the absence of
// s_waitcnt only instructions of the same VmemType are guaranteed to write
// their results in order -- so there is no need to insert an s_waitcnt between
// two instructions of the same type that write the same vgpr.
enum VmemType {
  // BUF instructions and MIMG instructions without a sampler.
  VMEM_NOSAMPLER,
  // MIMG instructions with a sampler.
  VMEM_SAMPLER,
};

VmemType getVmemType(const MachineInstr &Inst) {
  assert(SIInstrInfo::isVMEM(Inst));
  if (!SIInstrInfo::isMIMG(Inst))
    return VMEM_NOSAMPLER;
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
  return AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler
             ? VMEM_SAMPLER
             : VMEM_NOSAMPLER;
}
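// For example (a hypothetical sequence): two buffer loads that write the
// same vgpr are both VMEM_NOSAMPLER and complete in order, so no s_waitcnt
// is needed between them; a buffer load followed by a sampling image load
// to the same vgpr mixes VMEM_NOSAMPLER with VMEM_SAMPLER, so the WAW
// hazard requires waiting for the first load to complete (see
// hasOtherPendingVmemTypes below).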
void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  switch (T) {
  case VM_CNT:
    Wait.VmCnt = std::min(Wait.VmCnt, Count);
    break;
  case EXP_CNT:
    Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
    break;
  case LGKM_CNT:
    Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
    break;
  case VS_CNT:
    Wait.VsCnt = std::min(Wait.VsCnt, Count);
    break;
  default:
    llvm_unreachable("bad InstCounterType");
  }
}

// This object maintains the current score brackets of each wait counter, and
// a per-register scoreboard for each wait counter.
//
// We also maintain the latest score for every event type that can change the
// waitcnt in order to know if there are multiple types of events within
// the brackets. When multiple types of event happen in the bracket, the
// wait count may get decremented out of order, therefore we need to put in
// an "s_waitcnt 0" before the use.
class WaitcntBrackets {
public:
  WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {}

  static unsigned getWaitCountMax(InstCounterType T) {
    switch (T) {
    case VM_CNT:
      return HardwareLimits.VmcntMax;
    case LGKM_CNT:
      return HardwareLimits.LgkmcntMax;
    case EXP_CNT:
      return HardwareLimits.ExpcntMax;
    case VS_CNT:
      return HardwareLimits.VscntMax;
    default:
      break;
    }
    return 0;
  }

  unsigned getScoreLB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreLBs[T];
  }

  unsigned getScoreUB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    return ScoreUBs[T];
  }

  // Mapping from event to counter.
  InstCounterType eventCounter(WaitEventType E) {
    if (WaitEventMaskForInst[VM_CNT] & (1 << E))
      return VM_CNT;
    if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
      return LGKM_CNT;
    if (WaitEventMaskForInst[VS_CNT] & (1 << E))
      return VS_CNT;
    assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
    return EXP_CNT;
  }

  unsigned getRegScore(int GprNo, InstCounterType T) {
    if (GprNo < NUM_ALL_VGPRS) {
      return VgprScores[T][GprNo];
    }
    assert(T == LGKM_CNT);
    return SgprScores[GprNo - NUM_ALL_VGPRS];
  }

  bool merge(const WaitcntBrackets &Other);

  RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
                             const MachineRegisterInfo *MRI,
                             const SIRegisterInfo *TRI, unsigned OpNo) const;

  bool counterOutOfOrder(InstCounterType T) const;
  bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
  bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void determineWait(InstCounterType T, unsigned ScoreToWait,
                     AMDGPU::Waitcnt &Wait) const;
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
                     const MachineRegisterInfo *MRI, WaitEventType E,
                     MachineInstr &MI);

  bool hasPending() const { return PendingEvents != 0; }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  }

  bool hasMixedPendingEvents(InstCounterType T) const {
    unsigned Events = PendingEvents & WaitEventMaskForInst[T];
    // Return true if more than one bit is set in Events.
    return Events & (Events - 1);
  }
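  // A brief aside on the test above (values illustrative): clearing the
  // lowest set bit with Events & (Events - 1) leaves zero exactly when at
  // most one event type is pending, e.g. Events = 0b0010 yields 0b0000
  // (a single type) while Events = 0b0110 yields 0b0100 (mixed types).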
  bool hasPendingFlat() const {
    return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
             LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
            (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
             LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
  }

  void setPendingFlat() {
    LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
    LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
  }

  // Return true if there might be pending writes to the specified vgpr by VMEM
  // instructions with types different from V.
  bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
    assert(GprNo < NUM_ALL_VGPRS);
    return VgprVmemTypes[GprNo] & ~(1 << V);
  }

  void clearVgprVmemTypes(int GprNo) {
    assert(GprNo < NUM_ALL_VGPRS);
    VgprVmemTypes[GprNo] = 0;
  }

  void print(raw_ostream &);
  void dump() { print(dbgs()); }

private:
  struct MergeInfo {
    unsigned OldLB;
    unsigned OtherLB;
    unsigned MyShift;
    unsigned OtherShift;
  };
  static bool mergeScore(const MergeInfo &M, unsigned &Score,
                         unsigned OtherScore);

  void setScoreLB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreLBs[T] = Val;
  }

  void setScoreUB(InstCounterType T, unsigned Val) {
    assert(T < NUM_INST_CNTS);
    ScoreUBs[T] = Val;
    if (T == EXP_CNT) {
      unsigned UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
      if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
        ScoreLBs[T] = UB;
    }
  }

  void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
    if (GprNo < NUM_ALL_VGPRS) {
      VgprUB = std::max(VgprUB, GprNo);
      VgprScores[T][GprNo] = Val;
    } else {
      assert(T == LGKM_CNT);
      SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
      SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
    }
  }

  void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
                   const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
                   unsigned OpNo, unsigned Val);

  const GCNSubtarget *ST = nullptr;
  unsigned ScoreLBs[NUM_INST_CNTS] = {0};
  unsigned ScoreUBs[NUM_INST_CNTS] = {0};
  unsigned PendingEvents = 0;
  // Remember the last flat memory operation.
  unsigned LastFlat[NUM_INST_CNTS] = {0};
  // wait_cnt scores for every vgpr.
  // Keep track of the VgprUB and SgprUB to make merge at join efficient.
  int VgprUB = -1;
  int SgprUB = -1;
  unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
  // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
  unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
  // Bitmask of the VmemTypes of VMEM instructions that might have a pending
  // write to each vgpr.
  unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
};

class SIInsertWaitcnts : public MachineFunctionPass {
private:
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  AMDGPU::IsaVersion IV;

  DenseSet<MachineInstr *> TrackedWaitcntSet;
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  MachinePostDominatorTree *PDT;

  struct BlockInfo {
    MachineBasicBlock *MBB;
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;

    explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
  };

  MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;

  // ForceEmitZeroWaitcnts: force all waitcnt instrs to be s_waitcnt 0
  // because of the amdgpu-waitcnt-forcezero flag.
  bool ForceEmitZeroWaitcnts;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];

public:
  static char ID;

  SIInsertWaitcnts() : MachineFunctionPass(ID) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool isForceEmitWaitcnt() const {
    for (auto T : inst_counter_types())
      if (ForceEmitWaitcnt[T])
        return true;
    return false;
  }

  void setForceEmitWaitcnt() {
    // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
    // for debug builds, get the debug counter info and adjust if need be.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter)) {
      ForceEmitWaitcnt[EXP_CNT] = true;
    } else {
      ForceEmitWaitcnt[EXP_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[LGKM_CNT] = true;
    } else {
      ForceEmitWaitcnt[LGKM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[VM_CNT] = true;
    } else {
      ForceEmitWaitcnt[VM_CNT] = false;
    }
#endif // NDEBUG
  }

  bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
  bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr);
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
};

} // end anonymous namespace

RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            const SIInstrInfo *TII,
                                            const MachineRegisterInfo *MRI,
                                            const SIRegisterInfo *TRI,
                                            unsigned OpNo) const {
  const MachineOperand &Op = MI->getOperand(OpNo);
  assert(Op.isReg());
  if (!TRI->isInAllocatableClass(Op.getReg()) || TRI->isAGPR(*MRI, Op.getReg()))
    return {-1, -1};

  // A use via a partial-write (PW) operand does not need a waitcnt.
  // A partial write is not a WAW.
  assert(!Op.getSubReg() || !Op.isUndef());

  RegInterval Result;

  unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST));

  if (TRI->isVGPR(*MRI, Op.getReg())) {
    assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
    Result.first = Reg - RegisterEncoding.VGPR0;
    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
  } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
    assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
    assert(Result.first >= NUM_ALL_VGPRS &&
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
  }
  // TODO: Handle TTMP
  // else if (TRI->isTTMP(*MRI, Reg.getReg())) ...
  else
    return {-1, -1};

  const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  Result.second = Result.first + ((Size + 16) / 32);

  return Result;
}

void WaitcntBrackets::setExpScore(const MachineInstr *MI,
                                  const SIInstrInfo *TII,
                                  const SIRegisterInfo *TRI,
                                  const MachineRegisterInfo *MRI, unsigned OpNo,
                                  unsigned Val) {
  RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
  assert(TRI->isVGPR(*MRI, MI->getOperand(OpNo).getReg()));
  for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
    setRegScore(RegNo, EXP_CNT, Val);
  }
}

void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
                                    const SIRegisterInfo *TRI,
                                    const MachineRegisterInfo *MRI,
                                    WaitEventType E, MachineInstr &Inst) {
  InstCounterType T = eventCounter(E);
  unsigned CurrScore = getScoreUB(T) + 1;
  if (CurrScore == 0)
    report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not. Examples include vm_cnt
  // for a buffer store, or lgkm_cnt for a send-message.
  PendingEvents |= 1 << E;
  setScoreUB(T, CurrScore);

  if (T == EXP_CNT) {
    // Put the score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
      int AddrOpIdx =
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
      // All GDS operations must protect their address register (same as
      // export.)
      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
      }

      if (Inst.mayStore()) {
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data0) != -1) {
          setExpScore(
              &Inst, TII, TRI, MRI,
              AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                         AMDGPU::OpName::data0),
              CurrScore);
        }
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data1) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data1),
                      CurrScore);
        }
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          const MachineOperand &Op = Inst.getOperand(I);
          if (Op.isReg() && !Op.isDef() && TRI->isVGPR(*MRI, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
          }
        }
      }
    } else if (TII->isFLAT(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      }
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT
        // score.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          MachineOperand &DefMO = Inst.getOperand(I);
          if (DefMO.isReg() && DefMO.isDef() &&
              TRI->isVGPR(*MRI, DefMO.getReg())) {
            setRegScore(
                TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)),
                EXP_CNT, CurrScore);
          }
        }
      }
      for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
        MachineOperand &MO = Inst.getOperand(I);
        if (MO.isReg() && !MO.isDef() && TRI->isVGPR(*MRI, MO.getReg())) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
        }
      }
    }
#if 0 // TODO: check if this is handled by MUBUF code above.
  } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
    MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
    unsigned OpNo; //TODO: find the OpNo for this operand;
    RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
    for (int RegNo = Interval.first; RegNo < Interval.second;
         ++RegNo) {
      setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
    }
#endif
  } else {
    // Match the score to the destination registers.
    for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
      auto &Op = Inst.getOperand(I);
      if (!Op.isReg() || !Op.isDef())
        continue;
      RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
      if (T == VM_CNT) {
        if (Interval.first >= NUM_ALL_VGPRS)
          continue;
        if (SIInstrInfo::isVMEM(Inst)) {
          VmemType V = getVmemType(Inst);
          for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
            VgprVmemTypes[RegNo] |= 1 << V;
        }
      }
      for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        setRegScore(RegNo, T, CurrScore);
      }
    }
    if (TII->isDS(Inst) && Inst.mayStore()) {
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
    }
  }
}

void WaitcntBrackets::print(raw_ostream &OS) {
  OS << '\n';
  for (auto T : inst_counter_types()) {
    unsigned LB = getScoreLB(T);
    unsigned UB = getScoreUB(T);

    switch (T) {
    case VM_CNT:
      OS << "    VM_CNT(" << UB - LB << "): ";
      break;
    case LGKM_CNT:
      OS << "    LGKM_CNT(" << UB - LB << "): ";
      break;
    case EXP_CNT:
      OS << "    EXP_CNT(" << UB - LB << "): ";
      break;
    case VS_CNT:
      OS << "    VS_CNT(" << UB - LB << "): ";
      break;
    default:
      OS << "    UNKNOWN(" << UB - LB << "): ";
      break;
    }

    if (LB < UB) {
      // Print vgpr scores.
      for (int J = 0; J <= VgprUB; J++) {
        unsigned RegScore = getRegScore(J, T);
        if (RegScore <= LB)
          continue;
        unsigned RelScore = RegScore - LB - 1;
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
        } else {
          OS << RelScore << ":ds ";
        }
      }
      // Also need to print sgpr scores for lgkm_cnt.
      if (T == LGKM_CNT) {
        for (int J = 0; J <= SgprUB; J++) {
          unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
          if (RegScore <= LB)
            continue;
          unsigned RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";
        }
      }
    }
    OS << '\n';
  }
  OS << '\n';
}

/// Simplify the waitcnt, in the sense of removing redundant counts, and return
/// whether a waitcnt instruction is needed at all.
bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
  return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
         simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
         simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt) |
         simplifyWaitcnt(VS_CNT, Wait.VsCnt);
}

bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if (Count < UB && UB - Count > LB)
    return true;

  Count = ~0u;
  return false;
}
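// A worked example of the simplification above (values hypothetical): with
// ScoreLB(VM_CNT) = 3 and ScoreUB(VM_CNT) = 7, at most UB - LB = 4 VMEM
// operations are outstanding, so a requested vmcnt(2) is kept (it really
// forces completions), while a requested vmcnt(4) or higher is already
// satisfied and is reset to ~0u, meaning "no wait needed".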
void WaitcntBrackets::determineWait(InstCounterType T, unsigned ScoreToWait,
                                    AMDGPU::Waitcnt &Wait) const {
  // If the score of src_operand falls within the bracket, we need an
  // s_waitcnt instruction.
  const unsigned LB = getScoreLB(T);
  const unsigned UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == VM_CNT || T == LGKM_CNT) &&
        hasPendingFlat() &&
        !ST->hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // The counter can get decremented out-of-order when there are
      // multiple types of event in the bracket, so emit an s_waitcnt
      // with a conservative value of 0 for the counter.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(VM_CNT, Wait.VmCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  applyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const unsigned UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    PendingEvents &= ~WaitEventMaskForInst[T];
  }
}

// Where there are multiple types of event in the bracket of a counter,
// the decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory reads can always go out of order.
  if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
    return true;
  return hasMixedPendingEvents(T);
}

INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                    false)

char SIInsertWaitcnts::ID = 0;

char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcnts();
}

static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}

/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
  // Currently all conventions wait, but this may not always be the case.
  //
  // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
  // sense to omit the wait and do it in the caller.
  return true;
}

/// \returns true if the callee is expected to wait for any outstanding waits
/// before returning.
static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
  return true;
}
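// To make the bracket arithmetic concrete (values hypothetical): if
// ScoreUB(LGKM_CNT) = 9 and an s_waitcnt lgkmcnt(1) is applied, all but the
// youngest LGKM event are known to have completed, so the lower bound rises
// to UB - Count = 8. applyWaitcnt deliberately skips this tightening when
// counterOutOfOrder() holds, because an out-of-order counter value does not
// identify which events actually finished.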
/// Generate an s_waitcnt instruction to be placed before cur_Inst.
/// Instructions of a given type are returned in order,
/// but instructions of different types can complete out of order.
/// We rely on this in-order completion
/// and simply assign a score to the memory access instructions.
/// We keep track of the active "score bracket" to determine
/// whether an access of a memory read requires an s_waitcnt
/// and if so what the value of each counter is.
/// The "score bracket" is bound by the lower bound and upper bound
/// scores (*_score_LB and *_score_ub respectively).
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr) {
  setForceEmitWaitcnt();
  bool IsForceEmitWaitcnt = isForceEmitWaitcnt();

  if (MI.isMetaInstruction())
    return false;

  AMDGPU::Waitcnt Wait;

  // See if this instruction has a forced S_WAITCNT VM.
  // TODO: Handle other cases of NeedsWaitcntVmBefore()
  if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
      MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
      MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
    Wait.VmCnt = 0;
  }

  // All waits must be resolved at call return.
  // NOTE: this could be improved with knowledge of all call sites or
  // with knowledge of the called routines.
  if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
      (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
  }
  // Resolve vm waits before gs-done.
  else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
            MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
           ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
            AMDGPU::SendMsg::ID_GS_DONE)) {
    Wait.VmCnt = 0;
  }
#if 0 // TODO: the following blocks of logic when we have fence.
  else if (MI.getOpcode() == SC_FENCE) {
    const unsigned int group_size =
        context->shader_info->GetMaxThreadGroupSize();
    // group_size == 0 means thread group size is unknown at compile time
    const bool group_is_multi_wave =
        (group_size == 0 || group_size > target_info->GetWaveFrontSize());
    const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();

    for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
      SCRegType src_type = Inst->GetSrcType(i);
      switch (src_type) {
        case SCMEM_LDS:
          if (group_is_multi_wave ||
              context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                               ScoreBrackets->getScoreUB(LGKM_CNT));
            // LDS may have to wait for VM_CNT after buffer load to LDS
            if (target_info->HasBufferLoadToLDS()) {
              EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                                 ScoreBrackets->getScoreUB(VM_CNT));
            }
          }
          break;

        case SCMEM_GDS:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
                               ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                               ScoreBrackets->getScoreUB(LGKM_CNT));
          }
          break;

        case SCMEM_UAV:
        case SCMEM_TFBUF:
        case SCMEM_RING:
        case SCMEM_SCATTER:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
                               ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                               ScoreBrackets->getScoreUB(VM_CNT));
          }
          break;

        case SCMEM_SCRATCH:
        default:
          break;
      }
    }
  }
#endif

  // Export & GDS instructions do not read the EXEC mask until after the export
  // is granted (which can occur well after the instruction is issued).
  // The shader program must flush all EXP operations on the export-count
  // before overwriting the EXEC mask.
  else {
    if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
      // Export and GDS are tracked individually, either may trigger a waitcnt
      // for EXEC.
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
        Wait.ExpCnt = 0;
      }
    }

    if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
      // The function is going to insert a wait on everything in its prolog.
      // This still needs to be careful if the call target is a load (e.g. a
      // GOT load). We also need to check WAW dependency with the saved PC.
      Wait = AMDGPU::Waitcnt();

      int CallAddrOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

      if (MI.getOperand(CallAddrOpIdx).isReg()) {
        RegInterval CallAddrOpInterval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);

        for (int RegNo = CallAddrOpInterval.first;
             RegNo < CallAddrOpInterval.second; ++RegNo)
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);

        int RtnAddrOpIdx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
        if (RtnAddrOpIdx != -1) {
          RegInterval RtnAddrOpInterval =
              ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);

          for (int RegNo = RtnAddrOpInterval.first;
               RegNo < RtnAddrOpInterval.second; ++RegNo)
            ScoreBrackets.determineWait(
                LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
    } else {
      // FIXME: Should not be relying on memoperands.
      // Look at the source operands of every instruction to see if
      // any of them results from a previous memory operation that affects
      // its current usage. If so, an s_waitcnt instruction needs to be
      // emitted.
      // If the source operand was defined by a load, add the s_waitcnt
      // instruction.
      //
      // Two cases are handled for destination operands:
      // 1) If the destination operand was defined by a load, add the s_waitcnt
      // instruction to guarantee the right WAW order.
      // 2) If a destination operand was used by a recent export/store
      // instruction, add an s_waitcnt on exp_cnt to guarantee the WAR order.
      for (const MachineMemOperand *Memop : MI.memoperands()) {
        const Value *Ptr = Memop->getValue();
        if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
          addWait(Wait, LGKM_CNT, 0);
          if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
            SLoadAddresses.erase(Ptr);
        }
        unsigned AS = Memop->getAddrSpace();
        if (AS != AMDGPUAS::LOCAL_ADDRESS)
          continue;
        unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
        // VM_CNT is only relevant to vgpr or LDS.
        ScoreBrackets.determineWait(
            VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
        if (Memop->isStore()) {
          ScoreBrackets.determineWait(
              EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
        }
      }
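      // Note (illustrative): LDS traffic is scored on the reserved
      // EXTRA_VGPR_LDS slot rather than on an architectural register, so a
      // dependence such as a DS store followed by an LDS-reading operation
      // is caught through this shared slot even when no vgpr connects the
      // two instructions.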
      // Loop over use and def operands.
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg())
          continue;
        RegInterval Interval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);

        const bool IsVGPR = TRI->isVGPR(*MRI, Op.getReg());
        for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
          if (IsVGPR) {
            // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
            // previous write and this write are the same type of VMEM
            // instruction, in which case they're guaranteed to write their
            // results in order anyway.
            if (Op.isUse() || !SIInstrInfo::isVMEM(MI) ||
                ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
                                                       getVmemType(MI))) {
              ScoreBrackets.determineWait(
                  VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
              ScoreBrackets.clearVgprVmemTypes(RegNo);
            }
            if (Op.isDef()) {
              ScoreBrackets.determineWait(
                  EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
            }
          }
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
    }
  }
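  // A concrete (hypothetical) illustration of the operand scan above:
  //   buffer_load_dword v0, ...  ; updateByEvent scored v0 against vmcnt
  //   v_add_f32 v1, v0, v0       ; RAW on v0 -> determineWait asks vmcnt(0)
  // Two back-to-back buffer loads writing v0 would need no wait between
  // them, since both are VMEM_NOSAMPLER and complete in order.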
  // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
  // occurs before the instruction. Doing it here prevents any additional
  // S_WAITCNTs from being emitted if the instruction was marked as
  // requiring a WAITCNT beforehand.
  if (MI.getOpcode() == AMDGPU::S_BARRIER &&
      !ST->hasAutoWaitcntBeforeBarrier()) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
  }

  // TODO: Remove this work-around, enable the assert for Bug 457939
  //       after fixing the scheduler. Also, the Shader Compiler code is
  //       independent of target.
  if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
    if (ScoreBrackets.getScoreLB(LGKM_CNT) <
            ScoreBrackets.getScoreUB(LGKM_CNT) &&
        ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
      Wait.LgkmCnt = 0;
    }
  }

  // Early-out if no wait is indicated.
  if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
    bool Modified = false;
    if (OldWaitcntInstr) {
      for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
           &*II != &MI; II = NextI, ++NextI) {
        if (II->isDebugInstr())
          continue;

        if (TrackedWaitcntSet.count(&*II)) {
          TrackedWaitcntSet.erase(&*II);
          II->eraseFromParent();
          Modified = true;
        } else if (II->getOpcode() == AMDGPU::S_WAITCNT) {
          int64_t Imm = II->getOperand(0).getImm();
          ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
        } else {
          assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
          assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
          auto W = TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->getImm();
          ScoreBrackets.applyWaitcnt(AMDGPU::Waitcnt(~0u, ~0u, ~0u, W));
        }
      }
    }
    return Modified;
  }

  if (ForceEmitZeroWaitcnts)
    Wait = AMDGPU::Waitcnt::allZero(ST->hasVscnt());

  if (ForceEmitWaitcnt[VM_CNT])
    Wait.VmCnt = 0;
  if (ForceEmitWaitcnt[EXP_CNT])
    Wait.ExpCnt = 0;
  if (ForceEmitWaitcnt[LGKM_CNT])
    Wait.LgkmCnt = 0;
  if (ForceEmitWaitcnt[VS_CNT])
    Wait.VsCnt = 0;

  ScoreBrackets.applyWaitcnt(Wait);

  AMDGPU::Waitcnt OldWait;
  bool Modified = false;

  if (OldWaitcntInstr) {
    for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
         &*II != &MI; II = NextI, NextI++) {
      if (II->isDebugInstr())
        continue;

      if (II->getOpcode() == AMDGPU::S_WAITCNT) {
        unsigned IEnc = II->getOperand(0).getImm();
        AMDGPU::Waitcnt IWait = AMDGPU::decodeWaitcnt(IV, IEnc);
        OldWait = OldWait.combined(IWait);
        if (!TrackedWaitcntSet.count(&*II))
          Wait = Wait.combined(IWait);
        unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
        if (IEnc != NewEnc) {
          II->getOperand(0).setImm(NewEnc);
          Modified = true;
        }
        Wait.VmCnt = ~0u;
        Wait.LgkmCnt = ~0u;
        Wait.ExpCnt = ~0u;
      } else {
        assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
        assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);

        unsigned ICnt = TII->getNamedOperand(*II, AMDGPU::OpName::simm16)
                            ->getImm();
        OldWait.VsCnt = std::min(OldWait.VsCnt, ICnt);
        if (!TrackedWaitcntSet.count(&*II))
          Wait.VsCnt = std::min(Wait.VsCnt, ICnt);
        if (Wait.VsCnt != ICnt) {
          TII->getNamedOperand(*II, AMDGPU::OpName::simm16)->setImm(Wait.VsCnt);
          Modified = true;
        }
        Wait.VsCnt = ~0u;
      }

      LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                        << "Old Instr: " << MI
                        << "New Instr: " << *II << '\n');

      if (!Wait.hasWait())
        return Modified;
    }
  }

  if (Wait.VmCnt != ~0u || Wait.LgkmCnt != ~0u || Wait.ExpCnt != ~0u) {
    unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
    auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
                             MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
                         .addImm(Enc);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI
                      << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.VsCnt != ~0u) {
    assert(ST->hasVscnt());

    auto SWaitInst =
        BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                TII->get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Wait.VsCnt);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI
                      << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}

// This is a flat memory operation. Check to see if it has memory tokens other
// than LDS. Other address spaces supported by flat memory operations involve
// global memory.
bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // All flat instructions use the VMEM counter.
  assert(TII->usesVM_CNT(MI));

  // If there are no memory operands then conservatively assume the flat
  // operation may access VMEM.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves VMEM.
  // Flat operations only support FLAT, LOCAL (LDS), or address spaces
  // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The
  // REGION (GDS) address space is not supported by flat operations.
  // Therefore, simply return true unless only the LDS address space is found.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    assert(AS != AMDGPUAS::REGION_ADDRESS);
    if (AS != AMDGPUAS::LOCAL_ADDRESS)
      return true;
  }

  return false;
}

// This is a flat memory operation. Check to see if it has memory tokens for
// either LDS or FLAT.
bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
  assert(TII->isFLAT(MI));

  // Flat instructions such as SCRATCH and GLOBAL do not use the lgkm counter.
  if (!TII->usesLGKM_CNT(MI))
    return false;

  // If there are no memory operands then conservatively assume the flat
  // operation may access LDS.
  if (MI.memoperands_empty())
    return true;

  // See if any memory operand specifies an address space that involves LDS.
  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }

  return false;
}
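// For instance (a hypothetical flat_load_dword with no memory operands):
// both helpers above conservatively return true, the instruction is scored
// against both vmcnt and lgkmcnt, and setPendingFlat() is recorded below,
// so a later dependence on either counter forces a wait of 0 on targets
// without hasFlatLgkmVMemCountInOrder().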
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access
  // instruction, update the upper-bound of the appropriate counter's
  // bracket and the destination operand scores.
  // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoadOrStore());

    int FlatASCount = 0;

    if (mayAccessVMEMThroughFlat(Inst)) {
      ++FlatASCount;
      if (!ST->hasVscnt())
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
      else if (Inst.mayLoad() &&
               AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
    }

    if (mayAccessLDSThroughFlat(Inst)) {
      ++FlatASCount;
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }

    // A flat memory operation must access at least one address space.
    assert(FlatASCount);

    // This is a flat memory operation that accesses both VMEM and LDS, so
    // note it - it will require that both the VM and LGKM be flushed to zero
    // if it is pending when a VM or LGKM dependency occurs.
    if (FlatASCount > 1)
      ScoreBrackets->setPendingFlat();
  } else if (SIInstrInfo::isVMEM(Inst) &&
             // TODO: get a better carve out.
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
    if (!ST->hasVscnt())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
    else if ((Inst.mayLoad() &&
              AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
             /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
             (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
    else if (Inst.mayStore())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else if (SIInstrInfo::isEXP(Inst)) {
    unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
    if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
    else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
    else
      ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    }
  }
}

bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
                                 unsigned OtherScore) {
  unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  unsigned OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}

/// Merge the pending events and associated score brackets of \p Other into
/// this brackets status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  VgprUB = std::max(VgprUB, Other.VgprUB);
  SgprUB = std::max(SgprUB, Other.SgprUB);

  for (auto T : inst_counter_types()) {
    // Merge event flags for this counter.
    const bool OldOutOfOrder = counterOutOfOrder(T);
    const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter.
    const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
    const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
    if (NewUB < ScoreLBs[T])
      report_fatal_error("waitcnt score overflow");

    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = NewUB - ScoreUBs[T];
    M.OtherShift = NewUB - Other.ScoreUBs[T];

    ScoreUBs[T] = NewUB;

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    bool RegStrictDom = false;
    for (int J = 0; J <= VgprUB; J++) {
      RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
    }

    if (T == VM_CNT) {
      for (int J = 0; J <= VgprUB; J++) {
        unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
        RegStrictDom |= NewVmemTypes != VgprVmemTypes[J];
        VgprVmemTypes[J] = NewVmemTypes;
      }
    }

    if (T == LGKM_CNT) {
      for (int J = 0; J <= SgprUB; J++) {
        RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
      }
    }

    if (RegStrictDom && !OldOutOfOrder)
      StrictDom = true;
  }

  return StrictDom;
}
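// A small worked example of the rebasing in mergeScore (values
// hypothetical): suppose this bracket has LB = 2, UB = 5 and the incoming
// bracket has LB = 0, UB = 4. Then NewUB = 2 + max(3, 4) = 6, MyShift = 1
// and OtherShift = 2, so a register scored 4 here becomes 5 and one scored
// 3 in Other also becomes 5, while anything at or below its old LB collapses
// to 0 ("already complete"); each score keeps its distance from the top of
// its bracket.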
// Generate s_waitcnt instructions where needed.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();
  });

  // Track the correctness of vccz through this basic block. There are two
  // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
  // ST->partialVCCWritesUpdateVCCZ().
  bool VCCZCorrect = true;
  if (ST->hasReadVCCZBug()) {
    // vccz could be incorrect at a basic block boundary if a predecessor wrote
    // to vcc and then issued an smem load.
    VCCZCorrect = false;
  } else if (!ST->partialVCCWritesUpdateVCCZ()) {
    // vccz could be incorrect at a basic block boundary if a predecessor wrote
    // to vcc_lo or vcc_hi.
    VCCZCorrect = false;
  }

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts from earlier iterations.
    if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
        (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         Inst.getOperand(0).isReg() &&
         Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    // Generate an s_waitcnt instruction to be placed before Inst, if needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
    OldWaitcntInstr = nullptr;

    // Restore vccz if it's not known to be correct already.
    bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);

    // Don't examine operands unless we need to track vccz correctness.
    if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
      if (Inst.definesRegister(AMDGPU::VCC_LO) ||
          Inst.definesRegister(AMDGPU::VCC_HI)) {
        // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
        if (!ST->partialVCCWritesUpdateVCCZ())
          VCCZCorrect = false;
      } else if (Inst.definesRegister(AMDGPU::VCC)) {
        // There is a hardware bug on CI/SI where SMRD instructions may corrupt
        // the vccz bit, so when we detect that an instruction may read from a
        // corrupt vccz bit, we need to:
        // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
        //    operations to complete.
        // 2. Restore the correct value of vccz by writing the current value
        //    of vcc back to vcc.
        if (ST->hasReadVCCZBug() &&
            ScoreBrackets.getScoreLB(LGKM_CNT) <
                ScoreBrackets.getScoreUB(LGKM_CNT) &&
            ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
          // Writes to vcc while there's an outstanding smem read may get
          // clobbered as soon as any read completes.
          VCCZCorrect = false;
        } else {
          // Writes to vcc will fix any incorrect value in vccz.
          VCCZCorrect = true;
        }
      }
    }

    if (TII->isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        const Value *Ptr = Memop->getValue();
        SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
      }
      if (ST->hasReadVCCZBug()) {
        // This smem read could complete and clobber vccz at any time.
        VCCZCorrect = false;
      }
    }

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

#if 0 // TODO: implement resource type check controlled by options with ub = LB.
    // If this instruction generates a S_SETVSKIP because it is an
    // indexed resource, and we are on Tahiti, then it will also force
    // an S_WAITCNT vmcnt(0)
    if (RequireCheckResourceType(Inst, context)) {
      // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
      ScoreBrackets->setScoreLB(VM_CNT,
                                ScoreBrackets->getScoreUB(VM_CNT));
    }
#endif

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    // TODO: Remove this work-around after fixing the scheduler and enable the
    // assert above.
    if (RestoreVCCZ) {
      // Restore the vccz bit. Any time a value is written to vcc, the vcc
      // bit is updated, so we can restore the bit by reading the value of
      // vcc and then writing it back to the register.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      VCCZCorrect = true;
      Modified = true;
    }

    ++Iter;
  }

  return Modified;
}

bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  IV = AMDGPU::getIsaVersion(ST->getCPU());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
  HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
  HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
  HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;

  unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
  unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
  RegisterEncoding.VGPRL = RegisterEncoding.VGPR0 + NumVGPRsMax - 1;
  RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
  RegisterEncoding.SGPRL = RegisterEncoding.SGPR0 + NumSGPRsMax - 1;

  TrackedWaitcntSet.clear();
  BlockInfos.clear();
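  // A note on convergence (informal): merging only ever grows the pending
  // events and rebased scores toward a fixed point, so a block needs
  // re-visiting only when the merge changed the incoming state of a
  // successor that appears at or before it in the reverse post order; that
  // is what the Repeat flag below tracks.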
  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached.
  for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
    BlockInfos.insert({MBB, BlockInfo(MBB)});

  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Modified = false;
  bool Repeat;
  do {
    Repeat = false;

    for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
         ++BII) {
      BlockInfo &BI = BII->second;
      if (!BI.Dirty)
        continue;

      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(ST);
        else
          *Brackets = WaitcntBrackets(ST);
      }

      Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
      BI.Dirty = false;

      if (Brackets->hasPending()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : BI.MBB->successors()) {
          auto SuccBII = BlockInfos.find(Succ);
          BlockInfo &SuccBI = SuccBII->second;
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccBII <= BII)
              Repeat = true;
          }
        }
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);

  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
       ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      if (!HaveScalarStores && TII->isScalarStore(*I))
        HaveScalarStores = true;

      if (I->getOpcode() == AMDGPU::S_ENDPGM ||
          I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
           I != E; ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
            !SeenDCacheWB) {
          Modified = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    MachineBasicBlock::iterator I = EntryBB.begin();
    for (MachineBasicBlock::iterator E = EntryBB.end();
         I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
      ;
    BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
    if (ST->hasVscnt())
      BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT))
          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
          .addImm(0);

    Modified = true;
  }

  return Modified;
}