//===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Insert wait instructions for memory reads and writes.
///
/// Memory reads and writes are issued asynchronously, so we need to insert
/// S_WAITCNT instructions when we want to access any of their results or
/// overwrite any register that's used asynchronously.
///
/// TODO: This pass currently keeps one timeline per hardware counter. A more
/// finely-grained approach that keeps one timeline per event type could
/// sometimes get away with generating weaker s_waitcnt instructions. For
/// example, when both SMEM and LDS are in flight and we need to wait for
/// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
/// but the pass will currently generate a conservative lgkmcnt(0) because
/// multiple event types are in flight.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-insert-waitcnts"

DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE "-forceexp",
              "Force emit s_waitcnt expcnt(0) instrs");
DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE "-forcelgkm",
              "Force emit s_waitcnt lgkmcnt(0) instrs");
DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE "-forcevm",
              "Force emit s_waitcnt vmcnt(0) instrs");

static cl::opt<bool> ForceEmitZeroFlag(
    "amdgpu-waitcnt-forcezero",
    cl::desc("Force all waitcnt instrs to be emitted as "
             "s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
    cl::init(false), cl::Hidden);

namespace {

template <typename EnumT>
class enum_iterator
    : public iterator_facade_base<enum_iterator<EnumT>,
                                  std::forward_iterator_tag, const EnumT> {
  EnumT Value;

public:
  enum_iterator() = default;
  enum_iterator(EnumT Value) : Value(Value) {}

  enum_iterator &operator++() {
    Value = static_cast<EnumT>(Value + 1);
    return *this;
  }

  bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }

  EnumT operator*() const { return Value; }
};

// Class of object that encapsulates latest instruction counter score
// associated with the operand. Used for determining whether
// an s_waitcnt instruction needs to be emitted.

#define CNT_MASK(t) (1u << (t))

enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };

iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
  return make_range(enum_iterator<InstCounterType>(VM_CNT),
                    enum_iterator<InstCounterType>(NUM_INST_CNTS));
}

using RegInterval = std::pair<signed, signed>;

struct {
  uint32_t VmcntMax;
  uint32_t ExpcntMax;
  uint32_t LgkmcntMax;
  uint32_t VscntMax;
  int32_t NumVGPRsMax;
  int32_t NumSGPRsMax;
} HardwareLimits;

struct {
  unsigned VGPR0;
  unsigned VGPRL;
  unsigned SGPR0;
  unsigned SGPRL;
} RegisterEncoding;

enum WaitEventType {
  VMEM_ACCESS,       // vector-memory read & write
  VMEM_READ_ACCESS,  // vector-memory read
  VMEM_WRITE_ACCESS, // vector-memory write
  LDS_ACCESS,        // lds read & write
  GDS_ACCESS,        // gds read & write
  SQ_MESSAGE,        // send message
  SMEM_ACCESS,       // scalar-memory read & write
  EXP_GPR_LOCK,      // export holding on its data src
  GDS_GPR_LOCK,      // GDS holding on its data and addr src
  EXP_POS_ACCESS,    // write to export position
  EXP_PARAM_ACCESS,  // write to export parameter
  VMW_GPR_LOCK,      // vector-memory write holding on its data src
  NUM_WAIT_EVENTS,
};

static const uint32_t WaitEventMaskForInst[NUM_INST_CNTS] = {
    (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
    (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
        (1 << SQ_MESSAGE),
    (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
        (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
    (1 << VMEM_WRITE_ACCESS)};

// The mapping is:
//   0                .. SQ_MAX_PGM_VGPRS-1                 real VGPRs
//   SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                    extra VGPR-like slots
//   NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1   real SGPRs
// We reserve a fixed number of VGPR slots in the scoring tables for
// special tokens like SCMEM_LDS (needed for buffer load to LDS).
enum RegisterMapping {
  SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
  SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
  NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
  EXTRA_VGPR_LDS = 0,     // This is a placeholder the Shader algorithm uses.
  NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
};

void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
  switch (T) {
  case VM_CNT:
    Wait.VmCnt = std::min(Wait.VmCnt, Count);
    break;
  case EXP_CNT:
    Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
    break;
  case LGKM_CNT:
    Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
    break;
  case VS_CNT:
    Wait.VsCnt = std::min(Wait.VsCnt, Count);
    break;
  default:
    llvm_unreachable("bad InstCounterType");
  }
}

// This object maintains the current score bracket of each wait counter, and
// a per-register scoreboard for each wait counter.
//
// We also maintain the latest score for every event type that can change the
// waitcnt in order to know if there are multiple types of events within
// the brackets. When multiple types of event happen in the bracket, the
// wait count may get decremented out of order, so we need to put in an
// "s_waitcnt 0" before the use.
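//
// As an illustrative example with hypothetical values, grounded in the logic
// of determineWait() below: if three LDS loads are issued back-to-back, they
// receive LGKM_CNT scores 1, 2 and 3, giving a bracket of (LB=0, UB=3].
// Assuming only LDS events are pending (so the counter completes in order),
// a use of the first load's result needs at most
// s_waitcnt lgkmcnt(UB - score) = lgkmcnt(2), while a use of the third
// load's result needs lgkmcnt(0).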
class WaitcntBrackets {
public:
  WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {
    for (auto T : inst_counter_types())
      memset(VgprScores[T], 0, sizeof(VgprScores[T]));
  }

  static uint32_t getWaitCountMax(InstCounterType T) {
    switch (T) {
    case VM_CNT:
      return HardwareLimits.VmcntMax;
    case LGKM_CNT:
      return HardwareLimits.LgkmcntMax;
    case EXP_CNT:
      return HardwareLimits.ExpcntMax;
    case VS_CNT:
      return HardwareLimits.VscntMax;
    default:
      break;
    }
    return 0;
  }

  uint32_t getScoreLB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    if (T >= NUM_INST_CNTS)
      return 0;
    return ScoreLBs[T];
  }

  uint32_t getScoreUB(InstCounterType T) const {
    assert(T < NUM_INST_CNTS);
    if (T >= NUM_INST_CNTS)
      return 0;
    return ScoreUBs[T];
  }

  // Mapping from event to counter.
  InstCounterType eventCounter(WaitEventType E) {
    if (WaitEventMaskForInst[VM_CNT] & (1 << E))
      return VM_CNT;
    if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
      return LGKM_CNT;
    if (WaitEventMaskForInst[VS_CNT] & (1 << E))
      return VS_CNT;
    assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
    return EXP_CNT;
  }

  uint32_t getRegScore(int GprNo, InstCounterType T) {
    if (GprNo < NUM_ALL_VGPRS) {
      return VgprScores[T][GprNo];
    }
    assert(T == LGKM_CNT);
    return SgprScores[GprNo - NUM_ALL_VGPRS];
  }

  void clear() {
    memset(ScoreLBs, 0, sizeof(ScoreLBs));
    memset(ScoreUBs, 0, sizeof(ScoreUBs));
    PendingEvents = 0;
    memset(MixedPendingEvents, 0, sizeof(MixedPendingEvents));
    for (auto T : inst_counter_types())
      memset(VgprScores[T], 0, sizeof(VgprScores[T]));
    memset(SgprScores, 0, sizeof(SgprScores));
  }

  bool merge(const WaitcntBrackets &Other);

  RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
                             const MachineRegisterInfo *MRI,
                             const SIRegisterInfo *TRI, unsigned OpNo,
                             bool Def) const;

  int32_t getMaxVGPR() const { return VgprUB; }
  int32_t getMaxSGPR() const { return SgprUB; }

  bool counterOutOfOrder(InstCounterType T) const;
  bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
  bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
  void determineWait(InstCounterType T, uint32_t ScoreToWait,
                     AMDGPU::Waitcnt &Wait) const;
  void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
  void applyWaitcnt(InstCounterType T, unsigned Count);
  void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
                     const MachineRegisterInfo *MRI, WaitEventType E,
                     MachineInstr &MI);

  bool hasPending() const { return PendingEvents != 0; }
  bool hasPendingEvent(WaitEventType E) const {
    return PendingEvents & (1 << E);
  }

  bool hasPendingFlat() const {
    return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
             LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
            (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
             LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
  }

  void setPendingFlat() {
    LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
    LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
  }

  void print(raw_ostream &);
  void dump() { print(dbgs()); }

private:
  struct MergeInfo {
    uint32_t OldLB;
    uint32_t OtherLB;
    uint32_t MyShift;
    uint32_t OtherShift;
  };
  static bool mergeScore(const MergeInfo &M, uint32_t &Score,
                         uint32_t OtherScore);

  void setScoreLB(InstCounterType T, uint32_t Val) {
    assert(T < NUM_INST_CNTS);
    if (T >= NUM_INST_CNTS)
      return;
    ScoreLBs[T] = Val;
  }

  void setScoreUB(InstCounterType T, uint32_t Val) {
    assert(T < NUM_INST_CNTS);
    if (T >= NUM_INST_CNTS)
      return;
    ScoreUBs[T] = Val;
    if (T == EXP_CNT) {
      uint32_t UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
      if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
        ScoreLBs[T] = UB;
    }
  }

  void setRegScore(int GprNo, InstCounterType T, uint32_t Val) {
    if (GprNo < NUM_ALL_VGPRS) {
      if (GprNo > VgprUB) {
        VgprUB = GprNo;
      }
      VgprScores[T][GprNo] = Val;
    } else {
      assert(T == LGKM_CNT);
      if (GprNo - NUM_ALL_VGPRS > SgprUB) {
        SgprUB = GprNo - NUM_ALL_VGPRS;
      }
      SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
    }
  }

  void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
                   const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
                   unsigned OpNo, uint32_t Val);

  const GCNSubtarget *ST = nullptr;
  uint32_t ScoreLBs[NUM_INST_CNTS] = {0};
  uint32_t ScoreUBs[NUM_INST_CNTS] = {0};
  uint32_t PendingEvents = 0;
  bool MixedPendingEvents[NUM_INST_CNTS] = {false};
  // Remember the last flat memory operation.
  uint32_t LastFlat[NUM_INST_CNTS] = {0};
  // wait_cnt scores for every vgpr.
  // Keep track of the VgprUB and SgprUB to make merge at join efficient.
  int32_t VgprUB = 0;
  int32_t SgprUB = 0;
  uint32_t VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS];
  // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
  uint32_t SgprScores[SQ_MAX_PGM_SGPRS] = {0};
};

class SIInsertWaitcnts : public MachineFunctionPass {
private:
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  AMDGPU::IsaVersion IV;

  DenseSet<MachineInstr *> TrackedWaitcntSet;
  DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
  MachinePostDominatorTree *PDT;

  struct BlockInfo {
    MachineBasicBlock *MBB;
    std::unique_ptr<WaitcntBrackets> Incoming;
    bool Dirty = true;

    explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
  };

  std::vector<BlockInfo> BlockInfos; // by reverse post-order traversal index
  DenseMap<MachineBasicBlock *, unsigned> RpotIdxMap;

  // ForceEmitZeroWaitcnts: force all waitcnts insts to be s_waitcnt 0
  // because of amdgpu-waitcnt-forcezero flag
  bool ForceEmitZeroWaitcnts;
  bool ForceEmitWaitcnt[NUM_INST_CNTS];

public:
  static char ID;

  SIInsertWaitcnts() : MachineFunctionPass(ID) {
    (void)ForceExpCounter;
    (void)ForceLgkmCounter;
    (void)ForceVMCounter;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool isForceEmitWaitcnt() const {
    for (auto T : inst_counter_types())
      if (ForceEmitWaitcnt[T])
        return true;
    return false;
  }

  void setForceEmitWaitcnt() {
    // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
    // for debug builds, get the debug counter info and adjust if need be.
#ifndef NDEBUG
    if (DebugCounter::isCounterSet(ForceExpCounter) &&
        DebugCounter::shouldExecute(ForceExpCounter)) {
      ForceEmitWaitcnt[EXP_CNT] = true;
    } else {
      ForceEmitWaitcnt[EXP_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
        DebugCounter::shouldExecute(ForceLgkmCounter)) {
      ForceEmitWaitcnt[LGKM_CNT] = true;
    } else {
      ForceEmitWaitcnt[LGKM_CNT] = false;
    }

    if (DebugCounter::isCounterSet(ForceVMCounter) &&
        DebugCounter::shouldExecute(ForceVMCounter)) {
      ForceEmitWaitcnt[VM_CNT] = true;
    } else {
      ForceEmitWaitcnt[VM_CNT] = false;
    }
#endif // NDEBUG
  }

  bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
  bool generateWaitcntInstBefore(MachineInstr &MI,
                                 WaitcntBrackets &ScoreBrackets,
                                 MachineInstr *OldWaitcntInstr);
  void updateEventWaitcntAfter(MachineInstr &Inst,
                               WaitcntBrackets *ScoreBrackets);
  bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
                            WaitcntBrackets &ScoreBrackets);
};

} // end anonymous namespace

RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
                                            const SIInstrInfo *TII,
                                            const MachineRegisterInfo *MRI,
                                            const SIRegisterInfo *TRI,
                                            unsigned OpNo, bool Def) const {
  const MachineOperand &Op = MI->getOperand(OpNo);
  if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()) ||
      (Def && !Op.isDef()) || TRI->isAGPR(*MRI, Op.getReg()))
    return {-1, -1};

  // A use via a PW operand does not need a waitcnt.
  // A partial write is not a WAW.
  assert(!Op.getSubReg() || !Op.isUndef());

  RegInterval Result;
  const MachineRegisterInfo &MRIA = *MRI;

  unsigned Reg = TRI->getEncodingValue(Op.getReg());

  if (TRI->isVGPR(MRIA, Op.getReg())) {
    assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
    Result.first = Reg - RegisterEncoding.VGPR0;
    assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
  } else if (TRI->isSGPRReg(MRIA, Op.getReg())) {
    assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
    Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
    assert(Result.first >= NUM_ALL_VGPRS &&
           Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
  }
  // TODO: Handle TTMP
  // else if (TRI->isTTMP(MRIA, Reg.getReg())) ...
  else
    return {-1, -1};

  const MachineInstr &MIA = *MI;
  const TargetRegisterClass *RC = TII->getOpRegClass(MIA, OpNo);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  Result.second = Result.first + (Size / 32);

  return Result;
}

void WaitcntBrackets::setExpScore(const MachineInstr *MI,
                                  const SIInstrInfo *TII,
                                  const SIRegisterInfo *TRI,
                                  const MachineRegisterInfo *MRI, unsigned OpNo,
                                  uint32_t Val) {
  RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo, false);
  LLVM_DEBUG({
    const MachineOperand &Opnd = MI->getOperand(OpNo);
    assert(TRI->isVGPR(*MRI, Opnd.getReg()));
  });
  for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
    setRegScore(RegNo, EXP_CNT, Val);
  }
}

void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
                                    const SIRegisterInfo *TRI,
                                    const MachineRegisterInfo *MRI,
                                    WaitEventType E, MachineInstr &Inst) {
  const MachineRegisterInfo &MRIA = *MRI;
  InstCounterType T = eventCounter(E);
  uint32_t CurrScore = getScoreUB(T) + 1;
  if (CurrScore == 0)
    report_fatal_error("InsertWaitcnt score wraparound");
  // PendingEvents and ScoreUB need to be updated regardless of whether this
  // event changes the score of a register or not; examples include vm_cnt
  // for a buffer store, or lgkm_cnt for a send-message.
  if (!hasPendingEvent(E)) {
    if (PendingEvents & WaitEventMaskForInst[T])
      MixedPendingEvents[T] = true;
    PendingEvents |= 1 << E;
  }
  setScoreUB(T, CurrScore);

  if (T == EXP_CNT) {
    // Put score on the source vgprs. If this is a store, just use those
    // specific register(s).
    if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
      int AddrOpIdx =
          AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
      // All GDS operations must protect their address register (same as
      // export.)
      if (AddrOpIdx != -1) {
        setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
      }

      if (Inst.mayStore()) {
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data0) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data0),
                      CurrScore);
        }
        if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                       AMDGPU::OpName::data1) != -1) {
          setExpScore(&Inst, TII, TRI, MRI,
                      AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                                 AMDGPU::OpName::data1),
                      CurrScore);
        }
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
                 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
                 Inst.getOpcode() != AMDGPU::DS_APPEND &&
                 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
                 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          const MachineOperand &Op = Inst.getOperand(I);
          if (Op.isReg() && !Op.isDef() && TRI->isVGPR(MRIA, Op.getReg())) {
            setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
          }
        }
      }
    } else if (TII->isFLAT(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMIMG(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else if (TII->isMTBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      }
    } else if (TII->isMUBUF(Inst)) {
      if (Inst.mayStore()) {
        setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
      } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
        setExpScore(
            &Inst, TII, TRI, MRI,
            AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
            CurrScore);
      }
    } else {
      if (TII->isEXP(Inst)) {
        // For export the destination registers are really temps that
        // can be used as the actual source after export patching, so
        // we need to treat them like sources and set the EXP_CNT
        // score.
        for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
          MachineOperand &DefMO = Inst.getOperand(I);
          if (DefMO.isReg() && DefMO.isDef() &&
              TRI->isVGPR(MRIA, DefMO.getReg())) {
            setRegScore(TRI->getEncodingValue(DefMO.getReg()), EXP_CNT,
                        CurrScore);
          }
        }
      }
      for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
        MachineOperand &MO = Inst.getOperand(I);
        if (MO.isReg() && !MO.isDef() && TRI->isVGPR(MRIA, MO.getReg())) {
          setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
        }
      }
    }
#if 0 // TODO: check if this is handled by MUBUF code above.
  } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
             Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
    MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
    unsigned OpNo; //TODO: find the OpNo for this operand;
    RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo, false);
    for (signed RegNo = Interval.first; RegNo < Interval.second;
         ++RegNo) {
      setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
    }
#endif
  } else {
    // Match the score to the destination registers.
    for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
      RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I, true);
      if (T == VM_CNT && Interval.first >= NUM_ALL_VGPRS)
        continue;
      for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
        setRegScore(RegNo, T, CurrScore);
      }
    }
    if (TII->isDS(Inst) && Inst.mayStore()) {
      setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
    }
  }
}

void WaitcntBrackets::print(raw_ostream &OS) {
  OS << '\n';
  for (auto T : inst_counter_types()) {
    uint32_t LB = getScoreLB(T);
    uint32_t UB = getScoreUB(T);

    switch (T) {
    case VM_CNT:
      OS << " VM_CNT(" << UB - LB << "): ";
      break;
    case LGKM_CNT:
      OS << " LGKM_CNT(" << UB - LB << "): ";
      break;
    case EXP_CNT:
      OS << " EXP_CNT(" << UB - LB << "): ";
      break;
    case VS_CNT:
      OS << " VS_CNT(" << UB - LB << "): ";
      break;
    default:
      OS << " UNKNOWN(" << UB - LB << "): ";
      break;
    }

    if (LB < UB) {
      // Print vgpr scores.
      for (int J = 0; J <= getMaxVGPR(); J++) {
        uint32_t RegScore = getRegScore(J, T);
        if (RegScore <= LB)
          continue;
        uint32_t RelScore = RegScore - LB - 1;
        if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
          OS << RelScore << ":v" << J << " ";
        } else {
          OS << RelScore << ":ds ";
        }
      }
      // Also need to print sgpr scores for lgkm_cnt.
      if (T == LGKM_CNT) {
        for (int J = 0; J <= getMaxSGPR(); J++) {
          uint32_t RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
          if (RegScore <= LB)
            continue;
          uint32_t RelScore = RegScore - LB - 1;
          OS << RelScore << ":s" << J << " ";
        }
      }
    }
    OS << '\n';
  }
  OS << '\n';
}

/// Simplify the waitcnt, in the sense of removing redundant counts, and return
/// whether a waitcnt instruction is needed at all.
bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
  return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
         simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
         simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt) |
         simplifyWaitcnt(VS_CNT, Wait.VsCnt);
}

bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
                                      unsigned &Count) const {
  const uint32_t LB = getScoreLB(T);
  const uint32_t UB = getScoreUB(T);
  if (Count < UB && UB - Count > LB)
    return true;

  Count = ~0u;
  return false;
}

void WaitcntBrackets::determineWait(InstCounterType T, uint32_t ScoreToWait,
                                    AMDGPU::Waitcnt &Wait) const {
  // If the score of src_operand falls within the bracket, we need an
  // s_waitcnt instruction.
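  // As a hypothetical illustration of the in-order case handled below: with
  // LB = 2 and UB = 5 (three operations still outstanding), a score of 4
  // falls inside the bracket, and the needed wait is
  // UB - ScoreToWait = 5 - 4 = 1, i.e. allow at most one younger operation
  // to remain outstanding (assuming the counter's hardware maximum is
  // larger than that).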
  const uint32_t LB = getScoreLB(T);
  const uint32_t UB = getScoreUB(T);
  if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
    if ((T == VM_CNT || T == LGKM_CNT) && hasPendingFlat() &&
        !ST->hasFlatLgkmVMemCountInOrder()) {
      // If there is a pending FLAT operation, and this is a VMem or LGKM
      // waitcnt and the target can report early completion, then we need
      // to force a waitcnt 0.
      addWait(Wait, T, 0);
    } else if (counterOutOfOrder(T)) {
      // The counter can get decremented out-of-order when there are multiple
      // types of event in the bracket, so emit an s_waitcnt with a
      // conservative value of 0 for the counter.
      addWait(Wait, T, 0);
    } else {
      // If a counter has been maxed out, avoid overflow by waiting for
      // MAX(CounterType) - 1 instead.
      uint32_t NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
      addWait(Wait, T, NeededWait);
    }
  }
}

void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
  applyWaitcnt(VM_CNT, Wait.VmCnt);
  applyWaitcnt(EXP_CNT, Wait.ExpCnt);
  applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
  applyWaitcnt(VS_CNT, Wait.VsCnt);
}

void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
  const uint32_t UB = getScoreUB(T);
  if (Count >= UB)
    return;
  if (Count != 0) {
    if (counterOutOfOrder(T))
      return;
    setScoreLB(T, std::max(getScoreLB(T), UB - Count));
  } else {
    setScoreLB(T, UB);
    MixedPendingEvents[T] = false;
    PendingEvents &= ~WaitEventMaskForInst[T];
  }
}

// Where there are multiple types of event in the bracket of a counter,
// the decrement may go out of order.
bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
  // Scalar memory reads can always go out of order.
  if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
    return true;
  return MixedPendingEvents[T];
}

INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
                    false)

char SIInsertWaitcnts::ID = 0;

char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;

FunctionPass *llvm::createSIInsertWaitcntsPass() {
  return new SIInsertWaitcnts();
}

static bool readsVCCZ(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
         !MI.getOperand(1).isUndef();
}

/// \returns true if the callee inserts an s_waitcnt 0 on function entry.
static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
  // Currently all conventions wait, but this may not always be the case.
  //
  // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
  // sense to omit the wait and do it in the caller.
  return true;
}

/// \returns true if the callee is expected to resolve any outstanding waits
/// before returning.
static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
  return true;
}

/// Generate an s_waitcnt instruction to be placed before cur_Inst.
/// Instructions of a given type are returned in order,
/// but instructions of different types can complete out of order.
/// We rely on this in-order completion
/// and simply assign a score to the memory access instructions.
/// We keep track of the active "score bracket" to determine
/// if an access of a memory read requires an s_waitcnt
/// and if so what the value of each counter is.
/// The "score bracket" is bound by the lower bound and upper bound
/// scores (*_score_LB and *_score_ub respectively).
bool SIInsertWaitcnts::generateWaitcntInstBefore(
    MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
    MachineInstr *OldWaitcntInstr) {
  setForceEmitWaitcnt();
  bool IsForceEmitWaitcnt = isForceEmitWaitcnt();

  if (MI.isDebugInstr())
    return false;

  AMDGPU::Waitcnt Wait;

  // See if this instruction has a forced S_WAITCNT VM.
  // TODO: Handle other cases of NeedsWaitcntVmBefore()
  if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
      MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
      MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
      MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
    Wait.VmCnt = 0;
  }

  // All waits must be resolved at call return.
  // NOTE: this could be improved with knowledge of all call sites or
  // with knowledge of the called routines.
  if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
      MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
      (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(IV));
  }
  // Resolve vm waits before gs-done.
  else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
            MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
           ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
            AMDGPU::SendMsg::ID_GS_DONE)) {
    Wait.VmCnt = 0;
  }
#if 0 // TODO: the following blocks of logic when we have fence.
  else if (MI.getOpcode() == SC_FENCE) {
    const unsigned int group_size =
        context->shader_info->GetMaxThreadGroupSize();
    // group_size == 0 means thread group size is unknown at compile time
    const bool group_is_multi_wave =
        (group_size == 0 || group_size > target_info->GetWaveFrontSize());
    const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();

    for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
      SCRegType src_type = Inst->GetSrcType(i);
      switch (src_type) {
        case SCMEM_LDS:
          if (group_is_multi_wave ||
              context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                               ScoreBrackets->getScoreUB(LGKM_CNT));
            // LDS may have to wait for VM_CNT after buffer load to LDS
            if (target_info->HasBufferLoadToLDS()) {
              EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                                 ScoreBrackets->getScoreUB(VM_CNT));
            }
          }
          break;

        case SCMEM_GDS:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
                               ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
                               ScoreBrackets->getScoreUB(LGKM_CNT));
          }
          break;

        case SCMEM_UAV:
        case SCMEM_TFBUF:
        case SCMEM_RING:
        case SCMEM_SCATTER:
          if (group_is_multi_wave || fence_is_global) {
            EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
                               ScoreBrackets->getScoreUB(EXP_CNT));
            EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
                               ScoreBrackets->getScoreUB(VM_CNT));
          }
          break;

        case SCMEM_SCRATCH:
        default:
          break;
      }
    }
  }
#endif

  // Export & GDS instructions do not read the EXEC mask until after the export
  // is granted (which can occur well after the instruction is issued).
  // The shader program must flush all EXP operations on the export-count
  // before overwriting the EXEC mask.
  else {
    if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
      // Export and GDS are tracked individually, either may trigger a waitcnt
      // for EXEC.
      if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
          ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
          ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
          ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
        Wait.ExpCnt = 0;
      }
    }

    if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
      // The function is going to insert a wait on everything in its prolog.
      // This still needs to be careful if the call target is a load (e.g. a
      // GOT load). We also need to check the WAW dependency with the saved PC.
      Wait = AMDGPU::Waitcnt();

      int CallAddrOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
      RegInterval CallAddrOpInterval = ScoreBrackets.getRegInterval(
          &MI, TII, MRI, TRI, CallAddrOpIdx, false);

      for (signed RegNo = CallAddrOpInterval.first;
           RegNo < CallAddrOpInterval.second; ++RegNo)
        ScoreBrackets.determineWait(
            LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);

      int RtnAddrOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
      if (RtnAddrOpIdx != -1) {
        RegInterval RtnAddrOpInterval = ScoreBrackets.getRegInterval(
            &MI, TII, MRI, TRI, RtnAddrOpIdx, false);

        for (signed RegNo = RtnAddrOpInterval.first;
             RegNo < RtnAddrOpInterval.second; ++RegNo)
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
      }

    } else {
      // FIXME: Should not be relying on memoperands.
      // Look at the source operands of every instruction to see if
      // any of them results from a previous memory operation that affects
      // its current usage. If so, an s_waitcnt instruction needs to be
      // emitted.
      // If the source operand was defined by a load, add the s_waitcnt
      // instruction.
      for (const MachineMemOperand *Memop : MI.memoperands()) {
        unsigned AS = Memop->getAddrSpace();
        if (AS != AMDGPUAS::LOCAL_ADDRESS)
          continue;
        unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
        // VM_CNT is only relevant to vgpr or LDS.
        ScoreBrackets.determineWait(
            VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
      }

      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        const MachineRegisterInfo &MRIA = *MRI;
        RegInterval Interval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I, false);
        for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
          if (TRI->isVGPR(MRIA, Op.getReg())) {
            // VM_CNT is only relevant to vgpr or LDS.
            ScoreBrackets.determineWait(
                VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
          }
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      }
      // End of for loop that looks at all source operands to decide
      // vm_wait_cnt and lgk_wait_cnt.

      // Two cases are handled for destination operands:
      // 1) If the destination operand was defined by a load, add the s_waitcnt
      // instruction to guarantee the right WAW order.
      // 2) If a destination operand was used by a recent export/store
      // instruction, add an s_waitcnt on exp_cnt to guarantee the WAR order.
      if (MI.mayStore()) {
        // FIXME: Should not be relying on memoperands.
        for (const MachineMemOperand *Memop : MI.memoperands()) {
          const Value *Ptr = Memop->getValue();
          if (SLoadAddresses.count(Ptr)) {
            addWait(Wait, LGKM_CNT, 0);
            if (PDT->dominates(MI.getParent(),
                               SLoadAddresses.find(Ptr)->second))
              SLoadAddresses.erase(Ptr);
          }
          unsigned AS = Memop->getAddrSpace();
          if (AS != AMDGPUAS::LOCAL_ADDRESS)
            continue;
          unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
          ScoreBrackets.determineWait(
              VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
          ScoreBrackets.determineWait(
              EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
        }
      }
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        MachineOperand &Def = MI.getOperand(I);
        const MachineRegisterInfo &MRIA = *MRI;
        RegInterval Interval =
            ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I, true);
        for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
          if (TRI->isVGPR(MRIA, Def.getReg())) {
            ScoreBrackets.determineWait(
                VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
            ScoreBrackets.determineWait(
                EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
          }
          ScoreBrackets.determineWait(
              LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
        }
      } // End of for loop that looks at all dest operands.
    }
  }

  // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
  // occurs before the instruction. Doing it here prevents any additional
  // S_WAITCNTs from being emitted if the instruction was marked as
  // requiring a WAITCNT beforehand.
  if (MI.getOpcode() == AMDGPU::S_BARRIER &&
      !ST->hasAutoWaitcntBeforeBarrier()) {
    Wait = Wait.combined(AMDGPU::Waitcnt::allZero(IV));
  }

  // TODO: Remove this work-around, enable the assert for Bug 457939
  //       after fixing the scheduler. Also, the Shader Compiler code is
  //       independent of target.
  if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
    if (ScoreBrackets.getScoreLB(LGKM_CNT) <
            ScoreBrackets.getScoreUB(LGKM_CNT) &&
        ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
      Wait.LgkmCnt = 0;
    }
  }

  // Early-out if no wait is indicated.
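  // (Note, per the code below: even when no new wait is required, any
  // pre-existing waitcnt instructions ahead of MI are still folded into the
  // brackets, and redundant waitcnts that this pass itself inserted earlier
  // are erased.)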
  if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
    bool Modified = false;
    if (OldWaitcntInstr) {
      for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
           &*II != &MI; II = NextI, ++NextI) {
        if (II->isDebugInstr())
          continue;

        if (TrackedWaitcntSet.count(&*II)) {
          TrackedWaitcntSet.erase(&*II);
          II->eraseFromParent();
          Modified = true;
        } else if (II->getOpcode() == AMDGPU::S_WAITCNT) {
          int64_t Imm = II->getOperand(0).getImm();
          ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
        } else {
          assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
          assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
          ScoreBrackets.applyWaitcnt(
              AMDGPU::Waitcnt(~0u, ~0u, ~0u, II->getOperand(1).getImm()));
        }
      }
    }
    return Modified;
  }

  if (ForceEmitZeroWaitcnts)
    Wait = AMDGPU::Waitcnt::allZero(IV);

  if (ForceEmitWaitcnt[VM_CNT])
    Wait.VmCnt = 0;
  if (ForceEmitWaitcnt[EXP_CNT])
    Wait.ExpCnt = 0;
  if (ForceEmitWaitcnt[LGKM_CNT])
    Wait.LgkmCnt = 0;
  if (ForceEmitWaitcnt[VS_CNT])
    Wait.VsCnt = 0;

  ScoreBrackets.applyWaitcnt(Wait);

  AMDGPU::Waitcnt OldWait;
  bool Modified = false;

  if (OldWaitcntInstr) {
    for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
         &*II != &MI; II = NextI, NextI++) {
      if (II->isDebugInstr())
        continue;

      if (II->getOpcode() == AMDGPU::S_WAITCNT) {
        unsigned IEnc = II->getOperand(0).getImm();
        AMDGPU::Waitcnt IWait = AMDGPU::decodeWaitcnt(IV, IEnc);
        OldWait = OldWait.combined(IWait);
        if (!TrackedWaitcntSet.count(&*II))
          Wait = Wait.combined(IWait);
        unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
        if (IEnc != NewEnc) {
          II->getOperand(0).setImm(NewEnc);
          Modified = true;
        }
        Wait.VmCnt = ~0u;
        Wait.LgkmCnt = ~0u;
        Wait.ExpCnt = ~0u;
      } else {
        assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
        assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);

        unsigned ICnt = II->getOperand(1).getImm();
        OldWait.VsCnt = std::min(OldWait.VsCnt, ICnt);
        if (!TrackedWaitcntSet.count(&*II))
          Wait.VsCnt = std::min(Wait.VsCnt, ICnt);
        if (Wait.VsCnt != ICnt) {
          II->getOperand(1).setImm(Wait.VsCnt);
          Modified = true;
        }
        Wait.VsCnt = ~0u;
      }

      LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                        << "Old Instr: " << MI << '\n'
                        << "New Instr: " << *II << '\n');

      if (!Wait.hasWait())
        return Modified;
    }
  }

  if (Wait.VmCnt != ~0u || Wait.LgkmCnt != ~0u || Wait.ExpCnt != ~0u) {
    unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
    auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
                             MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
                         .addImm(Enc);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI << '\n'
                      << "New Instr: " << *SWaitInst << '\n');
  }

  if (Wait.VsCnt != ~0u) {
    assert(ST->hasVscnt());

    auto SWaitInst =
        BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                TII->get(AMDGPU::S_WAITCNT_VSCNT))
            .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
            .addImm(Wait.VsCnt);
    TrackedWaitcntSet.insert(SWaitInst);
    Modified = true;

    LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
                      << "Old Instr: " << MI << '\n'
                      << "New Instr: " << *SWaitInst << '\n');
  }

  return Modified;
}

// This is a flat memory operation. Check to see if it has memory
// tokens for both LDS and Memory, and if so mark it as a flat operation.
bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *Memop : MI.memoperands()) {
    unsigned AS = Memop->getAddrSpace();
    if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }

  return false;
}

void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
                                               WaitcntBrackets *ScoreBrackets) {
  // Now look at the instruction opcode. If it is a memory access
  // instruction, update the upper-bound of the appropriate counter's
  // bracket and the destination operand scores.
  // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
  if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
    if (TII->isAlwaysGDS(Inst.getOpcode()) ||
        TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
      ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
    } else {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
    }
  } else if (TII->isFLAT(Inst)) {
    assert(Inst.mayLoadOrStore());

    if (TII->usesVM_CNT(Inst)) {
      if (!ST->hasVscnt())
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
      else if (Inst.mayLoad() &&
               AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
    }

    if (TII->usesLGKM_CNT(Inst)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);

      // This is a flat memory operation, so note it - it will require
      // that both the VM and LGKM be flushed to zero if it is pending when
      // a VM or LGKM dependency occurs.
      if (mayAccessLDSThroughFlat(Inst))
        ScoreBrackets->setPendingFlat();
    }
  } else if (SIInstrInfo::isVMEM(Inst) &&
             // TODO: get a better carve out.
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
             Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
             Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
    if (!ST->hasVscnt())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
    else if ((Inst.mayLoad() &&
              AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
             /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
             (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
    else if (Inst.mayStore())
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);

    if (ST->vmemWriteNeedsExpWaitcnt() &&
        (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
      ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
    }
  } else if (TII->isSMRD(Inst)) {
    ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
  } else if (Inst.isCall()) {
    if (callWaitsOnFunctionReturn(Inst)) {
      // Act as a wait on everything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(IV));
    } else {
      // May need to wait for anything.
      ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
    }
  } else {
    switch (Inst.getOpcode()) {
    case AMDGPU::S_SENDMSG:
    case AMDGPU::S_SENDMSGHALT:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
      break;
    case AMDGPU::EXP:
    case AMDGPU::EXP_DONE: {
      int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
      if (Imm >= 32 && Imm <= 63)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
      else if (Imm >= 12 && Imm <= 15)
        ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
      else
        ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
      break;
    }
    case AMDGPU::S_MEMTIME:
    case AMDGPU::S_MEMREALTIME:
      ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
      break;
    default:
      break;
    }
  }
}

bool WaitcntBrackets::mergeScore(const MergeInfo &M, uint32_t &Score,
                                 uint32_t OtherScore) {
  uint32_t MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
  uint32_t OtherShifted =
      OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
  Score = std::max(MyShifted, OtherShifted);
  return OtherShifted > MyShifted;
}

/// Merge the pending events and associated score brackets of \p Other into
/// this bracket's status.
///
/// Returns whether the merge resulted in a change that requires tighter waits
/// (i.e. the merged brackets strictly dominate the original brackets).
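///
/// A rough sketch of the mechanics, with illustrative values only: if this
/// bracket has UB - LB = 2 pending on some counter and \p Other has 3, this
/// bracket's scores are shifted up by MyShift = 3 - 2 = 1 and Other's by
/// OtherShift so that both upper bounds coincide. Scores at or below the old
/// lower bound are treated as completed and map to zero; the remaining
/// per-register scores are combined by taking the elementwise maximum of the
/// shifted values.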
bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
  bool StrictDom = false;

  for (auto T : inst_counter_types()) {
    // Merge event flags for this counter.
    const bool OldOutOfOrder = counterOutOfOrder(T);
    const uint32_t OldEvents = PendingEvents & WaitEventMaskForInst[T];
    const uint32_t OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
    if (OtherEvents & ~OldEvents)
      StrictDom = true;
    if (Other.MixedPendingEvents[T] ||
        (OldEvents && OtherEvents && OldEvents != OtherEvents))
      MixedPendingEvents[T] = true;
    PendingEvents |= OtherEvents;

    // Merge scores for this counter.
    const uint32_t MyPending = ScoreUBs[T] - ScoreLBs[T];
    const uint32_t OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
    MergeInfo M;
    M.OldLB = ScoreLBs[T];
    M.OtherLB = Other.ScoreLBs[T];
    M.MyShift = OtherPending > MyPending ? OtherPending - MyPending : 0;
    M.OtherShift = ScoreUBs[T] - Other.ScoreUBs[T] + M.MyShift;

    const uint32_t NewUB = ScoreUBs[T] + M.MyShift;
    if (NewUB < ScoreUBs[T])
      report_fatal_error("waitcnt score overflow");
    ScoreUBs[T] = NewUB;
    ScoreLBs[T] = std::min(M.OldLB + M.MyShift, M.OtherLB + M.OtherShift);

    StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);

    bool RegStrictDom = false;
    for (int J = 0, E = std::max(getMaxVGPR(), Other.getMaxVGPR()) + 1; J != E;
         J++) {
      RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
    }

    if (T == LGKM_CNT) {
      for (int J = 0, E = std::max(getMaxSGPR(), Other.getMaxSGPR()) + 1;
           J != E; J++) {
        RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
      }
    }

    if (RegStrictDom && !OldOutOfOrder)
      StrictDom = true;
  }

  VgprUB = std::max(getMaxVGPR(), Other.getMaxVGPR());
  SgprUB = std::max(getMaxSGPR(), Other.getMaxSGPR());

  return StrictDom;
}

// Generate s_waitcnt instructions where needed.
bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
                                            MachineBasicBlock &Block,
                                            WaitcntBrackets &ScoreBrackets) {
  bool Modified = false;

  LLVM_DEBUG({
    dbgs() << "*** Block" << Block.getNumber() << " ***";
    ScoreBrackets.dump();
  });

  // Walk over the instructions.
  MachineInstr *OldWaitcntInstr = nullptr;

  for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                         E = Block.instr_end();
       Iter != E;) {
    MachineInstr &Inst = *Iter;

    // Track pre-existing waitcnts from earlier iterations.
    if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
        (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         Inst.getOperand(0).isReg() &&
         Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
      if (!OldWaitcntInstr)
        OldWaitcntInstr = &Inst;
      ++Iter;
      continue;
    }

    bool VCCZBugWorkAround = false;
    if (readsVCCZ(Inst)) {
      if (ScoreBrackets.getScoreLB(LGKM_CNT) <
              ScoreBrackets.getScoreUB(LGKM_CNT) &&
          ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
        if (ST->hasReadVCCZBug())
          VCCZBugWorkAround = true;
      }
    }

    if (TII->isSMRD(Inst)) {
      for (const MachineMemOperand *Memop : Inst.memoperands()) {
        const Value *Ptr = Memop->getValue();
        SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
      }
    }

    // Generate an s_waitcnt instruction to be placed before cur_Inst, if
    // needed.
    Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
    OldWaitcntInstr = nullptr;

    updateEventWaitcntAfter(Inst, &ScoreBrackets);

#if 0 // TODO: implement resource type check controlled by options with ub = LB.
    // If this instruction generates a S_SETVSKIP because it is an
    // indexed resource, and we are on Tahiti, then it will also force
    // an S_WAITCNT vmcnt(0)
    if (RequireCheckResourceType(Inst, context)) {
      // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
      ScoreBrackets->setScoreLB(VM_CNT,
                                ScoreBrackets->getScoreUB(VM_CNT));
    }
#endif

    LLVM_DEBUG({
      Inst.print(dbgs());
      ScoreBrackets.dump();
    });

    // TODO: Remove this work-around after fixing the scheduler and enable the
    // assert above.
    if (VCCZBugWorkAround) {
      // Restore the vccz bit. Any time a value is written to vcc, the vcc
      // bit is updated, so we can restore the bit by reading the value of
      // vcc and then writing it back to the register.
      BuildMI(Block, Inst, Inst.getDebugLoc(),
              TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
              TRI->getVCC())
          .addReg(TRI->getVCC());
      Modified = true;
    }

    ++Iter;
  }

  return Modified;
}

bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  IV = AMDGPU::getIsaVersion(ST->getCPU());
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
  for (auto T : inst_counter_types())
    ForceEmitWaitcnt[T] = false;

  HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
  HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
  HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
  HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;

  HardwareLimits.NumVGPRsMax = ST->getAddressableNumVGPRs();
  HardwareLimits.NumSGPRsMax = ST->getAddressableNumSGPRs();
  assert(HardwareLimits.NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
  assert(HardwareLimits.NumSGPRsMax <= SQ_MAX_PGM_SGPRS);

  RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
  RegisterEncoding.VGPRL =
      RegisterEncoding.VGPR0 + HardwareLimits.NumVGPRsMax - 1;
  RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
  RegisterEncoding.SGPRL =
      RegisterEncoding.SGPR0 + HardwareLimits.NumSGPRsMax - 1;

  TrackedWaitcntSet.clear();
  RpotIdxMap.clear();
  BlockInfos.clear();

  // Keep iterating over the blocks in reverse post order, inserting and
  // updating s_waitcnt where needed, until a fix point is reached.
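  // (A change that propagates along a back edge, i.e. to a successor whose
  // reverse post-order index is not greater than the current block's, marks
  // that successor dirty and sets Repeat below, forcing another iteration of
  // the outer loop.)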
  for (MachineBasicBlock *MBB :
       ReversePostOrderTraversal<MachineFunction *>(&MF)) {
    RpotIdxMap[MBB] = BlockInfos.size();
    BlockInfos.emplace_back(MBB);
  }

  std::unique_ptr<WaitcntBrackets> Brackets;
  bool Modified = false;
  bool Repeat;
  do {
    Repeat = false;

    for (BlockInfo &BI : BlockInfos) {
      if (!BI.Dirty)
        continue;

      unsigned Idx = std::distance(&*BlockInfos.begin(), &BI);

      if (BI.Incoming) {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
        else
          *Brackets = *BI.Incoming;
      } else {
        if (!Brackets)
          Brackets = std::make_unique<WaitcntBrackets>(ST);
        else
          Brackets->clear();
      }

      Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
      BI.Dirty = false;

      if (Brackets->hasPending()) {
        BlockInfo *MoveBracketsToSucc = nullptr;
        for (MachineBasicBlock *Succ : BI.MBB->successors()) {
          unsigned SuccIdx = RpotIdxMap[Succ];
          BlockInfo &SuccBI = BlockInfos[SuccIdx];
          if (!SuccBI.Incoming) {
            SuccBI.Dirty = true;
            if (SuccIdx <= Idx)
              Repeat = true;
            if (!MoveBracketsToSucc) {
              MoveBracketsToSucc = &SuccBI;
            } else {
              SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
            }
          } else if (SuccBI.Incoming->merge(*Brackets)) {
            SuccBI.Dirty = true;
            if (SuccIdx <= Idx)
              Repeat = true;
          }
        }
        if (MoveBracketsToSucc)
          MoveBracketsToSucc->Incoming = std::move(Brackets);
      }
    }
  } while (Repeat);

  SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;

  bool HaveScalarStores = false;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
       ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      if (!HaveScalarStores && TII->isScalarStore(*I))
        HaveScalarStores = true;

      if (I->getOpcode() == AMDGPU::S_ENDPGM ||
          I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
        EndPgmBlocks.push_back(&MBB);
    }
  }

  if (HaveScalarStores) {
    // If scalar writes are used, the cache must be flushed or else the next
    // wave to reuse the same scratch memory can be clobbered.
    //
    // Insert s_dcache_wb at wave termination points if there were any scalar
    // stores, and only if the cache hasn't already been flushed. This could be
    // improved by looking across blocks for flushes in postdominating blocks
    // from the stores, but an explicitly requested flush is probably very rare.
    for (MachineBasicBlock *MBB : EndPgmBlocks) {
      bool SeenDCacheWB = false;

      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
           I != E; ++I) {
        if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
          SeenDCacheWB = true;
        else if (TII->isScalarStore(*I))
          SeenDCacheWB = false;

        // FIXME: It would be better to insert this before a waitcnt if any.
        if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
             I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
            !SeenDCacheWB) {
          Modified = true;
          BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
        }
      }
    }
  }

  if (!MFI->isEntryFunction()) {
    // Wait for any outstanding memory operations that the input registers may
    // depend on. We can't track them, and it's better to do the wait after the
    // costly call sequence.

    // TODO: Could insert earlier and schedule more liberally with operations
    // that only use caller preserved registers.
    MachineBasicBlock &EntryBB = MF.front();
    if (ST->hasVscnt())
      BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(),
              TII->get(AMDGPU::S_WAITCNT_VSCNT))
          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
          .addImm(0);
    BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(),
            TII->get(AMDGPU::S_WAITCNT))
        .addImm(0);

    Modified = true;
  }

  return Modified;
}