Searched refs:VMEM (Results 1 – 5 of 5) sorted by relevance
83    int checkVMEMHazards(MachineInstr* VMEM);
72    VMEM = 1u << 4,   enumerator
79    ALL = ALU | VALU | SALU | MFMA | VMEM | VMEM_READ | VMEM_WRITE | DS |
2411  else if (((SGMask & SchedGroupMask::VMEM) != SchedGroupMask::NONE) &&   in canAddMI()
2634  if ((InvertedMask & SchedGroupMask::VMEM) == SchedGroupMask::NONE)   in invertSchedBarrierMask()
2639  InvertedMask &= ~SchedGroupMask::VMEM;   in invertSchedBarrierMask()
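The hits above show SchedGroupMask to be a bit-flag enumeration (VMEM = 1u << 4, with ALL as the OR of the individual bits) whose VMEM bit is tested and cleared in canAddMI() and invertSchedBarrierMask(). Below is a minimal sketch of that pattern; apart from VMEM = 1u << 4 and the shape of the ALL expression quoted above, the enumerator values, the operator overloads, and the superset/sub-mask consistency rule are assumptions for illustration, not code copied from LLVM.

    #include <cstdint>

    // Sketch of a SchedGroupMask-style bitmask enum. Only VMEM = 1u << 4 and
    // the shape of ALL come from the hits; the other values are assumed.
    enum class SchedGroupMask : uint32_t {
      NONE = 0u,
      ALU = 1u << 0,
      VALU = 1u << 1,
      SALU = 1u << 2,
      MFMA = 1u << 3,
      VMEM = 1u << 4,
      VMEM_READ = 1u << 5,
      VMEM_WRITE = 1u << 6,
      ALL = ALU | VALU | SALU | MFMA | VMEM | VMEM_READ | VMEM_WRITE,
    };

    constexpr SchedGroupMask operator|(SchedGroupMask A, SchedGroupMask B) {
      return SchedGroupMask(uint32_t(A) | uint32_t(B));
    }
    constexpr SchedGroupMask operator&(SchedGroupMask A, SchedGroupMask B) {
      return SchedGroupMask(uint32_t(A) & uint32_t(B));
    }
    constexpr SchedGroupMask operator~(SchedGroupMask A) {
      // Invert only within the defined bits, as a bitmask enum normally would.
      return SchedGroupMask(~uint32_t(A) & uint32_t(SchedGroupMask::ALL));
    }

    // Shape of the invertSchedBarrierMask() hits: invert the user mask, then
    // keep the VMEM superset bit consistent with its read/write sub-masks.
    // The consistency rule here is an assumption matching the two hits.
    SchedGroupMask invertSchedBarrierMaskSketch(SchedGroupMask Mask) {
      SchedGroupMask InvertedMask = ~Mask;
      if ((InvertedMask & SchedGroupMask::VMEM) == SchedGroupMask::NONE)
        InvertedMask = InvertedMask & ~SchedGroupMask::VMEM_READ &
                       ~SchedGroupMask::VMEM_WRITE;
      else if ((InvertedMask & SchedGroupMask::VMEM_READ) ==
                   SchedGroupMask::NONE ||
               (InvertedMask & SchedGroupMask::VMEM_WRITE) ==
                   SchedGroupMask::NONE)
        InvertedMask = InvertedMask & ~SchedGroupMask::VMEM;
      return InvertedMask;
    }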
151   // If XNACK is enabled, the VMEM latency can be worse.
208   "VMEM instruction followed by scalar writing to EXEC mask, M0 or SGPR leads to incorrect execution…"
232   "Switching between LDS and VMEM-tex not waiting VM_VSRC=0"
253   "MIMG-NSA followed by VMEM fail if EXEC_LO or EXEC_HI equals zero"
959   "VMEM instructions of the same type write VGPR results in order"
717   int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {   in checkVMEMHazards() argument
721   int WaitStatesNeeded = checkSoftClauseHazards(VMEM);   in checkVMEMHazards()
729   for (const MachineOperand &Use : VMEM->uses()) {   in checkVMEMHazards()
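Taken together, these definition hits outline the structure of the check: start from any soft-clause penalty, then for each register the VMEM instruction uses, compare the hazard's wait-state budget against the states already elapsed since a hazardous def, and keep the maximum deficit. Below is a self-contained toy model of that counting scheme; the Instr struct, the 5-state budget, the VALU-def condition, and the one-wait-state-per-instruction convention are stand-ins for the real MachineInstr/TII machinery, not LLVM API.

    #include <algorithm>
    #include <vector>

    // Toy stand-in for MachineInstr: registers defined/used, plus whether
    // the instruction belongs to the hazardous producer class.
    struct Instr {
      std::vector<int> Defs;
      std::vector<int> Uses;
      bool IsVALU = false;
    };

    // Wait states Consumer still needs before issuing, given the already
    // emitted instructions (oldest first). Mirrors the per-use max() that
    // checkVMEMHazards() computes.
    int checkVMEMHazardsSketch(const std::vector<Instr> &Emitted,
                               const Instr &Consumer) {
      const int VmemSgprWaitStates = 5; // assumed budget for this hazard
      int WaitStatesNeeded = 0;
      for (int UseReg : Consumer.Uses) {
        int Elapsed = 1; // instruction right before Consumer = one state
        for (auto It = Emitted.rbegin(); It != Emitted.rend();
             ++It, ++Elapsed) {
          bool DefinesReg = std::find(It->Defs.begin(), It->Defs.end(),
                                      UseReg) != It->Defs.end();
          if (DefinesReg && It->IsVALU) {
            WaitStatesNeeded =
                std::max(WaitStatesNeeded, VmemSgprWaitStates - Elapsed);
            break; // only the most recent def of this register matters
          }
        }
      }
      return WaitStatesNeeded;
    }

In this model, a VALU def of a register immediately followed by a VMEM read of the same register reports a four-state deficit, and every independent instruction scheduled between the two shrinks the deficit by one until no extra wait states are needed.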
293   // MASK = 0x0000 0010: ALL VMEM instructions may be scheduled across SCHED_BARRIER.
294   // MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER.
295   // MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER.
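These mask comments are the user-facing contract of the sched_barrier mechanism: a set bit names an instruction class that may still be reordered across the barrier, and a clear bit pins that class to its side. From C or C++ source the mask is normally passed through the clang builtin __builtin_amdgcn_sched_barrier, which lowers to the llvm.amdgcn.sched.barrier intrinsic. A small sketch follows; the kernel body is illustrative (compile with clang for an amdgcn target):

    // Mask 0x0010 is the documented "all VMEM" bit: VMEM loads/stores may
    // move across the barrier, while other classes (e.g. VALU) may not.
    extern "C" void scale(float *Out, const float *In) {
      float X = In[0] * 2.0f;
      __builtin_amdgcn_sched_barrier(0x0010);
      Out[0] = X;
    }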