
Searched refs:VGPR (Results 1 – 20 of 20), sorted by relevance

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
AMDGPUCallingConv.td
137 (add (sequence "VGPR%u", 40, 47),
138 (sequence "VGPR%u", 56, 63),
139 (sequence "VGPR%u", 72, 79),
140 (sequence "VGPR%u", 88, 95),
141 (sequence "VGPR%u", 104, 111),
142 (sequence "VGPR%u", 120, 127),
143 (sequence "VGPR%u", 136, 143),
144 (sequence "VGPR%u", 152, 159),
145 (sequence "VGPR%u", 168, 175),
146 (sequence "VGPR%u", 184, 191),
[all …]
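The lines above come from callee-saved-register lists built with TableGen's `sequence` set operator, which expands a printf-style template over an inclusive integer range; `add` then concatenates the expanded sets into one register list. A minimal sketch of the construct, assuming the surrounding def is a CalleeSavedRegs list as is typical in this file (the def name is hypothetical, not taken from the source):

    // (sequence "VGPR%u", 40, 47) expands to VGPR40, VGPR41, ..., VGPR47.
    def CSR_AMDGPU_Sketch : CalleeSavedRegs<
      (add (sequence "VGPR%u", 40, 47),   // VGPR40..VGPR47
           (sequence "VGPR%u", 56, 63))   // VGPR56..VGPR63
    >;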
SIRegisterInfo.td
348 // VGPR registers
350 defm VGPR#Index :
407 // Give all SGPR classes higher priority than VGPR classes, because
596 (add (interleave (sequence "VGPR%u_LO16", 0, 255),
597 (sequence "VGPR%u_HI16", 0, 255)))> {
602 // This is the base class for VGPR{128..255}_{LO16,HI16}.
610 (add (interleave (sequence "VGPR%u_LO16", 0, 127),
611 (sequence "VGPR%u_HI16", 0, 127)))> {
616 // This is the base class for VGPR{0..127}_{LO16,HI16}.
620 // VGPR 32-bit registers
[all …]
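In the snippet above, `interleave` is the TableGen set operator that alternates elements of its argument lists, which keeps each register's low and high 16-bit halves adjacent in the allocation order. A minimal sketch under that assumption (hypothetical def name, range shortened for illustration):

    // Expands to VGPR0_LO16, VGPR0_HI16, VGPR1_LO16, VGPR1_HI16, ...
    def VGPR_16_Sketch : RegisterClass<"AMDGPU", [i16, f16], 16,
      (add (interleave (sequence "VGPR%u_LO16", 0, 3),
                       (sequence "VGPR%u_HI16", 0, 3)))>;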
SIRegisterInfo.h
57 Register VGPR;
61 SpilledReg(Register R, int L) : VGPR(R), Lane(L) {}
64 bool hasReg() { return VGPR != 0; }
98 static bool isChainScratchRegister(Register VGPR);
153 /// free VGPR lane to spill.
209 /// \returns true if this class contains only VGPR registers in isAGPRClass()
219 /// \returns true only if this class contains both VGPR and AGPR registers in isVSSuperClass()
224 /// \returns true only if this class contains both VGPR and SGPR registers in hasVGPRs()
229 /// \returns true if this class contains VGPR registers. in hasAGPRs()
249 /// \returns A VGPR re
[all …]
AMDGPURegisterBanks.td
13 def VGPRRegBank : RegisterBank<"VGPR",
SIMachineFunctionInfo.cpp
281 void SIMachineFunctionInfo::allocateWWMSpill(MachineFunction &MF, Register VGPR, in allocateWWMSpill() argument
284 if (isEntryFunction() || WWMSpills.count(VGPR)) in allocateWWMSpill()
292 if (isChainFunction() && SIRegisterInfo::isChainScratchRegister(VGPR)) in allocateWWMSpill()
296 VGPR, MF.getFrameInfo().CreateSpillStackObject(Size, Alignment))); in allocateWWMSpill()
SIFrameLowering.cpp
108 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane in getVGPRSpillLaneOrTempRegister()
279 Spill[I].VGPR) in saveToVGPRLane()
282 .addReg(Spill[I].VGPR, RegState::Undef); in saveToVGPRLane()
325 .addReg(Spill[I].VGPR) in restoreFromVGPRLane()
944 Register VGPR = Reg.first; in emitCSRSpillStores() local
947 VGPR, FI, FrameReg); in emitCSRSpillStores()
1047 Register VGPR = Reg.first; in emitCSRSpillRestores() local
1050 VGPR, FI, FrameReg); in emitCSRSpillRestores()
SIInstrInfo.td
100 SDTCisVT<2, i32>, // vindex(VGPR)
101 SDTCisVT<3, i32>, // voffset(VGPR)
118 SDTCisVT<2, i32>, // vindex(VGPR)
119 SDTCisVT<3, i32>, // voffset(VGPR)
136 SDTCisVT<2, i32>, // vindex(VGPR)
137 SDTCisVT<3, i32>, // voffset(VGPR)
174 SDTCisVT<2, i32>, // vindex(VGPR)
175 SDTCisVT<3, i32>, // voffset(VGPR)
200 SDTCisVT<3, i32>, // vindex(VGPR)
201 SDTCisVT<4, i32>, // voffset(VGPR)
[all …]
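The `SDTCisVT` entries above are SelectionDAG type constraints: `SDTCisVT<N, VT>` pins slot N of an `SDTypeProfile` (results are numbered first, then operands) to the value type VT. A minimal sketch in that style (the profile name and operand count are hypothetical):

    // One result and four operands; slots 2 and 3 must be i32, matching
    // the vindex/voffset convention commented above.
    def SDTSketchBufferOp : SDTypeProfile<1, 4, [
      SDTCisVT<2, i32>,   // vindex(VGPR)
      SDTCisVT<3, i32>    // voffset(VGPR)
    ]>;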
SISchedule.td
305 // Add 1 stall cycle for VGPR read.
338 // Add 1 stall cycle for VGPR read.
AMDGPUGenRegisterBankInfo.def
70 {0, 1, VGPRRegBank}, // VGPR begin
SIPeepholeSDWA.cpp
1226 Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); in legalizeScalarOperands() local
1228 TII->get(AMDGPU::V_MOV_B32_e32), VGPR); in legalizeScalarOperands()
1234 Op.ChangeToRegister(VGPR, false); in legalizeScalarOperands()
SIRegisterInfo.cpp
443 bool SIRegisterInfo::isChainScratchRegister(Register VGPR) { in isChainScratchRegister() argument
444 return VGPR >= AMDGPU::VGPR0 && VGPR < AMDGPU::VGPR8; in isChainScratchRegister()
1796 SB.TII.get(AMDGPU::SI_SPILL_S32_TO_VGPR), Spill.VGPR) in spillSGPR()
1799 .addReg(Spill.VGPR); in spillSGPR()
1906 .addReg(Spill.VGPR) in restoreSGPR()
VOP1Instructions.td
367 // Restrict src0 to be VGPR
733 // Restrict src0 to be VGPR
1343 // Copy of v_mov_b32 with $vdst as a use operand for use with VGPR
1351 // Copy of v_mov_b32 for use with VGPR indexing mode. An implicit use of the
AMDGPU.td
277 …"Scratch instructions with a VGPR offset and a negative immediate offset that is not a multiple of…
438 "Has VGPR mode register indexing"
935 "Has single-use VGPR hint instructions"
959 "VMEM instructions of the same type write VGPR results in order"
SIInstrFormats.td
49 // Combined SGPR/VGPR spill bit
SIMachineFunctionInfo.h
691 void allocateWWMSpill(MachineFunction &MF, Register VGPR, uint64_t Size = 4,
VOP3Instructions.td
490 // blocking folding SGPR->VGPR copies later.
780 // GISel-specific pattern that avoids creating a SGPR->VGPR copy if
781 // $src2 is a VGPR.
SIInstructions.td
854 // These variants of V_INDIRECT_REG_READ/WRITE use VGPR indexing. By using these
856 // that switch the VGPR indexing mode. Spills to accvgprs could be effected by
959 // VGPR or AGPR spill instructions. In case of AGPR spilling a temp register
960 // needs to be used and an extra instruction to move between VGPR and AGPR.
3396 // Avoid pointlessly materializing a constant in VGPR.
VOPInstructions.td
443 bits<10> vdst; // VGPR or AGPR, but not SGPR. vdst{8} is not encoded in the instruction.
FLATInstructions.td
60 bits<1> has_sve = 0; // Scratch VGPR Enable
/freebsd/contrib/llvm-project/llvm/include/llvm/IR/
IntrinsicsAMDGPU.td
559 // The pointer argument is assumed to be dynamically uniform if a VGPR.
855 P_.RetTypes, // vdata(VGPR) -- for load/atomic-with-return
857 !foreach(arg, P_.DataArgs, arg.Type), // vdata(VGPR) -- for store/atomic
859 P_.AddrTypes, // vaddr(VGPR)
1127 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
1144 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
1157 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
1175 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
1188 llvm_i32_ty, // vindex(VGPR)
1189 llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
[all …]
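The comments above record which intrinsic operands are expected to live in vector registers. A minimal sketch of an intrinsic declared in this style (the name, operand list, and property list are hypothetical, not definitions from the file):

    def int_amdgcn_sketch_buffer_load : Intrinsic<
      [llvm_anyfloat_ty],   // vdata(VGPR) -- value returned by the load
      [llvm_v4i32_ty,       // rsrc(SGPR) -- buffer resource descriptor
       llvm_i32_ty,         // vindex(VGPR)
       llvm_i32_ty],        // offset(VGPR/imm, included in bounds checking and swizzling)
      [IntrReadMem]>;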