//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <optional>

using namespace llvm;

namespace {

class AMDGPUMCCodeEmitter : public MCCodeEmitter {
  const MCRegisterInfo &MRI;
  const MCInstrInfo &MCII;

public:
  AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
      : MRI(MRI), MCII(MCII) {}

  /// Encode the instruction and write it to \p CB.
  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// Encode an fp or int literal.
  std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const;

  void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                             APInt &Inst, APInt &Scratch,
                             const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII,
                                               MCContext &Ctx) {
  return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
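// For example, 7 encodes as 135 (128 + 7) and -3 encodes as 195 (192 + 3).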
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

static uint32_t getLitBF16Encoding(uint16_t Val) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  // clang-format off
  switch (Val) {
  case 0x3F00: return 240; // 0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; // 1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; // 2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; // 4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; // 1.0 / (2.0 * pi)
  default: return 255;
  }
  // clang-format on
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;

  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;

  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;

  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;

  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;

  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;

  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;

  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

// 16-bit integer literals share the 32-bit integer inline-immediate encoding;
// values outside the inline range fall through to the literal marker (255).
static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI) {
  return getLit32Encoding(Val, STI);
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;

  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;

  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;

  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;

  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;

  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;

  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;

  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

std::optional<uint32_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                    const MCOperandInfo &OpInfo,
                                    const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return {};

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_BF16:
  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_BF16:
  case AMDGPU::OPERAND_REG_INLINE_AC_BF16:
    // We don't actually need to check Inv2Pi here because BF16 instructions
    // can only be emitted for targets that already support the feature.
    return getLitBF16Encoding(static_cast<uint16_t>(Imm));

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
    return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2BF16:
    return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
    if (AMDGPU::hasNamedOperand(Opcode, src2))
      return 0;
    if (AMDGPU::hasNamedOperand(Opcode, src1))
      return OP_SEL_HI_2;
    if (AMDGPU::hasNamedOperand(Opcode, src0))
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
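  // Missing sources default their op_sel_hi bits to 1; with no op_sel_hi
  // operand at all, every bit defaults to 1.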
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}

// Matches a VOP3-encoded instruction that implicitly defines EXEC, i.e. a
// GFX10+ v_cmpx opcode promoted to the 64-bit VOP3 encoding.
static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  // GFX10+ v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
  // Documentation requires dst to be encoded as EXEC (0x7E), but it looks
  // like the actual value encoded for the dst operand is ignored by HW. It
  // was decided to define dst as "do not care" in td files to allow the
  // disassembler to accept any dst value. However, dst is encoded as EXEC
  // for compatibility with SP3.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                AMDGPU::HWEncoding::REG_IDX_MASK;
  }

  for (unsigned i = 0; i < bytes; i++) {
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
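    // Pad the extra NSA address bytes out to the next dword boundary.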
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }

  // If the encoded size already exceeds the base instruction size, any
  // literal has been emitted above and there is nothing left to append.
  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Do not emit literals from SISrc operands for insts with mandatory
  // literals.
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || *Enc != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    // A 64-bit FP literal is encoded using only its high 32 bits; the low
    // bits are implicitly zero.
    if (Desc.operands()[i].OperandType == AMDGPU::OPERAND_REG_IMM_FP64)
      Imm = Hi_32(Imm);

    support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);

    // Only one literal value allowed.
    break;
  }
}

void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}

void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
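  // VCC is the implicit default destination and encodes as zero; any other
  // SGPR destination sets VOPC_DST_VCC_MASK alongside its register index.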
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}

void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  bool IsAGPR = false;
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_288RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_320RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_352RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_384RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    IsAGPR = true;

  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
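    // A difference of symbols (Sub) resolves to a constant and is never
    // PC-relative.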
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
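  // Recover the operand index from the operand's position within MI's
  // operand list.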
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
  // VGPRs include the suffix/op_sel bit in the register encoding, but
  // immediates and SGPRs include it in src_modifiers. Therefore, copy the
  // op_sel bit from the src operands into the src_modifier operands if Op is
  // src_modifiers and the corresponding src is a VGPR.
  int SrcMOIdx = -1;
  assert(OpNo < INT_MAX);
  if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::src0_modifiers)) {
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      if (AMDGPU::isHi(DstReg, MRI))
        Op |= SISrcMods::DST_OP_SEL;
    }
  } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                              MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                            MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
  if (SrcMOIdx == -1)
    return;

  const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
  if (!SrcMO.isReg())
    return;
  auto SrcReg = SrcMO.getReg();
  if (AMDGPU::isSGPR(SrcReg, &MRI))
    return;
  if (AMDGPU::isHi(SrcReg, MRI))
    Op |= SISrcMods::OP_SEL_0;
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
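    // 9-bit layout: bit 8 = VGPR flag, bit 7 = hi-half selector,
    // bits 6..0 = register index.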
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  int64_t Val;
  if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
    Op = Val;
    return;
  }

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel or not should not depend on
    // what the expression looks like. Given that this is just a general
    // expression, it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // And expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
      Op = *Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"