//===-- AMDGPUInstPrinter.cpp - AMDGPU MC Inst -> ASM ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// \file
//===----------------------------------------------------------------------===//

#include "AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetParser.h"

using namespace llvm;
using namespace llvm::AMDGPU;

static cl::opt<bool> Keep16BitSuffixes(
  "amdgpu-keep-16-bit-reg-suffixes",
  cl::desc("Keep .l and .h suffixes in asm for debugging purposes"),
  cl::init(false),
  cl::ReallyHidden);

void AMDGPUInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
  // FIXME: The current implementation of
  // AsmParser::parseRegisterOrRegisterNumber in MC implies we either emit this
  // as an integer or we provide a name which represents a physical register.
  // For CFI instructions we really want to emit a name for the DWARF register
  // instead, because there may be multiple DWARF registers corresponding to a
  // single physical register. One case where this problem manifests is with
  // wave32/wave64 where using the physical register name is ambiguous: if we
  // write e.g. `.cfi_undefined v0` we lose information about the wavefront
  // size which we need to encode the register in the final DWARF. Ideally we
  // would extend MC to support parsing DWARF register names so we could do
  // something like `.cfi_undefined dwarf_wave32_v0`. For now we just live with
  // non-pretty DWARF register names in assembly text.
  OS << RegNo;
}

void AMDGPUInstPrinter::printInst(const MCInst *MI, uint64_t Address,
                                  StringRef Annot, const MCSubtargetInfo &STI,
                                  raw_ostream &OS) {
  OS.flush();
  printInstruction(MI, Address, STI, OS);
  printAnnotation(OS, Annot);
}

void AMDGPUInstPrinter::printU4ImmOperand(const MCInst *MI, unsigned OpNo,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  O << formatHex(MI->getOperand(OpNo).getImm() & 0xf);
}

void AMDGPUInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo,
                                          raw_ostream &O) {
  O << formatHex(MI->getOperand(OpNo).getImm() & 0xff);
}

void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  // It's possible to end up with a 32-bit literal used with a 16-bit operand
  // with ignored high bits. Print as 32-bit anyway in that case.
  int64_t Imm = MI->getOperand(OpNo).getImm();
  if (isInt<16>(Imm) || isUInt<16>(Imm))
    O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
  else
    printU32ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printU4ImmDecOperand(const MCInst *MI, unsigned OpNo,
                                             raw_ostream &O) {
  O << formatDec(MI->getOperand(OpNo).getImm() & 0xf);
}

void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo,
                                             raw_ostream &O) {
  O << formatDec(MI->getOperand(OpNo).getImm() & 0xff);
}

void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
                                              raw_ostream &O) {
  O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
}

void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
}

void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
                                      raw_ostream &O, StringRef BitName) {
  if (MI->getOperand(OpNo).getImm()) {
    O << ' ' << BitName;
  }
}

void AMDGPUInstPrinter::printOffen(const MCInst *MI, unsigned OpNo,
                                   raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "offen");
}

void AMDGPUInstPrinter::printIdxen(const MCInst *MI, unsigned OpNo,
                                   raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "idxen");
}

void AMDGPUInstPrinter::printAddr64(const MCInst *MI, unsigned OpNo,
                                    raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "addr64");
}

void AMDGPUInstPrinter::printMBUFOffset(const MCInst *MI, unsigned OpNo,
                                        raw_ostream &O) {
  if (MI->getOperand(OpNo).getImm()) {
    O << " offset:";
    printU16ImmDecOperand(MI, OpNo, O);
  }
}

void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm != 0) {
    O << " offset:";
    printU16ImmDecOperand(MI, OpNo, O);
  }
}

void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm != 0) {
    O << " offset:";

    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
    bool IsFlatSeg = !(Desc.TSFlags &
                       (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch));

    if (IsFlatSeg) { // Unsigned offset
      printU16ImmDecOperand(MI, OpNo, O);
    } else { // Signed offset
      if (AMDGPU::isGFX10Plus(STI)) {
        O << formatDec(SignExtend32<12>(MI->getOperand(OpNo).getImm()));
      } else {
        O << formatDec(SignExtend32<13>(MI->getOperand(OpNo).getImm()));
      }
    }
  }
}

void AMDGPUInstPrinter::printOffset0(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  if (MI->getOperand(OpNo).getImm()) {
    O << " offset0:";
    printU8ImmDecOperand(MI, OpNo, O);
  }
}

void AMDGPUInstPrinter::printOffset1(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  if (MI->getOperand(OpNo).getImm()) {
    O << " offset1:";
    printU8ImmDecOperand(MI, OpNo, O);
  }
}

void AMDGPUInstPrinter::printSMRDOffset8(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  printU32ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printSMEMOffset(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
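  // SMEM byte offsets are printed as a bare hex operand rather than with an
  // "offset:" prefix, e.g. "s_load_dword s0, s[0:1], 0x10".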
  O << formatHex(MI->getOperand(OpNo).getImm());
}

void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  printU32ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printGDS(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "gds");
}

void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI, raw_ostream &O) {
  auto Imm = MI->getOperand(OpNo).getImm();
  if (Imm & CPol::GLC)
    O << " glc";
  if (Imm & CPol::SLC)
    O << " slc";
  if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI))
    O << " dlc";
  if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI))
    O << " scc";
  if (Imm & ~CPol::ALL)
    O << " /* unexpected cache policy bit */";
}

void AMDGPUInstPrinter::printSWZ(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
}

void AMDGPUInstPrinter::printTFE(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "tfe");
}

void AMDGPUInstPrinter::printDMask(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI, raw_ostream &O) {
  if (MI->getOperand(OpNo).getImm()) {
    O << " dmask:";
    printU16ImmOperand(MI, OpNo, STI, O);
  }
}

void AMDGPUInstPrinter::printDim(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  unsigned Dim = MI->getOperand(OpNo).getImm();
  O << " dim:SQ_RSRC_IMG_";

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
  if (DimInfo)
    O << DimInfo->AsmSuffix;
  else
    O << Dim;
}

void AMDGPUInstPrinter::printUNorm(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI, raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "unorm");
}

void AMDGPUInstPrinter::printDA(const MCInst *MI, unsigned OpNo,
                                const MCSubtargetInfo &STI, raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "da");
}

void AMDGPUInstPrinter::printR128A16(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  if (STI.hasFeature(AMDGPU::FeatureR128A16))
    printNamedBit(MI, OpNo, O, "a16");
  else
    printNamedBit(MI, OpNo, O, "r128");
}

void AMDGPUInstPrinter::printGFX10A16(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "a16");
}

void AMDGPUInstPrinter::printLWE(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "lwe");
}

void AMDGPUInstPrinter::printD16(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "d16");
}

void AMDGPUInstPrinter::printExpCompr(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "compr");
}

void AMDGPUInstPrinter::printExpVM(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "vm");
}

void AMDGPUInstPrinter::printFORMAT(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
}

void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int OpNo =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format);
  assert(OpNo != -1);

  unsigned Val = MI->getOperand(OpNo).getImm();
  if (AMDGPU::isGFX10Plus(STI)) {
    if (Val == UFMT_DEFAULT)
      return;
    if (isValidUnifiedFormat(Val)) {
      O << " format:[" << getUnifiedFormatName(Val) << ']';
    } else {
      O << " format:" << Val;
    }
  } else {
    if (Val == DFMT_NFMT_DEFAULT)
      return;
    if (isValidDfmtNfmt(Val, STI)) {
      unsigned Dfmt;
      unsigned Nfmt;
      decodeDfmtNfmt(Val, Dfmt, Nfmt);
      O << " format:[";
      if (Dfmt != DFMT_DEFAULT) {
        O << getDfmtName(Dfmt);
        if (Nfmt != NFMT_DEFAULT) {
          O << ',';
        }
      }
      if (Nfmt != NFMT_DEFAULT) {
        O << getNfmtName(Nfmt, STI);
      }
      O << ']';
    } else {
      O << " format:" << Val;
    }
  }
}

void AMDGPUInstPrinter::printRegOperand(unsigned RegNo, raw_ostream &O,
                                        const MCRegisterInfo &MRI) {
#if !defined(NDEBUG)
  switch (RegNo) {
  case AMDGPU::FP_REG:
  case AMDGPU::SP_REG:
  case AMDGPU::PRIVATE_RSRC_REG:
    llvm_unreachable("pseudo-register should not ever be emitted");
  case AMDGPU::SCC:
    llvm_unreachable("pseudo scc should not ever be emitted");
  default:
    break;
  }
#endif

  StringRef RegName(getRegisterName(RegNo));
  if (!Keep16BitSuffixes)
    if (!RegName.consume_back(".l"))
      RegName.consume_back(".h");

  O << RegName;
}

void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  auto Opcode = MI->getOpcode();
  auto Flags = MII.get(Opcode).TSFlags;

  if (OpNo == 0) {
    if (Flags & SIInstrFlags::VOP3) {
      if (!getVOP3IsSingle(Opcode))
        O << "_e64";
    } else if (Flags & SIInstrFlags::DPP) {
      O << "_dpp";
    } else if (Flags & SIInstrFlags::SDWA) {
      O << "_sdwa";
    } else if (((Flags & SIInstrFlags::VOP1) && !getVOP1IsSingle(Opcode)) ||
               ((Flags & SIInstrFlags::VOP2) && !getVOP2IsSingle(Opcode))) {
      O << "_e32";
    }
    O << " ";
  }

  printOperand(MI, OpNo, STI, O);

  // Print default vcc/vcc_lo operand.
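  // The VOP2b carry instructions below take an implicit carry-out operand in
  // vcc (wave64) or vcc_lo (wave32) that is not an explicit MCOperand, so it
  // is printed by hand right after the destination, e.g.
  // "v_add_co_ci_u32_e32 v0, vcc_lo, v1, v2, vcc_lo" on wave32.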
  switch (Opcode) {
  default: break;

  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
    printDefaultVccOperand(1, STI, O);
    break;
  }
}

void AMDGPUInstPrinter::printVINTRPDst(const MCInst *MI, unsigned OpNo,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  if (AMDGPU::isSI(STI) || AMDGPU::isCI(STI))
    O << " ";
  else
    O << "_e32 ";

  printOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printImmediateInt16(uint32_t Imm,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  int16_t SImm = static_cast<int16_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
  } else {
    uint64_t Imm16 = static_cast<uint16_t>(Imm);
    O << formatHex(Imm16);
  }
}

void AMDGPUInstPrinter::printImmediate16(uint32_t Imm,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  int16_t SImm = static_cast<int16_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
    return;
  }

  if (Imm == 0x3C00)
    O << "1.0";
  else if (Imm == 0xBC00)
    O << "-1.0";
  else if (Imm == 0x3800)
    O << "0.5";
  else if (Imm == 0xB800)
    O << "-0.5";
  else if (Imm == 0x4000)
    O << "2.0";
  else if (Imm == 0xC000)
    O << "-2.0";
  else if (Imm == 0x4400)
    O << "4.0";
  else if (Imm == 0xC400)
    O << "-4.0";
  else if (Imm == 0x3118 &&
           STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]) {
    O << "0.15915494";
  } else {
    uint64_t Imm16 = static_cast<uint16_t>(Imm);
    O << formatHex(Imm16);
  }
}

void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  uint16_t Lo16 = static_cast<uint16_t>(Imm);
  printImmediate16(Lo16, STI, O);
}

void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  int32_t SImm = static_cast<int32_t>(Imm);
  if (SImm >= -16 && SImm <= 64) {
    O << SImm;
    return;
  }

  if (Imm == FloatToBits(0.0f))
    O << "0.0";
  else if (Imm == FloatToBits(1.0f))
    O << "1.0";
  else if (Imm == FloatToBits(-1.0f))
    O << "-1.0";
  else if (Imm == FloatToBits(0.5f))
    O << "0.5";
  else if (Imm == FloatToBits(-0.5f))
    O << "-0.5";
  else if (Imm == FloatToBits(2.0f))
    O << "2.0";
  else if (Imm == FloatToBits(-2.0f))
    O << "-2.0";
  else if (Imm == FloatToBits(4.0f))
    O << "4.0";
  else if (Imm == FloatToBits(-4.0f))
    O << "-4.0";
  else if (Imm == 0x3e22f983 &&
           STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    O << "0.15915494";
  else
    O << formatHex(static_cast<uint64_t>(Imm));
}

void AMDGPUInstPrinter::printImmediate64(uint64_t Imm,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  int64_t SImm = static_cast<int64_t>(Imm);
  if (SImm >= -16 && SImm <= 64) {
    O << SImm;
    return;
  }

  if (Imm == DoubleToBits(0.0))
    O << "0.0";
  else if (Imm == DoubleToBits(1.0))
    O << "1.0";
  else if (Imm == DoubleToBits(-1.0))
    O << "-1.0";
  else if (Imm == DoubleToBits(0.5))
    O << "0.5";
  else if (Imm == DoubleToBits(-0.5))
    O << "-0.5";
  else if (Imm == DoubleToBits(2.0))
    O << "2.0";
  else if (Imm == DoubleToBits(-2.0))
    O << "-2.0";
  else if (Imm == DoubleToBits(4.0))
    O << "4.0";
  else if (Imm == DoubleToBits(-4.0))
    O << "-4.0";
  else if (Imm == 0x3fc45f306dc9c882 &&
           STI.getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm])
    O << "0.15915494309189532";
  else {
    assert(isUInt<32>(Imm) || Imm == 0x3fc45f306dc9c882);

    // In rare situations, we will have a 32-bit literal in a 64-bit
    // operand. This is technically allowed for the encoding of s_mov_b64.
    O << formatHex(static_cast<uint64_t>(Imm));
  }
}

void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (!Imm)
    return;

  O << " blgp:" << Imm;
}

void AMDGPUInstPrinter::printCBSZ(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (!Imm)
    return;

  O << " cbsz:" << Imm;
}

void AMDGPUInstPrinter::printABID(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (!Imm)
    return;

  O << " abid:" << Imm;
}

void AMDGPUInstPrinter::printDefaultVccOperand(unsigned OpNo,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  if (OpNo > 0)
    O << ", ";
  printRegOperand(STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
                  AMDGPU::VCC : AMDGPU::VCC_LO, O, MRI);
  if (OpNo == 0)
    O << ", ";
}

void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  // Print default vcc/vcc_lo operand of VOPC.
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (OpNo == 0 && (Desc.TSFlags & SIInstrFlags::VOPC) &&
      (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
       Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
    printDefaultVccOperand(OpNo, STI, O);

  if (OpNo >= MI->getNumOperands()) {
    O << "/*Missing OP" << OpNo << "*/";
    return;
  }

  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isReg()) {
    printRegOperand(Op.getReg(), O, MRI);
  } else if (Op.isImm()) {
    const uint8_t OpTy = Desc.OpInfo[OpNo].OperandType;
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    case AMDGPU::OPERAND_REG_IMM_V2INT32:
    case AMDGPU::OPERAND_REG_IMM_V2FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    case MCOI::OPERAND_IMMEDIATE:
      printImmediate32(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
      printImmediate64(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    case AMDGPU::OPERAND_REG_IMM_INT16:
      printImmediateInt16(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
      printImmediate16(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_IMM_V2INT16:
    case AMDGPU::OPERAND_REG_IMM_V2FP16:
      if (!isUInt<16>(Op.getImm()) &&
          STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal]) {
        printImmediate32(Op.getImm(), STI, O);
        break;
      }

      // Deal with 16-bit FP inline immediates not working.
      if (OpTy == AMDGPU::OPERAND_REG_IMM_V2FP16) {
        printImmediate16(static_cast<uint16_t>(Op.getImm()), STI, O);
        break;
      }
      LLVM_FALLTHROUGH;
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
      printImmediateInt16(static_cast<uint16_t>(Op.getImm()), STI, O);
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
      printImmediateV216(Op.getImm(), STI, O);
      break;
    case MCOI::OPERAND_UNKNOWN:
    case MCOI::OPERAND_PCREL:
      O << formatDec(Op.getImm());
      break;
    case MCOI::OPERAND_REGISTER:
      // FIXME: This should be removed and handled somewhere else. Seems to come
      // from a disassembler bug.
      O << "/*invalid immediate*/";
      break;
    default:
      // We hit this for the immediate instruction bits that don't yet have a
      // custom printer.
      llvm_unreachable("unexpected immediate operand type");
    }
  } else if (Op.isDFPImm()) {
    double Value = bit_cast<double>(Op.getDFPImm());
    // We special case 0.0 because otherwise it will be printed as an integer.
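    // Any other value is bit-cast and routed through the 32- or 64-bit
    // immediate printer, picked by the width of the operand's register class.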
    if (Value == 0.0)
      O << "0.0";
    else {
      const MCInstrDesc &Desc = MII.get(MI->getOpcode());
      int RCID = Desc.OpInfo[OpNo].RegClass;
      unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID));
      if (RCBits == 32)
        printImmediate32(FloatToBits(Value), STI, O);
      else if (RCBits == 64)
        printImmediate64(DoubleToBits(Value), STI, O);
      else
        llvm_unreachable("Invalid register class size");
    }
  } else if (Op.isExpr()) {
    const MCExpr *Exp = Op.getExpr();
    Exp->print(O, &MAI);
  } else {
    O << "/*INV_OP*/";
  }

  // Print default vcc/vcc_lo operand of v_cndmask_b32_e32.
  switch (MI->getOpcode()) {
  default: break;

  case AMDGPU::V_CNDMASK_B32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:

  case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
  case AMDGPU::V_CNDMASK_B32_e32_vi:
    if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo, STI, O);
    break;
  }

  if (Desc.TSFlags & SIInstrFlags::MTBUF) {
    int SOffsetIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::soffset);
    assert(SOffsetIdx != -1);
    if ((int)OpNo == SOffsetIdx)
      printSymbolicFormat(MI, STI, O);
  }
}

void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
                                                   unsigned OpNo,
                                                   const MCSubtargetInfo &STI,
                                                   raw_ostream &O) {
  unsigned InputModifiers = MI->getOperand(OpNo).getImm();

  // Use 'neg(...)' instead of '-' to avoid ambiguity.
  // This is important for integer literals because
  // -1 is not the same value as neg(1).
  bool NegMnemo = false;

  if (InputModifiers & SISrcMods::NEG) {
    if (OpNo + 1 < MI->getNumOperands() &&
        (InputModifiers & SISrcMods::ABS) == 0) {
      const MCOperand &Op = MI->getOperand(OpNo + 1);
      NegMnemo = Op.isImm() || Op.isDFPImm();
    }
    if (NegMnemo) {
      O << "neg(";
    } else {
      O << '-';
    }
  }

  if (InputModifiers & SISrcMods::ABS)
    O << '|';
  printOperand(MI, OpNo + 1, STI, O);
  if (InputModifiers & SISrcMods::ABS)
    O << '|';

  if (NegMnemo) {
    O << ')';
  }
}

void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
                                                    unsigned OpNo,
                                                    const MCSubtargetInfo &STI,
                                                    raw_ostream &O) {
  unsigned InputModifiers = MI->getOperand(OpNo).getImm();
  if (InputModifiers & SISrcMods::SEXT)
    O << "sext(";
  printOperand(MI, OpNo + 1, STI, O);
  if (InputModifiers & SISrcMods::SEXT)
    O << ')';

  // Print default vcc/vcc_lo operand of VOP2b.
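  // For the SDWA carry instructions below, the implicit carry-in in
  // vcc/vcc_lo follows src1 and is not an explicit MCOperand, so append it
  // once the operand just printed was src1.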
  switch (MI->getOpcode()) {
  default: break;

  case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
    if ((int)OpNo + 1 == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                    AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo, STI, O);
    break;
  }
}

void AMDGPUInstPrinter::printDPP8(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  if (!AMDGPU::isGFX10Plus(STI))
    llvm_unreachable("dpp8 is not supported on ASICs earlier than GFX10");

  unsigned Imm = MI->getOperand(OpNo).getImm();
  O << "dpp8:[" << formatDec(Imm & 0x7);
  for (size_t i = 1; i < 8; ++i) {
    O << ',' << formatDec((Imm >> (3 * i)) & 0x7);
  }
  O << ']';
}

void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace AMDGPU::DPP;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);

  if (Src0Idx >= 0 &&
      Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID &&
      !AMDGPU::isLegal64BitDPPControl(Imm)) {
    O << " /* 64 bit dpp only supports row_newbcast */";
    return;
  } else if (Imm <= DppCtrl::QUAD_PERM_LAST) {
    O << "quad_perm:[";
    O << formatDec(Imm & 0x3) << ',';
    O << formatDec((Imm & 0xc) >> 2) << ',';
    O << formatDec((Imm & 0x30) >> 4) << ',';
    O << formatDec((Imm & 0xc0) >> 6) << ']';
  } else if ((Imm >= DppCtrl::ROW_SHL_FIRST) &&
             (Imm <= DppCtrl::ROW_SHL_LAST)) {
    O << "row_shl:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_SHR_FIRST) &&
             (Imm <= DppCtrl::ROW_SHR_LAST)) {
    O << "row_shr:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_ROR_FIRST) &&
             (Imm <= DppCtrl::ROW_ROR_LAST)) {
    O << "row_ror:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else if (Imm == DppCtrl::WAVE_SHL1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shl is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shl:1";
  } else if (Imm == DppCtrl::WAVE_ROL1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_rol is not supported starting from GFX10 */";
      return;
    }
    O << "wave_rol:1";
  } else if (Imm == DppCtrl::WAVE_SHR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shr is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shr:1";
  } else if (Imm == DppCtrl::WAVE_ROR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_ror is not supported starting from GFX10 */";
      return;
    }
    O << "wave_ror:1";
  } else if (Imm == DppCtrl::ROW_MIRROR) {
    O << "row_mirror";
  } else if (Imm == DppCtrl::ROW_HALF_MIRROR) {
    O << "row_half_mirror";
  } else if (Imm == DppCtrl::BCAST15) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:15";
  } else if (Imm == DppCtrl::BCAST31) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:31";
  } else if ((Imm >= DppCtrl::ROW_SHARE_FIRST) &&
             (Imm <= DppCtrl::ROW_SHARE_LAST)) {
    if (AMDGPU::isGFX90A(STI)) {
      O << "row_newbcast:";
    } else if (AMDGPU::isGFX10Plus(STI)) {
      O << "row_share:";
    } else {
      O << " /* row_newbcast/row_share is not supported on ASICs earlier "
           "than GFX90A/GFX10 */";
      return;
    }
    printU4ImmDecOperand(MI, OpNo, O);
  } else if ((Imm >= DppCtrl::ROW_XMASK_FIRST) &&
             (Imm <= DppCtrl::ROW_XMASK_LAST)) {
    if (!AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_xmask is not supported on ASICs earlier than GFX10 */";
      return;
    }
    O << "row_xmask:";
    printU4ImmDecOperand(MI, OpNo, O);
  } else {
    O << "/* Invalid dpp_ctrl value */";
  }
}

void AMDGPUInstPrinter::printRowMask(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  O << " row_mask:";
  printU4ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printBankMask(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  O << " bank_mask:";
  printU4ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printBoundCtrl(const MCInst *MI, unsigned OpNo,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (Imm) {
    O << " bound_ctrl:1";
  }
}

void AMDGPUInstPrinter::printFI(const MCInst *MI, unsigned OpNo,
                                const MCSubtargetInfo &STI,
                                raw_ostream &O) {
  using namespace llvm::AMDGPU::DPP;
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (Imm == DPP_FI_1 || Imm == DPP8_FI_1) {
    O << " fi:1";
  }
}

void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::SDWA;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (Imm) {
  case SdwaSel::BYTE_0: O << "BYTE_0"; break;
  case SdwaSel::BYTE_1: O << "BYTE_1"; break;
  case SdwaSel::BYTE_2: O << "BYTE_2"; break;
  case SdwaSel::BYTE_3: O << "BYTE_3"; break;
  case SdwaSel::WORD_0: O << "WORD_0"; break;
  case SdwaSel::WORD_1: O << "WORD_1"; break;
  case SdwaSel::DWORD: O << "DWORD"; break;
  default: llvm_unreachable("Invalid SDWA data select operand");
  }
}

void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  O << "dst_sel:";
  printSDWASel(MI, OpNo, O);
}

void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  O << "src0_sel:";
  printSDWASel(MI, OpNo, O);
}

void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  O << "src1_sel:";
  printSDWASel(MI, OpNo, O);
}

void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  using namespace llvm::AMDGPU::SDWA;

  O << "dst_unused:";
  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (Imm) {
  case DstUnused::UNUSED_PAD: O << "UNUSED_PAD"; break;
  case DstUnused::UNUSED_SEXT: O << "UNUSED_SEXT"; break;
  case DstUnused::UNUSED_PRESERVE: O << "UNUSED_PRESERVE"; break;
  default: llvm_unreachable("Invalid SDWA dest_unused operand");
  }
}

void AMDGPUInstPrinter::printExpSrcN(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI, raw_ostream &O,
                                     unsigned N) {
  unsigned Opc = MI->getOpcode();
  int EnIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::en);
  unsigned En = MI->getOperand(EnIdx).getImm();
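  // 'en' is a 4-bit enable mask: bit N set means export source N is live;
  // disabled sources are printed as "off".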

  int ComprIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::compr);

  // If compr is set, print as src0, src0, src1, src1
  if (MI->getOperand(ComprIdx).getImm())
    OpNo = OpNo - N + N / 2;

  if (En & (1 << N))
    printRegOperand(MI->getOperand(OpNo).getReg(), O, MRI);
  else
    O << "off";
}

void AMDGPUInstPrinter::printExpSrc0(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 0);
}

void AMDGPUInstPrinter::printExpSrc1(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 1);
}

void AMDGPUInstPrinter::printExpSrc2(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 2);
}

void AMDGPUInstPrinter::printExpSrc3(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 3);
}

void AMDGPUInstPrinter::printExpTgt(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  using namespace llvm::AMDGPU::Exp;

  // This is really a 6 bit field.
  unsigned Id = MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);

  int Index;
  StringRef TgtName;
  if (getTgtName(Id, TgtName, Index) && isSupportedTgtId(Id, STI)) {
    O << ' ' << TgtName;
    if (Index >= 0)
      O << Index;
  } else {
    O << " invalid_target_" << Id;
  }
}

static bool allOpsDefaultValue(const int* Ops, int NumOps, int Mod,
                               bool IsPacked, bool HasDstSel) {
  int DefaultValue = IsPacked && (Mod == SISrcMods::OP_SEL_1);

  for (int I = 0; I < NumOps; ++I) {
    if (!!(Ops[I] & Mod) != DefaultValue)
      return false;
  }

  if (HasDstSel && (Ops[0] & SISrcMods::DST_OP_SEL) != 0)
    return false;

  return true;
}

void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
                                            StringRef Name,
                                            unsigned Mod,
                                            raw_ostream &O) {
  unsigned Opc = MI->getOpcode();
  int NumOps = 0;
  int Ops[3];

  for (int OpName : { AMDGPU::OpName::src0_modifiers,
                      AMDGPU::OpName::src1_modifiers,
                      AMDGPU::OpName::src2_modifiers }) {
    int Idx = AMDGPU::getNamedOperandIdx(Opc, OpName);
    if (Idx == -1)
      break;

    Ops[NumOps++] = MI->getOperand(Idx).getImm();
  }

  const bool HasDstSel =
    NumOps > 0 &&
    Mod == SISrcMods::OP_SEL_0 &&
    MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3_OPSEL;

  const bool IsPacked =
    MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::IsPacked;

  if (allOpsDefaultValue(Ops, NumOps, Mod, IsPacked, HasDstSel))
    return;

  O << Name;
  for (int I = 0; I < NumOps; ++I) {
    if (I != 0)
      O << ',';

    O << !!(Ops[I] & Mod);
  }

  if (HasDstSel) {
    O << ',' << !!(Ops[0] & SISrcMods::DST_OP_SEL);
  }

  O << ']';
}

void AMDGPUInstPrinter::printOpSel(const MCInst *MI, unsigned,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_PERMLANE16_B32_gfx10 ||
      Opc == AMDGPU::V_PERMLANEX16_B32_gfx10) {
    auto FIN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    auto BCN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    unsigned FI = !!(MI->getOperand(FIN).getImm() & SISrcMods::OP_SEL_0);
    unsigned BC = !!(MI->getOperand(BCN).getImm() & SISrcMods::OP_SEL_0);
    if (FI || BC)
      O << " op_sel:[" << FI << ',' << BC << ']';
    return;
  }

  printPackedModifier(MI, " op_sel:[", SISrcMods::OP_SEL_0, O);
}

void AMDGPUInstPrinter::printOpSelHi(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printPackedModifier(MI, " op_sel_hi:[", SISrcMods::OP_SEL_1, O);
}

void AMDGPUInstPrinter::printNegLo(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printPackedModifier(MI, " neg_lo:[", SISrcMods::NEG, O);
}

void AMDGPUInstPrinter::printNegHi(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printPackedModifier(MI, " neg_hi:[", SISrcMods::NEG_HI, O);
}

void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNum).getImm();
  switch (Imm) {
  case 0:
    O << "p10";
    break;
  case 1:
    O << "p20";
    break;
  case 2:
    O << "p0";
    break;
  default:
    O << "invalid_param_" << Imm;
  }
}

void AMDGPUInstPrinter::printInterpAttr(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Attr = MI->getOperand(OpNum).getImm();
  O << "attr" << Attr;
}

void AMDGPUInstPrinter::printInterpAttrChan(const MCInst *MI, unsigned OpNum,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  unsigned Chan = MI->getOperand(OpNum).getImm();
  O << '.' << "xyzw"[Chan & 0x3];
}

void AMDGPUInstPrinter::printVGPRIndexMode(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  using namespace llvm::AMDGPU::VGPRIndexMode;
  unsigned Val = MI->getOperand(OpNo).getImm();

  if ((Val & ~ENABLE_MASK) != 0) {
    O << formatHex(static_cast<uint64_t>(Val));
  } else {
    O << "gpr_idx(";
    bool NeedComma = false;
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (Val & (1 << ModeId)) {
        if (NeedComma)
          O << ',';
        O << IdSymbolic[ModeId];
        NeedComma = true;
      }
    }
    O << ')';
  }
}

void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  printOperand(MI, OpNo, STI, O);
  O << ", ";
  printOperand(MI, OpNo + 1, STI, O);
}

void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
                                   raw_ostream &O, StringRef Asm,
                                   StringRef Default) {
  const MCOperand &Op = MI->getOperand(OpNo);
  assert(Op.isImm());
  if (Op.getImm() == 1) {
    O << Asm;
  } else {
    O << Default;
  }
}

void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
                                   raw_ostream &O, char Asm) {
  const MCOperand &Op = MI->getOperand(OpNo);
  assert(Op.isImm());
  if (Op.getImm() == 1)
    O << Asm;
}

void AMDGPUInstPrinter::printHigh(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "high");
}

void AMDGPUInstPrinter::printClampSI(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printNamedBit(MI, OpNo, O, "clamp");
}

void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  int Imm = MI->getOperand(OpNo).getImm();
  if (Imm == SIOutMods::MUL2)
    O << " mul:2";
  else if (Imm == SIOutMods::MUL4)
    O << " mul:4";
  else if (Imm == SIOutMods::DIV2)
    O << " div:2";
}

void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::SendMsg;

  const unsigned Imm16 = MI->getOperand(OpNo).getImm();

  uint16_t MsgId;
  uint16_t OpId;
  uint16_t StreamId;
  decodeMsg(Imm16, MsgId, OpId, StreamId);

  if (isValidMsgId(MsgId, STI) &&
      isValidMsgOp(MsgId, OpId, STI) &&
      isValidMsgStream(MsgId, OpId, StreamId, STI)) {
    O << "sendmsg(" << getMsgName(MsgId);
    if (msgRequiresOp(MsgId)) {
      O << ", " << getMsgOpName(MsgId, OpId);
      if (msgSupportsStream(MsgId, OpId)) {
        O << ", " << StreamId;
      }
    }
    O << ')';
  } else if (encodeMsg(MsgId, OpId, StreamId) == Imm16) {
    O << "sendmsg(" << MsgId << ", " << OpId << ", " << StreamId << ')';
  } else {
    O << Imm16; // Unknown imm16 code.
  }
}

static void printSwizzleBitmask(const uint16_t AndMask,
                                const uint16_t OrMask,
                                const uint16_t XorMask,
                                raw_ostream &O) {
  using namespace llvm::AMDGPU::Swizzle;

  uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
  uint16_t Probe1 = ((BITMASK_MASK & AndMask) | OrMask) ^ XorMask;

  O << "\"";

  for (unsigned Mask = 1 << (BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
    uint16_t p0 = Probe0 & Mask;
    uint16_t p1 = Probe1 & Mask;

    if (p0 == p1) {
      if (p0 == 0) {
        O << "0";
      } else {
        O << "1";
      }
    } else {
      if (p0 == 0) {
        O << "p";
      } else {
        O << "i";
      }
    }
  }

  O << "\"";
}

void AMDGPUInstPrinter::printSwizzle(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::Swizzle;

  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm == 0) {
    return;
  }

  O << " offset:";

  if ((Imm & QUAD_PERM_ENC_MASK) == QUAD_PERM_ENC) {

    O << "swizzle(" << IdSymbolic[ID_QUAD_PERM];
    for (unsigned I = 0; I < LANE_NUM; ++I) {
      O << ",";
      O << formatDec(Imm & LANE_MASK);
      Imm >>= LANE_SHIFT;
    }
    O << ")";

  } else if ((Imm & BITMASK_PERM_ENC_MASK) == BITMASK_PERM_ENC) {

    uint16_t AndMask = (Imm >> BITMASK_AND_SHIFT) & BITMASK_MASK;
    uint16_t OrMask = (Imm >> BITMASK_OR_SHIFT) & BITMASK_MASK;
    uint16_t XorMask = (Imm >> BITMASK_XOR_SHIFT) & BITMASK_MASK;

    if (AndMask == BITMASK_MAX &&
        OrMask == 0 &&
        countPopulation(XorMask) == 1) {

      O << "swizzle(" << IdSymbolic[ID_SWAP];
      O << ",";
      O << formatDec(XorMask);
      O << ")";

    } else if (AndMask == BITMASK_MAX &&
               OrMask == 0 && XorMask > 0 &&
               isPowerOf2_64(XorMask + 1)) {

      O << "swizzle(" << IdSymbolic[ID_REVERSE];
      O << ",";
      O << formatDec(XorMask + 1);
      O << ")";

    } else {

      uint16_t GroupSize = BITMASK_MAX - AndMask + 1;
      if (GroupSize > 1 &&
          isPowerOf2_64(GroupSize) &&
          OrMask < GroupSize &&
          XorMask == 0) {

        O << "swizzle(" << IdSymbolic[ID_BROADCAST];
        O << ",";
        O << formatDec(GroupSize);
        O << ",";
        O << formatDec(OrMask);
        O << ")";

      } else {
        O << "swizzle(" << IdSymbolic[ID_BITMASK_PERM];
        O << ",";
        printSwizzleBitmask(AndMask, OrMask, XorMask, O);
        O << ")";
      }
    }
  } else {
    printU16ImmDecOperand(MI, OpNo, O);
  }
}

void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI.getCPU());

  unsigned SImm16 = MI->getOperand(OpNo).getImm();
  unsigned Vmcnt, Expcnt, Lgkmcnt;
  decodeWaitcnt(ISA, SImm16, Vmcnt, Expcnt, Lgkmcnt);

  bool IsDefaultVmcnt = Vmcnt == getVmcntBitMask(ISA);
  bool IsDefaultExpcnt = Expcnt == getExpcntBitMask(ISA);
  bool IsDefaultLgkmcnt = Lgkmcnt == getLgkmcntBitMask(ISA);
  bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;

  bool NeedSpace = false;

  if (!IsDefaultVmcnt || PrintAll) {
    O << "vmcnt(" << Vmcnt << ')';
    NeedSpace = true;
  }

  if (!IsDefaultExpcnt || PrintAll) {
    if (NeedSpace)
      O << ' ';
    O << "expcnt(" << Expcnt << ')';
    NeedSpace = true;
  }

  if (!IsDefaultLgkmcnt || PrintAll) {
    if (NeedSpace)
      O << ' ';
    O << "lgkmcnt(" << Lgkmcnt << ')';
  }
}

void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI, raw_ostream &O) {
  unsigned Id;
  unsigned Offset;
  unsigned Width;

  using namespace llvm::AMDGPU::Hwreg;
  unsigned Val = MI->getOperand(OpNo).getImm();
  decodeHwreg(Val, Id, Offset, Width);
  StringRef HwRegName = getHwreg(Id, STI);

  O << "hwreg(";
  if (!HwRegName.empty()) {
    O << HwRegName;
  } else {
    O << Id;
  }
  if (Width != WIDTH_DEFAULT_ || Offset != OFFSET_DEFAULT_) {
    O << ", " << Offset << ", " << Width;
  }
  O << ')';
}

void AMDGPUInstPrinter::printEndpgm(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm == 0) {
    return;
  }

  O << ' ' << formatDec(Imm);
}

#include "AMDGPUGenAsmWriter.inc"