//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
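  /// Immediates are returned as-is; an unresolved expression is recorded as a
  /// fixup_aarch64_pcrel_branch19 fixup (see the definition below) and the
  /// field encodes as zero until the fixup is applied.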
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
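  /// The operand holds the number of fractional bits; the encoded scale is
  /// 64 minus that value (see the definition below).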
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
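/// Register operands are encoded via MCRegisterInfo's encoding table; plain
/// immediate operands are returned unchanged.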
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}

uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
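  // Otherwise a pc-relative fixup is recorded below: call26 for BL, branch26
  // for any other unconditional branch.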
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
/// 00 -> 0
/// 01 -> 8
/// 10 -> 16
/// 11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 8;
}

uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  assert(RegOpnd >= AArch64::W12 && RegOpnd <= AArch64::W15 &&
         "Expected register in the range w12-w15!");
  return RegOpnd - AArch64::W12;
}

uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
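  // (Bits 30:29 hold the move-wide opc field: MOVN is 0b00 and MOVZ is 0b10,
  // so clearing bit 30 is what turns this MOVZ into a MOVN.)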
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::CompilerBarrier ||
      MI.getOpcode() == AArch64::SPACE) {
    // CompilerBarrier just prevents the compiler from reordering accesses, and
    // SPACE just increases basic block size; neither emits any actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(OS, Binary, support::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}
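
// This factory is hooked into the MC layer from AArch64MCTargetDesc.cpp via
// TargetRegistry::RegisterMCCodeEmitter; that registration is how llvm-mc and
// the integrated assembler obtain an emitter for AArch64.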