1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "MCTargetDesc/ARMAsmBackend.h" 10 #include "MCTargetDesc/ARMAddressingModes.h" 11 #include "MCTargetDesc/ARMAsmBackendDarwin.h" 12 #include "MCTargetDesc/ARMAsmBackendELF.h" 13 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h" 14 #include "MCTargetDesc/ARMFixupKinds.h" 15 #include "MCTargetDesc/ARMMCAsmInfo.h" 16 #include "MCTargetDesc/ARMMCTargetDesc.h" 17 #include "llvm/ADT/StringSwitch.h" 18 #include "llvm/BinaryFormat/ELF.h" 19 #include "llvm/BinaryFormat/MachO.h" 20 #include "llvm/MC/MCAsmBackend.h" 21 #include "llvm/MC/MCAssembler.h" 22 #include "llvm/MC/MCContext.h" 23 #include "llvm/MC/MCELFObjectWriter.h" 24 #include "llvm/MC/MCExpr.h" 25 #include "llvm/MC/MCObjectWriter.h" 26 #include "llvm/MC/MCRegisterInfo.h" 27 #include "llvm/MC/MCSubtargetInfo.h" 28 #include "llvm/MC/MCTargetOptions.h" 29 #include "llvm/MC/MCValue.h" 30 #include "llvm/Support/Debug.h" 31 #include "llvm/Support/EndianStream.h" 32 #include "llvm/Support/ErrorHandling.h" 33 #include "llvm/Support/MathExtras.h" 34 #include "llvm/Support/raw_ostream.h" 35 using namespace llvm; 36 37 namespace { 38 class ARMELFObjectWriter : public MCELFObjectTargetWriter { 39 public: 40 ARMELFObjectWriter(uint8_t OSABI) 41 : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM, 42 /*HasRelocationAddend*/ false) {} 43 }; 44 } // end anonymous namespace 45 46 std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const { 47 return std::nullopt; 48 } 49 50 std::optional<MCFixupKind> 51 ARMAsmBackendELF::getFixupKind(StringRef Name) const { 52 unsigned Type = llvm::StringSwitch<unsigned>(Name) 53 
#define ELF_RELOC(X, Y) .Case(#X, Y) 54 #include "llvm/BinaryFormat/ELFRelocs/ARM.def" 55 #undef ELF_RELOC 56 .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE) 57 .Case("BFD_RELOC_8", ELF::R_ARM_ABS8) 58 .Case("BFD_RELOC_16", ELF::R_ARM_ABS16) 59 .Case("BFD_RELOC_32", ELF::R_ARM_ABS32) 60 .Default(-1u); 61 if (Type == -1u) 62 return std::nullopt; 63 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type); 64 } 65 66 MCFixupKindInfo ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { 67 const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = { 68 // This table *must* be in the order that the fixup_* kinds are defined in 69 // ARMFixupKinds.h. 70 // 71 // Name Offset (bits) Size (bits) Flags 72 {"fixup_arm_ldst_pcrel_12", 0, 32, 0}, 73 {"fixup_t2_ldst_pcrel_12", 0, 32, 0}, 74 {"fixup_arm_pcrel_10_unscaled", 0, 32, 0}, 75 {"fixup_arm_pcrel_10", 0, 32, 0}, 76 {"fixup_t2_pcrel_10", 0, 32, 0}, 77 {"fixup_arm_pcrel_9", 0, 32, 0}, 78 {"fixup_t2_pcrel_9", 0, 32, 0}, 79 {"fixup_arm_ldst_abs_12", 0, 32, 0}, 80 {"fixup_thumb_adr_pcrel_10", 0, 8, 0}, 81 {"fixup_arm_adr_pcrel_12", 0, 32, 0}, 82 {"fixup_t2_adr_pcrel_12", 0, 32, 0}, 83 {"fixup_arm_condbranch", 0, 24, 0}, 84 {"fixup_arm_uncondbranch", 0, 24, 0}, 85 {"fixup_t2_condbranch", 0, 32, 0}, 86 {"fixup_t2_uncondbranch", 0, 32, 0}, 87 {"fixup_arm_thumb_br", 0, 16, 0}, 88 {"fixup_arm_uncondbl", 0, 24, 0}, 89 {"fixup_arm_condbl", 0, 24, 0}, 90 {"fixup_arm_blx", 0, 24, 0}, 91 {"fixup_arm_thumb_bl", 0, 32, 0}, 92 {"fixup_arm_thumb_blx", 0, 32, 0}, 93 {"fixup_arm_thumb_cb", 0, 16, 0}, 94 {"fixup_arm_thumb_cp", 0, 8, 0}, 95 {"fixup_arm_thumb_bcc", 0, 8, 0}, 96 // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 97 // - 19. 
98 {"fixup_arm_movt_hi16", 0, 20, 0}, 99 {"fixup_arm_movw_lo16", 0, 20, 0}, 100 {"fixup_t2_movt_hi16", 0, 20, 0}, 101 {"fixup_t2_movw_lo16", 0, 20, 0}, 102 {"fixup_arm_thumb_upper_8_15", 0, 8, 0}, 103 {"fixup_arm_thumb_upper_0_7", 0, 8, 0}, 104 {"fixup_arm_thumb_lower_8_15", 0, 8, 0}, 105 {"fixup_arm_thumb_lower_0_7", 0, 8, 0}, 106 {"fixup_arm_mod_imm", 0, 12, 0}, 107 {"fixup_t2_so_imm", 0, 26, 0}, 108 {"fixup_bf_branch", 0, 32, 0}, 109 {"fixup_bf_target", 0, 32, 0}, 110 {"fixup_bfl_target", 0, 32, 0}, 111 {"fixup_bfc_target", 0, 32, 0}, 112 {"fixup_bfcsel_else_target", 0, 32, 0}, 113 {"fixup_wls", 0, 32, 0}, 114 {"fixup_le", 0, 32, 0}, 115 }; 116 const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = { 117 // This table *must* be in the order that the fixup_* kinds are defined in 118 // ARMFixupKinds.h. 119 // 120 // Name Offset (bits) Size (bits) Flags 121 {"fixup_arm_ldst_pcrel_12", 0, 32, 0}, 122 {"fixup_t2_ldst_pcrel_12", 0, 32, 0}, 123 {"fixup_arm_pcrel_10_unscaled", 0, 32, 0}, 124 {"fixup_arm_pcrel_10", 0, 32, 0}, 125 {"fixup_t2_pcrel_10", 0, 32, 0}, 126 {"fixup_arm_pcrel_9", 0, 32, 0}, 127 {"fixup_t2_pcrel_9", 0, 32, 0}, 128 {"fixup_arm_ldst_abs_12", 0, 32, 0}, 129 {"fixup_thumb_adr_pcrel_10", 8, 8, 0}, 130 {"fixup_arm_adr_pcrel_12", 0, 32, 0}, 131 {"fixup_t2_adr_pcrel_12", 0, 32, 0}, 132 {"fixup_arm_condbranch", 8, 24, 0}, 133 {"fixup_arm_uncondbranch", 8, 24, 0}, 134 {"fixup_t2_condbranch", 0, 32, 0}, 135 {"fixup_t2_uncondbranch", 0, 32, 0}, 136 {"fixup_arm_thumb_br", 0, 16, 0}, 137 {"fixup_arm_uncondbl", 8, 24, 0}, 138 {"fixup_arm_condbl", 8, 24, 0}, 139 {"fixup_arm_blx", 8, 24, 0}, 140 {"fixup_arm_thumb_bl", 0, 32, 0}, 141 {"fixup_arm_thumb_blx", 0, 32, 0}, 142 {"fixup_arm_thumb_cb", 0, 16, 0}, 143 {"fixup_arm_thumb_cp", 8, 8, 0}, 144 {"fixup_arm_thumb_bcc", 8, 8, 0}, 145 // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 146 // - 19. 
147 {"fixup_arm_movt_hi16", 12, 20, 0}, 148 {"fixup_arm_movw_lo16", 12, 20, 0}, 149 {"fixup_t2_movt_hi16", 12, 20, 0}, 150 {"fixup_t2_movw_lo16", 12, 20, 0}, 151 {"fixup_arm_thumb_upper_8_15", 24, 8, 0}, 152 {"fixup_arm_thumb_upper_0_7", 24, 8, 0}, 153 {"fixup_arm_thumb_lower_8_15", 24, 8, 0}, 154 {"fixup_arm_thumb_lower_0_7", 24, 8, 0}, 155 {"fixup_arm_mod_imm", 20, 12, 0}, 156 {"fixup_t2_so_imm", 26, 6, 0}, 157 {"fixup_bf_branch", 0, 32, 0}, 158 {"fixup_bf_target", 0, 32, 0}, 159 {"fixup_bfl_target", 0, 32, 0}, 160 {"fixup_bfc_target", 0, 32, 0}, 161 {"fixup_bfcsel_else_target", 0, 32, 0}, 162 {"fixup_wls", 0, 32, 0}, 163 {"fixup_le", 0, 32, 0}, 164 }; 165 166 // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require 167 // any extra processing. 168 if (mc::isRelocation(Kind)) 169 return {}; 170 171 if (Kind < FirstTargetFixupKind) 172 return MCAsmBackend::getFixupKindInfo(Kind); 173 174 assert(unsigned(Kind - FirstTargetFixupKind) < ARM::NumTargetFixupKinds && 175 "Invalid kind!"); 176 return (Endian == llvm::endianness::little 177 ? InfosLE 178 : InfosBE)[Kind - FirstTargetFixupKind]; 179 } 180 181 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op, 182 const MCSubtargetInfo &STI) const { 183 bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2); 184 bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps); 185 186 switch (Op) { 187 default: 188 return Op; 189 case ARM::tBcc: 190 return HasThumb2 ? (unsigned)ARM::t2Bcc : Op; 191 case ARM::tLDRpci: 192 return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op; 193 case ARM::tADR: 194 return HasThumb2 ? (unsigned)ARM::t2ADR : Op; 195 case ARM::tB: 196 return HasV8MBaselineOps ? 
(unsigned)ARM::t2B : Op; 197 case ARM::tCBZ: 198 return ARM::tHINT; 199 case ARM::tCBNZ: 200 return ARM::tHINT; 201 } 202 } 203 204 bool ARMAsmBackend::mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand>, 205 const MCSubtargetInfo &STI) const { 206 return getRelaxedOpcode(Opcode, STI) != Opcode; 207 } 208 209 static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) { 210 int64_t Offset = int64_t(Value) - 4; 211 if (Offset < Min || Offset > Max) 212 return "out of range pc-relative fixup value"; 213 return nullptr; 214 } 215 216 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup, 217 uint64_t Value) const { 218 switch (Fixup.getKind()) { 219 case ARM::fixup_arm_thumb_br: { 220 // Relaxing tB to t2B. tB has a signed 12-bit displacement with the 221 // low bit being an implied zero. There's an implied +4 offset for the 222 // branch, so we adjust the other way here to determine what's 223 // encodable. 224 // 225 // Relax if the value is too big for a (signed) i8. 226 int64_t Offset = int64_t(Value) - 4; 227 if (Offset > 2046 || Offset < -2048) 228 return "out of range pc-relative fixup value"; 229 break; 230 } 231 case ARM::fixup_arm_thumb_bcc: { 232 // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the 233 // low bit being an implied zero. There's an implied +4 offset for the 234 // branch, so we adjust the other way here to determine what's 235 // encodable. 236 // 237 // Relax if the value is too big for a (signed) i8. 238 int64_t Offset = int64_t(Value) - 4; 239 if (Offset > 254 || Offset < -256) 240 return "out of range pc-relative fixup value"; 241 break; 242 } 243 case ARM::fixup_thumb_adr_pcrel_10: 244 case ARM::fixup_arm_thumb_cp: { 245 // If the immediate is negative, greater than 1020, or not a multiple 246 // of four, the wide version of the instruction must be used. 
247 int64_t Offset = int64_t(Value) - 4; 248 if (Offset & 3) 249 return "misaligned pc-relative fixup value"; 250 else if (Offset > 1020 || Offset < 0) 251 return "out of range pc-relative fixup value"; 252 break; 253 } 254 case ARM::fixup_arm_thumb_cb: { 255 // If we have a Thumb CBZ or CBNZ instruction and its target is the next 256 // instruction it is actually out of range for the instruction. 257 // It will be changed to a NOP. 258 int64_t Offset = (Value & ~1); 259 if (Offset == 2) 260 return "will be converted to nop"; 261 break; 262 } 263 case ARM::fixup_bf_branch: 264 return checkPCRelOffset(Value, 0, 30); 265 case ARM::fixup_bf_target: 266 return checkPCRelOffset(Value, -0x10000, +0xfffe); 267 case ARM::fixup_bfl_target: 268 return checkPCRelOffset(Value, -0x40000, +0x3fffe); 269 case ARM::fixup_bfc_target: 270 return checkPCRelOffset(Value, -0x1000, +0xffe); 271 case ARM::fixup_wls: 272 return checkPCRelOffset(Value, 0, +0xffe); 273 case ARM::fixup_le: 274 // The offset field in the LE and LETP instructions is an 11-bit 275 // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is 276 // interpreted as a negative offset from the value read from pc, 277 // i.e. from instruction_address+4. 278 // 279 // So an LE instruction can in principle address the instruction 280 // immediately after itself, or (not very usefully) the address 281 // half way through the 4-byte LE. 282 return checkPCRelOffset(Value, -0xffe, 0); 283 case ARM::fixup_bfcsel_else_target: { 284 if (Value != 2 && Value != 4) 285 return "out of range label-relative fixup value"; 286 break; 287 } 288 289 default: 290 llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!"); 291 } 292 return nullptr; 293 } 294 295 static bool needsInterworking(const MCAssembler &Asm, const MCSymbol *Sym, 296 unsigned FixupKind) { 297 // Create relocations for unconditional branches to function symbols with 298 // different execution mode in ELF binaries. 
299 if (!Sym || !Sym->isELF()) 300 return false; 301 unsigned Type = cast<MCSymbolELF>(Sym)->getType(); 302 if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) { 303 if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch)) 304 return true; 305 if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br || 306 FixupKind == ARM::fixup_arm_thumb_bl || 307 FixupKind == ARM::fixup_t2_condbranch || 308 FixupKind == ARM::fixup_t2_uncondbranch)) 309 return true; 310 } 311 return false; 312 } 313 314 bool ARMAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, 315 const MCValue &Target, 316 uint64_t Value, 317 bool Resolved) const { 318 const MCSymbol *Sym = Target.getAddSym(); 319 if (needsInterworking(*Asm, Sym, Fixup.getKind())) 320 return true; 321 322 if (!Resolved) 323 return true; 324 return reasonForFixupRelaxation(Fixup, Value); 325 } 326 327 void ARMAsmBackend::relaxInstruction(MCInst &Inst, 328 const MCSubtargetInfo &STI) const { 329 unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI); 330 assert(RelaxedOp != Inst.getOpcode()); 331 332 // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we 333 // have to change the operands too. 334 if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) && 335 RelaxedOp == ARM::tHINT) { 336 MCInst Res; 337 Res.setOpcode(RelaxedOp); 338 Res.addOperand(MCOperand::createImm(0)); 339 Res.addOperand(MCOperand::createImm(14)); 340 Res.addOperand(MCOperand::createReg(0)); 341 Inst = std::move(Res); 342 return; 343 } 344 345 // The rest of instructions we're relaxing have the same operands. 346 // We just need to update to the proper opcode. 
347 Inst.setOpcode(RelaxedOp); 348 } 349 350 bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count, 351 const MCSubtargetInfo *STI) const { 352 const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8 353 const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP 354 const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0 355 const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP 356 if (STI->hasFeature(ARM::ModeThumb)) { 357 const uint16_t nopEncoding = 358 hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding; 359 uint64_t NumNops = Count / 2; 360 for (uint64_t i = 0; i != NumNops; ++i) 361 support::endian::write(OS, nopEncoding, Endian); 362 if (Count & 1) 363 OS << '\0'; 364 return true; 365 } 366 // ARM mode 367 const uint32_t nopEncoding = 368 hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding; 369 uint64_t NumNops = Count / 4; 370 for (uint64_t i = 0; i != NumNops; ++i) 371 support::endian::write(OS, nopEncoding, Endian); 372 // FIXME: should this function return false when unable to write exactly 373 // 'Count' bytes with NOP encodings? 374 switch (Count % 4) { 375 default: 376 break; // No leftover bytes to write 377 case 1: 378 OS << '\0'; 379 break; 380 case 2: 381 OS.write("\0\0", 2); 382 break; 383 case 3: 384 OS.write("\0\0\xa0", 3); 385 break; 386 } 387 388 return true; 389 } 390 391 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) { 392 if (IsLittleEndian) { 393 // Note that the halfwords are stored high first and low second in thumb; 394 // so we need to swap the fixup value here to map properly. 
395 uint32_t Swapped = (Value & 0xFFFF0000) >> 16; 396 Swapped |= (Value & 0x0000FFFF) << 16; 397 return Swapped; 398 } else 399 return Value; 400 } 401 402 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf, 403 bool IsLittleEndian) { 404 uint32_t Value; 405 406 if (IsLittleEndian) { 407 Value = (SecondHalf & 0xFFFF) << 16; 408 Value |= (FirstHalf & 0xFFFF); 409 } else { 410 Value = (SecondHalf & 0xFFFF); 411 Value |= (FirstHalf & 0xFFFF) << 16; 412 } 413 414 return Value; 415 } 416 417 unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm, 418 const MCFixup &Fixup, 419 const MCValue &Target, uint64_t Value, 420 bool IsResolved, MCContext &Ctx, 421 const MCSubtargetInfo* STI) const { 422 unsigned Kind = Fixup.getKind(); 423 int64_t Addend = Target.getConstant(); 424 425 // For MOVW/MOVT Instructions, the fixup value must already be within a 426 // signed 16bit range. 427 if ((Kind == ARM::fixup_arm_movw_lo16 || Kind == ARM::fixup_arm_movt_hi16 || 428 Kind == ARM::fixup_t2_movw_lo16 || Kind == ARM::fixup_t2_movt_hi16) && 429 (Addend < minIntN(16) || Addend > maxIntN(16))) { 430 Ctx.reportError(Fixup.getLoc(), "Relocation Not In Range"); 431 return 0; 432 } 433 434 // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT 435 // and .word relocations they put the Thumb bit into the addend if possible. 436 // Other relocation types don't want this bit though (branches couldn't encode 437 // it if it *was* present, and no other relocations exist) and it can 438 // interfere with checking valid expressions. 
439 bool IsMachO = getContext().getObjectFileType() == MCContext::IsMachO; 440 if (const auto *SA = Target.getAddSym()) { 441 if (IsMachO && Asm.isThumbFunc(SA) && SA->isExternal() && 442 (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 || 443 Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 || 444 Kind == ARM::fixup_t2_movt_hi16)) 445 Value |= 1; 446 } 447 448 switch (Kind) { 449 default: 450 return 0; 451 case FK_Data_1: 452 case FK_Data_2: 453 case FK_Data_4: 454 return Value; 455 case FK_SecRel_2: 456 return Value; 457 case FK_SecRel_4: 458 return Value; 459 case ARM::fixup_arm_movt_hi16: 460 assert(STI != nullptr); 461 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 462 Value >>= 16; 463 [[fallthrough]]; 464 case ARM::fixup_arm_movw_lo16: { 465 unsigned Hi4 = (Value & 0xF000) >> 12; 466 unsigned Lo12 = Value & 0x0FFF; 467 // inst{19-16} = Hi4; 468 // inst{11-0} = Lo12; 469 Value = (Hi4 << 16) | (Lo12); 470 return Value; 471 } 472 case ARM::fixup_t2_movt_hi16: 473 assert(STI != nullptr); 474 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 475 Value >>= 16; 476 [[fallthrough]]; 477 case ARM::fixup_t2_movw_lo16: { 478 unsigned Hi4 = (Value & 0xF000) >> 12; 479 unsigned i = (Value & 0x800) >> 11; 480 unsigned Mid3 = (Value & 0x700) >> 8; 481 unsigned Lo8 = Value & 0x0FF; 482 // inst{19-16} = Hi4; 483 // inst{26} = i; 484 // inst{14-12} = Mid3; 485 // inst{7-0} = Lo8; 486 Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8); 487 return swapHalfWords(Value, Endian == llvm::endianness::little); 488 } 489 case ARM::fixup_arm_thumb_upper_8_15: 490 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 491 return (Value & 0xff000000) >> 24; 492 return Value & 0xff; 493 case ARM::fixup_arm_thumb_upper_0_7: 494 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 495 return (Value & 0x00ff0000) >> 16; 496 return Value & 0xff; 497 case ARM::fixup_arm_thumb_lower_8_15: 498 if (IsResolved || 
!STI->getTargetTriple().isOSBinFormatELF()) 499 return (Value & 0x0000ff00) >> 8; 500 return Value & 0xff; 501 case ARM::fixup_arm_thumb_lower_0_7: 502 return Value & 0x000000ff; 503 case ARM::fixup_arm_ldst_pcrel_12: 504 // ARM PC-relative values are offset by 8. 505 Value -= 4; 506 [[fallthrough]]; 507 case ARM::fixup_t2_ldst_pcrel_12: 508 // Offset by 4, adjusted by two due to the half-word ordering of thumb. 509 Value -= 4; 510 [[fallthrough]]; 511 case ARM::fixup_arm_ldst_abs_12: { 512 bool isAdd = true; 513 if ((int64_t)Value < 0) { 514 Value = -Value; 515 isAdd = false; 516 } 517 if (Value >= 4096) { 518 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 519 return 0; 520 } 521 Value |= isAdd << 23; 522 523 // Same addressing mode as fixup_arm_pcrel_10, 524 // but with 16-bit halfwords swapped. 525 if (Kind == ARM::fixup_t2_ldst_pcrel_12) 526 return swapHalfWords(Value, Endian == llvm::endianness::little); 527 528 return Value; 529 } 530 case ARM::fixup_arm_adr_pcrel_12: { 531 // ARM PC-relative values are offset by 8. 532 Value -= 8; 533 unsigned opc = 4; // bits {24-21}. Default to add: 0b0100 534 if ((int64_t)Value < 0) { 535 Value = -Value; 536 opc = 2; // 0b0010 537 } 538 if (ARM_AM::getSOImmVal(Value) == -1) { 539 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 540 return 0; 541 } 542 // Encode the immediate and shift the opcode into place. 
543 return ARM_AM::getSOImmVal(Value) | (opc << 21); 544 } 545 546 case ARM::fixup_t2_adr_pcrel_12: { 547 Value -= 4; 548 unsigned opc = 0; 549 if ((int64_t)Value < 0) { 550 Value = -Value; 551 opc = 5; 552 } 553 554 uint32_t out = (opc << 21); 555 out |= (Value & 0x800) << 15; 556 out |= (Value & 0x700) << 4; 557 out |= (Value & 0x0FF); 558 559 return swapHalfWords(out, Endian == llvm::endianness::little); 560 } 561 562 case ARM::fixup_arm_condbranch: 563 case ARM::fixup_arm_uncondbranch: 564 case ARM::fixup_arm_uncondbl: 565 case ARM::fixup_arm_condbl: 566 case ARM::fixup_arm_blx: 567 // Check that the relocation value is legal. 568 Value -= 8; 569 if (!isInt<26>(Value)) { 570 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 571 return 0; 572 } 573 // Alignment differs for blx. Because we are switching to thumb ISA, we use 574 // 16-bit alignment. Otherwise, use 32-bit. 575 if ((Kind == ARM::fixup_arm_blx && Value % 2 != 0) || 576 (Kind != ARM::fixup_arm_blx && Value % 4 != 0)) { 577 Ctx.reportError(Fixup.getLoc(), "Relocation not aligned"); 578 return 0; 579 } 580 581 // These values don't encode the low two bits since they're always zero. 582 // Offset by 8 just as above. 583 if (const MCSymbolRefExpr *SRE = 584 dyn_cast<MCSymbolRefExpr>(Fixup.getValue())) 585 if (SRE->getSpecifier() == ARM::S_TLSCALL) 586 return 0; 587 return 0xffffff & (Value >> 2); 588 case ARM::fixup_t2_uncondbranch: { 589 if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved && 590 Value != 4) { 591 // MSVC link.exe and lld do not support this relocation type 592 // with a non-zero offset. ("Value" is offset by 4 at this point.) 593 Ctx.reportError(Fixup.getLoc(), 594 "cannot perform a PC-relative fixup with a non-zero " 595 "symbol offset"); 596 } 597 Value = Value - 4; 598 if (!isInt<25>(Value)) { 599 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 600 return 0; 601 } 602 603 Value >>= 1; // Low bit is not encoded. 
604 605 uint32_t out = 0; 606 bool I = Value & 0x800000; 607 bool J1 = Value & 0x400000; 608 bool J2 = Value & 0x200000; 609 J1 ^= I; 610 J2 ^= I; 611 612 out |= I << 26; // S bit 613 out |= !J1 << 13; // J1 bit 614 out |= !J2 << 11; // J2 bit 615 out |= (Value & 0x1FF800) << 5; // imm6 field 616 out |= (Value & 0x0007FF); // imm11 field 617 618 return swapHalfWords(out, Endian == llvm::endianness::little); 619 } 620 case ARM::fixup_t2_condbranch: { 621 Value = Value - 4; 622 if (!isInt<21>(Value)) { 623 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 624 return 0; 625 } 626 627 Value >>= 1; // Low bit is not encoded. 628 629 uint64_t out = 0; 630 out |= (Value & 0x80000) << 7; // S bit 631 out |= (Value & 0x40000) >> 7; // J2 bit 632 out |= (Value & 0x20000) >> 4; // J1 bit 633 out |= (Value & 0x1F800) << 5; // imm6 field 634 out |= (Value & 0x007FF); // imm11 field 635 636 return swapHalfWords(out, Endian == llvm::endianness::little); 637 } 638 case ARM::fixup_arm_thumb_bl: { 639 if (!isInt<25>(Value - 4) || 640 (!STI->hasFeature(ARM::FeatureThumb2) && 641 !STI->hasFeature(ARM::HasV8MBaselineOps) && 642 !STI->hasFeature(ARM::HasV6MOps) && 643 !isInt<23>(Value - 4))) { 644 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 645 return 0; 646 } 647 if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved && 648 Value != 4) { 649 // MSVC link.exe and lld do not support this relocation type 650 // with a non-zero offset. ("Value" is offset by 4 at this point.) 651 Ctx.reportError(Fixup.getLoc(), 652 "cannot perform a PC-relative fixup with a non-zero " 653 "symbol offset"); 654 } 655 656 // The value doesn't encode the low bit (always zero) and is offset by 657 // four. The 32-bit immediate value is encoded as 658 // imm32 = SignExtend(S:I1:I2:imm10:imm11:0) 659 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S). 660 // The value is encoded into disjoint bit positions in the destination 661 // opcode. 
x = unchanged, I = immediate value bit, S = sign extension bit, 662 // J = either J1 or J2 bit 663 // 664 // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII 665 // 666 // Note that the halfwords are stored high first, low second; so we need 667 // to transpose the fixup value here to map properly. 668 uint32_t offset = (Value - 4) >> 1; 669 uint32_t signBit = (offset & 0x800000) >> 23; 670 uint32_t I1Bit = (offset & 0x400000) >> 22; 671 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit; 672 uint32_t I2Bit = (offset & 0x200000) >> 21; 673 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit; 674 uint32_t imm10Bits = (offset & 0x1FF800) >> 11; 675 uint32_t imm11Bits = (offset & 0x000007FF); 676 677 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits); 678 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) | 679 (uint16_t)imm11Bits); 680 return joinHalfWords(FirstHalf, SecondHalf, 681 Endian == llvm::endianness::little); 682 } 683 case ARM::fixup_arm_thumb_blx: { 684 if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved && 685 Value != 4) { 686 // MSVC link.exe and lld do not support this relocation type 687 // with a non-zero offset. ("Value" is offset by 4 at this point.) 688 Ctx.reportError(Fixup.getLoc(), 689 "cannot perform a PC-relative fixup with a non-zero " 690 "symbol offset"); 691 } 692 // The value doesn't encode the low two bits (always zero) and is offset by 693 // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as 694 // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00) 695 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S). 696 // The value is encoded into disjoint bit positions in the destination 697 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit, 698 // J = either J1 or J2 bit, 0 = zero. 699 // 700 // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0 701 // 702 // Note that the halfwords are stored high first, low second; so we need 703 // to transpose the fixup value here to map properly. 
704 if (Value % 4 != 0) { 705 Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination"); 706 return 0; 707 } 708 709 uint32_t offset = (Value - 4) >> 2; 710 if (const MCSymbolRefExpr *SRE = 711 dyn_cast<MCSymbolRefExpr>(Fixup.getValue())) 712 if (SRE->getSpecifier() == ARM::S_TLSCALL) 713 offset = 0; 714 uint32_t signBit = (offset & 0x400000) >> 22; 715 uint32_t I1Bit = (offset & 0x200000) >> 21; 716 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit; 717 uint32_t I2Bit = (offset & 0x100000) >> 20; 718 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit; 719 uint32_t imm10HBits = (offset & 0xFFC00) >> 10; 720 uint32_t imm10LBits = (offset & 0x3FF); 721 722 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits); 723 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) | 724 ((uint16_t)imm10LBits) << 1); 725 return joinHalfWords(FirstHalf, SecondHalf, 726 Endian == llvm::endianness::little); 727 } 728 case ARM::fixup_thumb_adr_pcrel_10: 729 case ARM::fixup_arm_thumb_cp: 730 // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we 731 // could have an error on our hands. 732 assert(STI != nullptr); 733 if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) { 734 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 735 if (FixupDiagnostic) { 736 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 737 return 0; 738 } 739 } 740 // Offset by 4, and don't encode the low two bits. 741 return ((Value - 4) >> 2) & 0xff; 742 case ARM::fixup_arm_thumb_cb: { 743 // CB instructions can only branch to offsets in [4, 126] in multiples of 2 744 // so ensure that the raw value LSB is zero and it lies in [2, 130]. 745 // An offset of 2 will be relaxed to a NOP. 746 if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) { 747 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 748 return 0; 749 } 750 // Offset by 4 and don't encode the lower bit, which is always 0. 
751 // FIXME: diagnose if no Thumb2 752 uint32_t Binary = (Value - 4) >> 1; 753 return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3); 754 } 755 case ARM::fixup_arm_thumb_br: 756 // Offset by 4 and don't encode the lower bit, which is always 0. 757 assert(STI != nullptr); 758 if (!STI->hasFeature(ARM::FeatureThumb2) && 759 !STI->hasFeature(ARM::HasV8MBaselineOps)) { 760 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 761 if (FixupDiagnostic) { 762 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 763 return 0; 764 } 765 } 766 return ((Value - 4) >> 1) & 0x7ff; 767 case ARM::fixup_arm_thumb_bcc: 768 // Offset by 4 and don't encode the lower bit, which is always 0. 769 assert(STI != nullptr); 770 if (!STI->hasFeature(ARM::FeatureThumb2)) { 771 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 772 if (FixupDiagnostic) { 773 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 774 return 0; 775 } 776 } 777 return ((Value - 4) >> 1) & 0xff; 778 case ARM::fixup_arm_pcrel_10_unscaled: { 779 Value = Value - 8; // ARM fixups offset by an additional word and don't 780 // need to adjust for the half-word ordering. 781 bool isAdd = true; 782 if ((int64_t)Value < 0) { 783 Value = -Value; 784 isAdd = false; 785 } 786 // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8]. 787 if (Value >= 256) { 788 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 789 return 0; 790 } 791 Value = (Value & 0xf) | ((Value & 0xf0) << 4); 792 return Value | (isAdd << 23); 793 } 794 case ARM::fixup_arm_pcrel_10: 795 Value = Value - 4; // ARM fixups offset by an additional word and don't 796 // need to adjust for the half-word ordering. 797 [[fallthrough]]; 798 case ARM::fixup_t2_pcrel_10: { 799 // Offset by 4, adjusted by two due to the half-word ordering of thumb. 
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      // The encoding stores a magnitude plus an add/subtract (U) bit rather
      // than a two's-complement displacement.
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      // Scaled magnitude must fit in the 8-bit immediate field.
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23; // U bit: 1 = add the offset, 0 = subtract it.

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      // Halfword-scaled magnitude must fit in the 8-bit immediate field.
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23; // U bit: 1 = add the offset, 0 = subtract it.

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    // Re-encode the value as an ARM modified immediate (8-bit value with a
    // rotation). A result with bits above 11 set signals that getSOImmVal
    // could not encode the value.
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    // Re-encode as a Thumb2 modified immediate; a negative result signals
    // that getT2SOImmVal could not encode the value.
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15; // i -> bit 10 of the upper half-word.
    EncValue |= (Value & 0x700) << 4;  // imm3 -> bits 14:12 of the lower one.
    EncValue |= (Value & 0xff);        // imm8 stays in bits 7:0.
    return swapHalfWords(EncValue, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    // Branch-future: 4-bit halfword offset placed at bits 26:23.
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    // The three BF variants differ only in how many high bits of the
    // halfword offset the encoding has room for.
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
                            Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch targeted
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    // LE branches backwards, so its offset is encoded negated.
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  }
}

/// Decide whether a fixup must be emitted as a relocation even when the
/// target's value is locally known, e.g. so the linker can see the ARM/Thumb
/// state of the destination and insert interworking veneers.
bool ARMAsmBackend::shouldForceRelocation(const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbol *Sym = Target.getAddSym();
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (needsInterworking(*Asm, Sym, Fixup.getKind()))
    return true;
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (Sym && (FixupKind == ARM::fixup_arm_thumb_blx ||
              FixupKind == ARM::fixup_arm_blx ||
              FixupKind == ARM::fixup_arm_uncondbl ||
              FixupKind == ARM::fixup_arm_condbl))
    return true;
  // Any target-specific relocation specifier also forces a relocation.
  return Target.getSpecifier();
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
/// Note this can be smaller than the containing instruction: ARM-mode
/// branch/load fixups only touch the low 3 bytes of a 4-byte instruction.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  // Fixups confined to a single byte.
  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_upper_8_15:
  case ARM::fixup_arm_thumb_upper_0_7:
  case ARM::fixup_arm_thumb_lower_8_15:
  case ARM::fixup_arm_thumb_lower_0_7:
    return 1;

  // Fixups covering a 16-bit Thumb instruction or a 2-byte datum.
  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  // ARM-mode fixups: immediate/branch fields live in the low 24 bits.
  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_ldst_abs_12:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  // Full 32-bit (Thumb2 or data) fixups.
  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_thumb_upper_8_15:
  case ARM::fixup_arm_thumb_upper_0_7:
  case ARM::fixup_arm_thumb_lower_8_15:
  case ARM::fixup_arm_thumb_lower_0_7:
    // Instruction size is 2 bytes.
1052 return 2; 1053 1054 case ARM::fixup_arm_pcrel_10_unscaled: 1055 case ARM::fixup_arm_ldst_pcrel_12: 1056 case ARM::fixup_arm_pcrel_10: 1057 case ARM::fixup_arm_pcrel_9: 1058 case ARM::fixup_arm_adr_pcrel_12: 1059 case ARM::fixup_arm_uncondbl: 1060 case ARM::fixup_arm_condbl: 1061 case ARM::fixup_arm_blx: 1062 case ARM::fixup_arm_condbranch: 1063 case ARM::fixup_arm_uncondbranch: 1064 case ARM::fixup_t2_ldst_pcrel_12: 1065 case ARM::fixup_t2_condbranch: 1066 case ARM::fixup_t2_uncondbranch: 1067 case ARM::fixup_t2_pcrel_10: 1068 case ARM::fixup_t2_pcrel_9: 1069 case ARM::fixup_t2_adr_pcrel_12: 1070 case ARM::fixup_arm_thumb_bl: 1071 case ARM::fixup_arm_thumb_blx: 1072 case ARM::fixup_arm_movt_hi16: 1073 case ARM::fixup_arm_movw_lo16: 1074 case ARM::fixup_t2_movt_hi16: 1075 case ARM::fixup_t2_movw_lo16: 1076 case ARM::fixup_arm_mod_imm: 1077 case ARM::fixup_t2_so_imm: 1078 case ARM::fixup_bf_branch: 1079 case ARM::fixup_bf_target: 1080 case ARM::fixup_bfl_target: 1081 case ARM::fixup_bfc_target: 1082 case ARM::fixup_bfcsel_else_target: 1083 case ARM::fixup_wls: 1084 case ARM::fixup_le: 1085 // Instruction size is 4 bytes. 1086 return 4; 1087 } 1088 } 1089 1090 std::optional<bool> ARMAsmBackend::evaluateFixup(const MCFragment &F, 1091 MCFixup &Fixup, MCValue &, 1092 uint64_t &Value) { 1093 // For a few PC-relative fixups in Thumb mode, offsets need to be aligned 1094 // down. We compensate here because the default handler's `Value` decrement 1095 // doesn't account for this alignment. 
1096 switch (Fixup.getKind()) { 1097 case ARM::fixup_t2_ldst_pcrel_12: 1098 case ARM::fixup_t2_pcrel_10: 1099 case ARM::fixup_t2_pcrel_9: 1100 case ARM::fixup_thumb_adr_pcrel_10: 1101 case ARM::fixup_t2_adr_pcrel_12: 1102 case ARM::fixup_arm_thumb_blx: 1103 case ARM::fixup_arm_thumb_cp: 1104 Value = (Asm->getFragmentOffset(F) + Fixup.getOffset()) % 4; 1105 } 1106 return {}; 1107 } 1108 1109 void ARMAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup, 1110 const MCValue &Target, 1111 MutableArrayRef<char> Data, uint64_t Value, 1112 bool IsResolved) { 1113 if (IsResolved && shouldForceRelocation(Fixup, Target)) 1114 IsResolved = false; 1115 maybeAddReloc(F, Fixup, Target, Value, IsResolved); 1116 auto Kind = Fixup.getKind(); 1117 if (mc::isRelocation(Kind)) 1118 return; 1119 MCContext &Ctx = getContext(); 1120 Value = adjustFixupValue(*Asm, Fixup, Target, Value, IsResolved, Ctx, 1121 getSubtargetInfo(F)); 1122 if (!Value) 1123 return; // Doesn't change encoding. 1124 const unsigned NumBytes = getFixupKindNumBytes(Kind); 1125 1126 unsigned Offset = Fixup.getOffset(); 1127 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); 1128 1129 // Used to point to big endian bytes. 1130 unsigned FullSizeBytes; 1131 if (Endian == llvm::endianness::big) { 1132 FullSizeBytes = getFixupKindContainerSizeBytes(Kind); 1133 assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!"); 1134 assert(NumBytes <= FullSizeBytes && "Invalid fixup size!"); 1135 } 1136 1137 // For each byte of the fragment that the fixup touches, mask in the bits from 1138 // the fixup value. The Value has been "split up" into the appropriate 1139 // bitfields above. 1140 for (unsigned i = 0; i != NumBytes; ++i) { 1141 unsigned Idx = 1142 Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i); 1143 Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff); 1144 } 1145 } 1146 1147 namespace CU { 1148 1149 /// Compact unwind encoding values. 
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK = 0x0F000000,    // Bits selecting the unwind mode.
  UNWIND_ARM_MODE_FRAME = 0x01000000,   // Standard r7 frame, GPR saves only.
  UNWIND_ARM_MODE_FRAME_D = 0x02000000, // Standard frame plus D-reg saves.
  UNWIND_ARM_MODE_DWARF = 0x04000000,   // Fall back to DWARF unwinding.

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  // Registers pushed immediately below the r7/lr frame pair.
  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  // Registers pushed below the first group.
  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fallback and unwind using dwarf.
uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
  if (Instrs.empty())
    return 0;
  // Non-canonical personalities only get compact unwind if explicitly
  // requested; otherwise defer to DWARF.
  if (!isDarwinCanonicalPersonality(FI->Personality) &&
      !Ctxt->emitCompactUnwindNonCanonical())
    return CU::UNWIND_ARM_MODE_DWARF;

  // Start off assuming CFA is at SP+0.
  MCRegister CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<MCRegister, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (const MCCFIInstruction &Inst : Instrs) {
    MCRegister Reg;
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      // Record where each GPR/DPR was saved; anything else forces DWARF.
      Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode="
                          << uint8_t(Inst.getOperation()) << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  // A standard frame has CFA = r7 + 8 (r7 and lr pushed); any extra is a
  // var-args style stack adjust.
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE(
        "compact-unwind",
        llvm::dbgs() << "LR not saved as standard frame, StackAdjust="
                     << StackAdjust
                     << ", CFARegisterOffset=" << CFARegisterOffset
                     << ", lr save at offset=" << RegOffsets[ARM::LR] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  // Encoded as a 2-bit multiple-of-4 field (STACK_ADJUST_MASK bits).
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  // Expected push order (top of stack downward); each saved register must
  // sit exactly 4 bytes below the previous one.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static MCPhysReg FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  // D-register count is stored biased by one in bits 11:8.
  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

/// Construct the backend matching the target triple's object file format.
static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         llvm::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO:
    return new ARMAsmBackendDarwin(T, STI, MRI);
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    // FDPIC targets use a dedicated OSABI value in the ELF header.
    uint8_t OSABI = Options.FDPIC
                        ? static_cast<uint8_t>(ELF::ELFOSABI_ARM_FDPIC)
                        : MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, OSABI, Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::big);
}