//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
// ELF relocation parameters for ARM: 32-bit, EM_ARM machine, REL-style
// relocations (no explicit addend field).
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

// The generic backend recognizes no target-specific relocation names in
// .reloc directives; object-format subclasses (e.g. ELF below) override this.
std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  return std::nullopt;
}

// Map a relocation name used in a .reloc directive — either an R_ARM_* name
// from ELFRelocs/ARM.def or a BFD_RELOC_* alias — to a literal-relocation
// fixup kind; std::nullopt for unknown names.
std::optional<MCFixupKind>
ARMAsmBackendELF::getFixupKind(StringRef Name) const {
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
                      .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
                      .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
                      .Default(-1u);
  // -1u marks "not a recognized relocation name".
  if (Type == -1u)
    return std::nullopt;
  // Literal relocation kinds are passed through to the object writer with no
  // fixup processing.
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

// Return the name/offset/size/flags descriptor for a fixup kind, picking the
// little- or big-endian table to match the target's instruction endianness.
const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  unsigned IsPCRelConstant =
      MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_thumb_upper_8_15", 0, 8, 0},
      {"fixup_arm_thumb_upper_0_7", 0, 8, 0},
      {"fixup_arm_thumb_lower_8_15", 0, 8, 0},
      {"fixup_arm_thumb_lower_0_7", 0, 8, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      //
      // Entries differ from the LE table only in their bit offsets: on a
      // big-endian target the fixup bits sit at the opposite end of the
      // instruction word.
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_thumb_upper_8_15", 24, 8, 0},
      {"fixup_arm_thumb_upper_0_7", 24, 8, 0},
      {"fixup_arm_thumb_lower_8_15", 24, 8, 0},
      {"fixup_arm_thumb_lower_0_7", 24, 8, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};

  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
  // any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  // Generic (non-target) fixup kinds are described by the base class.
  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == llvm::endianness::little
              ? InfosLE
              : InfosBE)[Kind - FirstTargetFixupKind];
}

// Track the current instruction set (.code 16 / .code 32) so later fixup and
// NOP emission decisions know whether Thumb or ARM encodings are in effect.
void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

// Return the wider opcode a narrow Thumb instruction relaxes to when its
// operand does not fit, or Op itself when no relaxation is available on this
// subtarget.
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
                                         const MCSubtargetInfo &STI) const {
  bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2);
  bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps);

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ?
        (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  // A CBZ/CBNZ whose target is out of range is replaced by a NOP (tHINT);
  // see relaxInstruction() for the operand rewrite.
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

// An instruction may need relaxation iff a wider relaxed form exists for it
// on this subtarget.
bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
    return true;
  return false;
}

// Check a pc-relative displacement (after removing the implied +4 pc offset)
// against [Min, Max]. Returns a diagnostic string, or nullptr when in range.
static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  int64_t Offset = int64_t(Value) - 4;
  if (Offset < Min || Offset > Max)
    return "out of range pc-relative fixup value";
  return nullptr;
}

// Return a diagnostic string if this fixup value forces relaxation to the
// wide encoding (or is unencodable), or nullptr if the narrow encoding fits.
const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getTargetKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, 0, 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, -0x10000, +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, -0x1000, +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, 0, +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, -0xffe, 0);
  case ARM::fixup_bfcsel_else_target: {
    // The only encodable else-target distances are 2 and 4 bytes.
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }

  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

// A fixup needs relaxation iff there is a (non-null) reason the narrow
// encoding cannot represent its value.
bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

// Rewrite Inst in place into its relaxed (wider, or NOP) form.
void ARMAsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);

  // Return a diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    MCInst Res;
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));  // hint #0 == NOP
    Res.addOperand(MCOperand::createImm(14)); // predicate: AL
    Res.addOperand(MCOperand::createReg(0));  // no condition register
    Inst = std::move(Res);
    return;
  }

  // The rest of instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Inst.setOpcode(RelaxedOp);
}

// Fill Count bytes with NOP encodings appropriate to the current mode (Thumb
// vs ARM) and subtarget (architected NOP vs a MOV fallback), zero-padding any
// remainder that is smaller than one instruction.
bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                 const MCSubtargetInfo *STI) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }

  return true;
}

// Swap the two 16-bit halfwords of a 32-bit value on little-endian targets.
static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

// Combine the first/second 16-bit halves of a 32-bit Thumb instruction into
// one value, honoring the in-memory halfword order for the endianness.
static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

// Scatter a resolved fixup value into the bit positions the instruction
// encoding expects, reporting errors via Ctx for unencodable values.
// Returns the bits to merge into the instruction, or 0 on error.
unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         const MCSubtargetInfo *STI) const {
  unsigned Kind = Fixup.getKind();

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        A->getSymbol().isExternal() &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1; // Set the Thumb interworking bit in the addend.
  }

  switch (Kind) {
  default:
    return 0;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    assert(STI != nullptr);
    // On ELF an unresolved movt keeps the full value; the linker extracts
    // the high half via the relocation.
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    [[fallthrough]];
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    [[fallthrough]];
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, Endian == llvm::endianness::little);
  }
  // Thumb byte-granular address fixups: pick out one byte of the address
  // when resolved (or on non-ELF); otherwise leave the low byte for the
  // relocation addend.
  case ARM::fixup_arm_thumb_upper_8_15:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      return (Value & 0xff000000) >> 24;
    return Value & 0xff;
  case ARM::fixup_arm_thumb_upper_0_7:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      return (Value & 0x00ff0000) >> 16;
    return Value & 0xff;
  case ARM::fixup_arm_thumb_lower_8_15:
    if (IsResolved ||
        !STI->getTargetTriple().isOSBinFormatELF())
      return (Value & 0x0000ff00) >> 8;
    return Value & 0xff;
  case ARM::fixup_arm_thumb_lower_0_7:
    return Value & 0x000000ff;
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8 (this -4 plus the shared -4
    // applied in the fall-through case below).
    Value -= 4;
    [[fallthrough]];
  case ARM::fixup_t2_ldst_pcrel_12:
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    [[fallthrough]];
  case ARM::fixup_arm_ldst_abs_12: {
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23; // U bit: add vs. subtract the offset.

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    // Thumb PC-relative values are offset by 4.
    Value -= 4;
    unsigned opc = 0; // ADR encoded as ADD (0b00000)...
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5; // ...or SUB (0b00101) for negative offsets.
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15; // i
    out |= (Value & 0x700) << 4;  // imm3
    out |= (Value & 0x0FF);       // imm8

    return swapHalfWords(out, Endian == llvm::endianness::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0; // TLS calls are fully resolved by the linker.
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm6 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    // Narrow (pre-Thumb2/v8-M/v6-M) cores only have a +/-4MB BL range.
    if (!isInt<25>(Value - 4) ||
        (!STI->hasFeature(ARM::FeatureThumb2) &&
         !STI->hasFeature(ARM::HasV8MBaselineOps) &&
         !STI->hasFeature(ARM::HasV6MOps) &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf,
                         Endian == llvm::endianness::little);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    // TLS calls are fully resolved by the linker; encode a zero offset.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf,
                         Endian == llvm::endianness::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    // Scatter into i:imm5 (inst{9} and inst{7-3}).
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2) &&
        !STI->hasFeature(ARM::HasV8MBaselineOps)) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2)) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23; // U bit: add vs. subtract the offset.

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23; // U bit: add vs. subtract the offset.

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    // ARM modified-immediate: 8-bit value rotated by an even amount;
    // getSOImmVal returns the 12-bit rot:imm8 encoding or -1.
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    // High-bit mask matches the target-offset width of each BF variant.
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
                            Kind == ARM::fixup_bfl_target ?
0x3f800 : 0x800); 872 out |= (((Value - 4) >> 1) & 0x1) << 11; 873 out |= (((Value - 4) >> 1) & 0x7fe); 874 out |= (((Value - 4) >> 1) & HighBitMask) << 5; 875 return swapHalfWords(out, Endian == llvm::endianness::little); 876 } 877 case ARM::fixup_bfcsel_else_target: { 878 // If this is a fixup of a branch future's else target then it should be a 879 // constant MCExpr representing the distance between the branch targetted 880 // and the instruction after that same branch. 881 Value = Target.getConstant(); 882 883 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 884 if (FixupDiagnostic) { 885 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 886 return 0; 887 } 888 uint32_t out = ((Value >> 2) & 1) << 17; 889 return swapHalfWords(out, Endian == llvm::endianness::little); 890 } 891 case ARM::fixup_wls: 892 case ARM::fixup_le: { 893 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 894 if (FixupDiagnostic) { 895 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 896 return 0; 897 } 898 uint64_t real_value = Value - 4; 899 uint32_t out = 0; 900 if (Kind == ARM::fixup_le) 901 real_value = -real_value; 902 out |= ((real_value >> 1) & 0x1) << 11; 903 out |= ((real_value >> 1) & 0x7fe); 904 return swapHalfWords(out, Endian == llvm::endianness::little); 905 } 906 } 907 } 908 909 bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm, 910 const MCFixup &Fixup, 911 const MCValue &Target, 912 const MCSubtargetInfo *STI) { 913 const MCSymbolRefExpr *A = Target.getSymA(); 914 const MCSymbol *Sym = A ? &A->getSymbol() : nullptr; 915 const unsigned FixupKind = Fixup.getKind(); 916 if (FixupKind >= FirstLiteralRelocationKind) 917 return true; 918 if (FixupKind == ARM::fixup_arm_thumb_bl) { 919 assert(Sym && "How did we resolve this?"); 920 921 // If the symbol is external the linker will handle it. 922 // FIXME: Should we handle it as an optimization? 
923 924 // If the symbol is out of range, produce a relocation and hope the 925 // linker can handle it. GNU AS produces an error in this case. 926 if (Sym->isExternal()) 927 return true; 928 } 929 // Create relocations for unconditional branches to function symbols with 930 // different execution mode in ELF binaries. 931 if (Sym && Sym->isELF()) { 932 unsigned Type = cast<MCSymbolELF>(Sym)->getType(); 933 if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) { 934 if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch)) 935 return true; 936 if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br || 937 FixupKind == ARM::fixup_arm_thumb_bl || 938 FixupKind == ARM::fixup_t2_condbranch || 939 FixupKind == ARM::fixup_t2_uncondbranch)) 940 return true; 941 } 942 } 943 // We must always generate a relocation for BL/BLX instructions if we have 944 // a symbol to reference, as the linker relies on knowing the destination 945 // symbol's thumb-ness to get interworking right. 946 if (A && (FixupKind == ARM::fixup_arm_thumb_blx || 947 FixupKind == ARM::fixup_arm_blx || 948 FixupKind == ARM::fixup_arm_uncondbl || 949 FixupKind == ARM::fixup_arm_condbl)) 950 return true; 951 return false; 952 } 953 954 /// getFixupKindNumBytes - The number of bytes the fixup may change. 
955 static unsigned getFixupKindNumBytes(unsigned Kind) { 956 switch (Kind) { 957 default: 958 llvm_unreachable("Unknown fixup kind!"); 959 960 case FK_Data_1: 961 case ARM::fixup_arm_thumb_bcc: 962 case ARM::fixup_arm_thumb_cp: 963 case ARM::fixup_thumb_adr_pcrel_10: 964 case ARM::fixup_arm_thumb_upper_8_15: 965 case ARM::fixup_arm_thumb_upper_0_7: 966 case ARM::fixup_arm_thumb_lower_8_15: 967 case ARM::fixup_arm_thumb_lower_0_7: 968 return 1; 969 970 case FK_Data_2: 971 case ARM::fixup_arm_thumb_br: 972 case ARM::fixup_arm_thumb_cb: 973 case ARM::fixup_arm_mod_imm: 974 return 2; 975 976 case ARM::fixup_arm_pcrel_10_unscaled: 977 case ARM::fixup_arm_ldst_pcrel_12: 978 case ARM::fixup_arm_pcrel_10: 979 case ARM::fixup_arm_pcrel_9: 980 case ARM::fixup_arm_ldst_abs_12: 981 case ARM::fixup_arm_adr_pcrel_12: 982 case ARM::fixup_arm_uncondbl: 983 case ARM::fixup_arm_condbl: 984 case ARM::fixup_arm_blx: 985 case ARM::fixup_arm_condbranch: 986 case ARM::fixup_arm_uncondbranch: 987 return 3; 988 989 case FK_Data_4: 990 case ARM::fixup_t2_ldst_pcrel_12: 991 case ARM::fixup_t2_condbranch: 992 case ARM::fixup_t2_uncondbranch: 993 case ARM::fixup_t2_pcrel_10: 994 case ARM::fixup_t2_pcrel_9: 995 case ARM::fixup_t2_adr_pcrel_12: 996 case ARM::fixup_arm_thumb_bl: 997 case ARM::fixup_arm_thumb_blx: 998 case ARM::fixup_arm_movt_hi16: 999 case ARM::fixup_arm_movw_lo16: 1000 case ARM::fixup_t2_movt_hi16: 1001 case ARM::fixup_t2_movw_lo16: 1002 case ARM::fixup_t2_so_imm: 1003 case ARM::fixup_bf_branch: 1004 case ARM::fixup_bf_target: 1005 case ARM::fixup_bfl_target: 1006 case ARM::fixup_bfc_target: 1007 case ARM::fixup_bfcsel_else_target: 1008 case ARM::fixup_wls: 1009 case ARM::fixup_le: 1010 return 4; 1011 1012 case FK_SecRel_2: 1013 return 2; 1014 case FK_SecRel_4: 1015 return 4; 1016 } 1017 } 1018 1019 /// getFixupKindContainerSizeBytes - The number of bytes of the 1020 /// container involved in big endian. 
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  // Plain data fixups: the container is the data itself.
  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_thumb_upper_8_15:
  case ARM::fixup_arm_thumb_upper_0_7:
  case ARM::fixup_arm_thumb_lower_8_15:
  case ARM::fixup_arm_thumb_lower_0_7:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}

/// Patch the already-encoded instruction/data bytes in \p Data with the
/// resolved fixup value. The value is first massaged into instruction-encoding
/// form by adjustFixupValue, then OR'd byte-by-byte into the fragment.
void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();
  // Explicitly requested relocations are emitted as-is by the object writer;
  // there is nothing to patch in the encoded bytes.
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.
  const unsigned NumBytes = getFixupKindNumBytes(Kind);

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  // NOTE: only initialized (and only read) on the big-endian path below.
  unsigned FullSizeBytes;
  if (Endian == llvm::endianness::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx =
        Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  // Top nibble selects the unwind mode.
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  // Two-bit field encoding the extra stack adjustment (0/4/8/12 bytes).
  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  // "First push" callee-saved GPRs (r4-r6), one bit each.
  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  // "Second push" callee-saved GPRs (r8-r12), one bit each.
  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  // Count of saved D-registers, used by the FRAME_D mode.
  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  // Section offset used when falling back to DWARF-based unwinding.
  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fallback and unwind using dwarf.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
  if (Instrs.empty())
    return 0;
  // Non-canonical personalities only get compact unwind when explicitly
  // requested; otherwise defer to DWARF.
  if (!isDarwinCanonicalPersonality(FI->Personality) &&
      !Ctxt->emitCompactUnwindNonCanonical())
    return CU::UNWIND_ARM_MODE_DWARF;

  // Start off assuming CFA is at SP+0.
  unsigned CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (const MCCFIInstruction &Inst : Instrs) {
    unsigned Reg;
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      // Record where each callee-saved GPR/DPR is stored relative to the CFA;
      // any other register class cannot be represented compactly.
      Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertable to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  // A standard frame puts {r7, lr} at CFA-8/CFA-4; anything beyond that is
  // the (vararg) stack adjustment.
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  // Table is ordered from highest to lowest save address; the scan below
  // requires each present register to sit exactly 4 bytes below the previous.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  // Expected save slots for d8/d10/d12/d14, checked from the last saved
  // register downward so CurOffset continues from the GPR scan above.
  static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  // Fold the D-register count (biased by one) into bits 11:8.
  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

/// Construct the asm backend variant matching the target triple's object
/// format (Mach-O, COFF, or ELF) with the requested endianness.
static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         llvm::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO:
    return new ARMAsmBackendDarwin(T, STI, MRI);
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
                                Endian);
  }
}

/// Entry point for little-endian ARM targets.
MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::little);
}

/// Entry point for big-endian ARM targets.
MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::big);
}