1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "MCTargetDesc/ARMAsmBackend.h" 10 #include "MCTargetDesc/ARMAddressingModes.h" 11 #include "MCTargetDesc/ARMAsmBackendDarwin.h" 12 #include "MCTargetDesc/ARMAsmBackendELF.h" 13 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h" 14 #include "MCTargetDesc/ARMFixupKinds.h" 15 #include "MCTargetDesc/ARMMCTargetDesc.h" 16 #include "llvm/ADT/StringSwitch.h" 17 #include "llvm/BinaryFormat/ELF.h" 18 #include "llvm/BinaryFormat/MachO.h" 19 #include "llvm/MC/MCAsmBackend.h" 20 #include "llvm/MC/MCAsmLayout.h" 21 #include "llvm/MC/MCAssembler.h" 22 #include "llvm/MC/MCContext.h" 23 #include "llvm/MC/MCDirectives.h" 24 #include "llvm/MC/MCELFObjectWriter.h" 25 #include "llvm/MC/MCExpr.h" 26 #include "llvm/MC/MCFixupKindInfo.h" 27 #include "llvm/MC/MCObjectWriter.h" 28 #include "llvm/MC/MCRegisterInfo.h" 29 #include "llvm/MC/MCSectionELF.h" 30 #include "llvm/MC/MCSectionMachO.h" 31 #include "llvm/MC/MCSubtargetInfo.h" 32 #include "llvm/MC/MCValue.h" 33 #include "llvm/Support/Debug.h" 34 #include "llvm/Support/EndianStream.h" 35 #include "llvm/Support/ErrorHandling.h" 36 #include "llvm/Support/Format.h" 37 #include "llvm/Support/raw_ostream.h" 38 using namespace llvm; 39 40 namespace { 41 class ARMELFObjectWriter : public MCELFObjectTargetWriter { 42 public: 43 ARMELFObjectWriter(uint8_t OSABI) 44 : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM, 45 /*HasRelocationAddend*/ false) {} 46 }; 47 } // end anonymous namespace 48 49 std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const { 50 return std::nullopt; 51 } 52 53 std::optional<MCFixupKind> 54 
ARMAsmBackendELF::getFixupKind(StringRef Name) const { 55 unsigned Type = llvm::StringSwitch<unsigned>(Name) 56 #define ELF_RELOC(X, Y) .Case(#X, Y) 57 #include "llvm/BinaryFormat/ELFRelocs/ARM.def" 58 #undef ELF_RELOC 59 .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE) 60 .Case("BFD_RELOC_8", ELF::R_ARM_ABS8) 61 .Case("BFD_RELOC_16", ELF::R_ARM_ABS16) 62 .Case("BFD_RELOC_32", ELF::R_ARM_ABS32) 63 .Default(-1u); 64 if (Type == -1u) 65 return std::nullopt; 66 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type); 67 } 68 69 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { 70 unsigned IsPCRelConstant = 71 MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant; 72 const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = { 73 // This table *must* be in the order that the fixup_* kinds are defined in 74 // ARMFixupKinds.h. 75 // 76 // Name Offset (bits) Size (bits) Flags 77 {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant}, 78 {"fixup_t2_ldst_pcrel_12", 0, 32, 79 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 80 {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant}, 81 {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant}, 82 {"fixup_t2_pcrel_10", 0, 32, 83 MCFixupKindInfo::FKF_IsPCRel | 84 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 85 {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 86 {"fixup_t2_pcrel_9", 0, 32, 87 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 88 {"fixup_arm_ldst_abs_12", 0, 32, 0}, 89 {"fixup_thumb_adr_pcrel_10", 0, 8, 90 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 91 {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant}, 92 {"fixup_t2_adr_pcrel_12", 0, 32, 93 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 94 {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel}, 95 {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel}, 96 {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 97 
{"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 98 {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, 99 {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel}, 100 {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel}, 101 {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel}, 102 {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 103 {"fixup_arm_thumb_blx", 0, 32, 104 MCFixupKindInfo::FKF_IsPCRel | 105 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 106 {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, 107 {"fixup_arm_thumb_cp", 0, 8, 108 MCFixupKindInfo::FKF_IsPCRel | 109 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 110 {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel}, 111 // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 112 // - 19. 113 {"fixup_arm_movt_hi16", 0, 20, 0}, 114 {"fixup_arm_movw_lo16", 0, 20, 0}, 115 {"fixup_t2_movt_hi16", 0, 20, 0}, 116 {"fixup_t2_movw_lo16", 0, 20, 0}, 117 {"fixup_arm_thumb_upper_8_15", 0, 8, 0}, 118 {"fixup_arm_thumb_upper_0_7", 0, 8, 0}, 119 {"fixup_arm_thumb_lower_8_15", 0, 8, 0}, 120 {"fixup_arm_thumb_lower_0_7", 0, 8, 0}, 121 {"fixup_arm_mod_imm", 0, 12, 0}, 122 {"fixup_t2_so_imm", 0, 26, 0}, 123 {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 124 {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 125 {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 126 {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 127 {"fixup_bfcsel_else_target", 0, 32, 0}, 128 {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 129 {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}}; 130 const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = { 131 // This table *must* be in the order that the fixup_* kinds are defined in 132 // ARMFixupKinds.h. 
133 // 134 // Name Offset (bits) Size (bits) Flags 135 {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant}, 136 {"fixup_t2_ldst_pcrel_12", 0, 32, 137 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 138 {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant}, 139 {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant}, 140 {"fixup_t2_pcrel_10", 0, 32, 141 MCFixupKindInfo::FKF_IsPCRel | 142 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 143 {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 144 {"fixup_t2_pcrel_9", 0, 32, 145 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 146 {"fixup_arm_ldst_abs_12", 0, 32, 0}, 147 {"fixup_thumb_adr_pcrel_10", 8, 8, 148 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 149 {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant}, 150 {"fixup_t2_adr_pcrel_12", 0, 32, 151 IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 152 {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, 153 {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, 154 {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 155 {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 156 {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, 157 {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, 158 {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, 159 {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel}, 160 {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 161 {"fixup_arm_thumb_blx", 0, 32, 162 MCFixupKindInfo::FKF_IsPCRel | 163 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 164 {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel}, 165 {"fixup_arm_thumb_cp", 8, 8, 166 MCFixupKindInfo::FKF_IsPCRel | 167 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, 168 {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel}, 169 // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 170 // - 19. 
171 {"fixup_arm_movt_hi16", 12, 20, 0}, 172 {"fixup_arm_movw_lo16", 12, 20, 0}, 173 {"fixup_t2_movt_hi16", 12, 20, 0}, 174 {"fixup_t2_movw_lo16", 12, 20, 0}, 175 {"fixup_arm_thumb_upper_8_15", 24, 8, 0}, 176 {"fixup_arm_thumb_upper_0_7", 24, 8, 0}, 177 {"fixup_arm_thumb_lower_8_15", 24, 8, 0}, 178 {"fixup_arm_thumb_lower_0_7", 24, 8, 0}, 179 {"fixup_arm_mod_imm", 20, 12, 0}, 180 {"fixup_t2_so_imm", 26, 6, 0}, 181 {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 182 {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 183 {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 184 {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 185 {"fixup_bfcsel_else_target", 0, 32, 0}, 186 {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel}, 187 {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}}; 188 189 // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require 190 // any extra processing. 191 if (Kind >= FirstLiteralRelocationKind) 192 return MCAsmBackend::getFixupKindInfo(FK_NONE); 193 194 if (Kind < FirstTargetFixupKind) 195 return MCAsmBackend::getFixupKindInfo(Kind); 196 197 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && 198 "Invalid kind!"); 199 return (Endian == support::little ? InfosLE 200 : InfosBE)[Kind - FirstTargetFixupKind]; 201 } 202 203 void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) { 204 switch (Flag) { 205 default: 206 break; 207 case MCAF_Code16: 208 setIsThumb(true); 209 break; 210 case MCAF_Code32: 211 setIsThumb(false); 212 break; 213 } 214 } 215 216 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op, 217 const MCSubtargetInfo &STI) const { 218 bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2); 219 bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps); 220 221 switch (Op) { 222 default: 223 return Op; 224 case ARM::tBcc: 225 return HasThumb2 ? (unsigned)ARM::t2Bcc : Op; 226 case ARM::tLDRpci: 227 return HasThumb2 ? 
(unsigned)ARM::t2LDRpci : Op; 228 case ARM::tADR: 229 return HasThumb2 ? (unsigned)ARM::t2ADR : Op; 230 case ARM::tB: 231 return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op; 232 case ARM::tCBZ: 233 return ARM::tHINT; 234 case ARM::tCBNZ: 235 return ARM::tHINT; 236 } 237 } 238 239 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst, 240 const MCSubtargetInfo &STI) const { 241 if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode()) 242 return true; 243 return false; 244 } 245 246 static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) { 247 int64_t Offset = int64_t(Value) - 4; 248 if (Offset < Min || Offset > Max) 249 return "out of range pc-relative fixup value"; 250 return nullptr; 251 } 252 253 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup, 254 uint64_t Value) const { 255 switch (Fixup.getTargetKind()) { 256 case ARM::fixup_arm_thumb_br: { 257 // Relaxing tB to t2B. tB has a signed 12-bit displacement with the 258 // low bit being an implied zero. There's an implied +4 offset for the 259 // branch, so we adjust the other way here to determine what's 260 // encodable. 261 // 262 // Relax if the value is too big for a (signed) i8. 263 int64_t Offset = int64_t(Value) - 4; 264 if (Offset > 2046 || Offset < -2048) 265 return "out of range pc-relative fixup value"; 266 break; 267 } 268 case ARM::fixup_arm_thumb_bcc: { 269 // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the 270 // low bit being an implied zero. There's an implied +4 offset for the 271 // branch, so we adjust the other way here to determine what's 272 // encodable. 273 // 274 // Relax if the value is too big for a (signed) i8. 
275 int64_t Offset = int64_t(Value) - 4; 276 if (Offset > 254 || Offset < -256) 277 return "out of range pc-relative fixup value"; 278 break; 279 } 280 case ARM::fixup_thumb_adr_pcrel_10: 281 case ARM::fixup_arm_thumb_cp: { 282 // If the immediate is negative, greater than 1020, or not a multiple 283 // of four, the wide version of the instruction must be used. 284 int64_t Offset = int64_t(Value) - 4; 285 if (Offset & 3) 286 return "misaligned pc-relative fixup value"; 287 else if (Offset > 1020 || Offset < 0) 288 return "out of range pc-relative fixup value"; 289 break; 290 } 291 case ARM::fixup_arm_thumb_cb: { 292 // If we have a Thumb CBZ or CBNZ instruction and its target is the next 293 // instruction it is actually out of range for the instruction. 294 // It will be changed to a NOP. 295 int64_t Offset = (Value & ~1); 296 if (Offset == 2) 297 return "will be converted to nop"; 298 break; 299 } 300 case ARM::fixup_bf_branch: 301 return checkPCRelOffset(Value, 0, 30); 302 case ARM::fixup_bf_target: 303 return checkPCRelOffset(Value, -0x10000, +0xfffe); 304 case ARM::fixup_bfl_target: 305 return checkPCRelOffset(Value, -0x40000, +0x3fffe); 306 case ARM::fixup_bfc_target: 307 return checkPCRelOffset(Value, -0x1000, +0xffe); 308 case ARM::fixup_wls: 309 return checkPCRelOffset(Value, 0, +0xffe); 310 case ARM::fixup_le: 311 // The offset field in the LE and LETP instructions is an 11-bit 312 // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is 313 // interpreted as a negative offset from the value read from pc, 314 // i.e. from instruction_address+4. 315 // 316 // So an LE instruction can in principle address the instruction 317 // immediately after itself, or (not very usefully) the address 318 // half way through the 4-byte LE. 
319 return checkPCRelOffset(Value, -0xffe, 0); 320 case ARM::fixup_bfcsel_else_target: { 321 if (Value != 2 && Value != 4) 322 return "out of range label-relative fixup value"; 323 break; 324 } 325 326 default: 327 llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!"); 328 } 329 return nullptr; 330 } 331 332 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, 333 const MCRelaxableFragment *DF, 334 const MCAsmLayout &Layout) const { 335 return reasonForFixupRelaxation(Fixup, Value); 336 } 337 338 void ARMAsmBackend::relaxInstruction(MCInst &Inst, 339 const MCSubtargetInfo &STI) const { 340 unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI); 341 342 // Return a diagnostic if we get here w/ a bogus instruction. 343 if (RelaxedOp == Inst.getOpcode()) { 344 SmallString<256> Tmp; 345 raw_svector_ostream OS(Tmp); 346 Inst.dump_pretty(OS); 347 OS << "\n"; 348 report_fatal_error("unexpected instruction to relax: " + OS.str()); 349 } 350 351 // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we 352 // have to change the operands too. 353 if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) && 354 RelaxedOp == ARM::tHINT) { 355 MCInst Res; 356 Res.setOpcode(RelaxedOp); 357 Res.addOperand(MCOperand::createImm(0)); 358 Res.addOperand(MCOperand::createImm(14)); 359 Res.addOperand(MCOperand::createReg(0)); 360 Inst = std::move(Res); 361 return; 362 } 363 364 // The rest of instructions we're relaxing have the same operands. 365 // We just need to update to the proper opcode. 
366 Inst.setOpcode(RelaxedOp); 367 } 368 369 bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count, 370 const MCSubtargetInfo *STI) const { 371 const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8 372 const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP 373 const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0 374 const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP 375 if (isThumb()) { 376 const uint16_t nopEncoding = 377 hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding; 378 uint64_t NumNops = Count / 2; 379 for (uint64_t i = 0; i != NumNops; ++i) 380 support::endian::write(OS, nopEncoding, Endian); 381 if (Count & 1) 382 OS << '\0'; 383 return true; 384 } 385 // ARM mode 386 const uint32_t nopEncoding = 387 hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding; 388 uint64_t NumNops = Count / 4; 389 for (uint64_t i = 0; i != NumNops; ++i) 390 support::endian::write(OS, nopEncoding, Endian); 391 // FIXME: should this function return false when unable to write exactly 392 // 'Count' bytes with NOP encodings? 393 switch (Count % 4) { 394 default: 395 break; // No leftover bytes to write 396 case 1: 397 OS << '\0'; 398 break; 399 case 2: 400 OS.write("\0\0", 2); 401 break; 402 case 3: 403 OS.write("\0\0\xa0", 3); 404 break; 405 } 406 407 return true; 408 } 409 410 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) { 411 if (IsLittleEndian) { 412 // Note that the halfwords are stored high first and low second in thumb; 413 // so we need to swap the fixup value here to map properly. 
414 uint32_t Swapped = (Value & 0xFFFF0000) >> 16; 415 Swapped |= (Value & 0x0000FFFF) << 16; 416 return Swapped; 417 } else 418 return Value; 419 } 420 421 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf, 422 bool IsLittleEndian) { 423 uint32_t Value; 424 425 if (IsLittleEndian) { 426 Value = (SecondHalf & 0xFFFF) << 16; 427 Value |= (FirstHalf & 0xFFFF); 428 } else { 429 Value = (SecondHalf & 0xFFFF); 430 Value |= (FirstHalf & 0xFFFF) << 16; 431 } 432 433 return Value; 434 } 435 436 unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm, 437 const MCFixup &Fixup, 438 const MCValue &Target, uint64_t Value, 439 bool IsResolved, MCContext &Ctx, 440 const MCSubtargetInfo* STI) const { 441 unsigned Kind = Fixup.getKind(); 442 443 // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT 444 // and .word relocations they put the Thumb bit into the addend if possible. 445 // Other relocation types don't want this bit though (branches couldn't encode 446 // it if it *was* present, and no other relocations exist) and it can 447 // interfere with checking valid expressions. 
448 if (const MCSymbolRefExpr *A = Target.getSymA()) { 449 if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) && 450 A->getSymbol().isExternal() && 451 (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 || 452 Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 || 453 Kind == ARM::fixup_t2_movt_hi16)) 454 Value |= 1; 455 } 456 457 switch (Kind) { 458 default: 459 return 0; 460 case FK_Data_1: 461 case FK_Data_2: 462 case FK_Data_4: 463 return Value; 464 case FK_SecRel_2: 465 return Value; 466 case FK_SecRel_4: 467 return Value; 468 case ARM::fixup_arm_movt_hi16: 469 assert(STI != nullptr); 470 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 471 Value >>= 16; 472 [[fallthrough]]; 473 case ARM::fixup_arm_movw_lo16: { 474 unsigned Hi4 = (Value & 0xF000) >> 12; 475 unsigned Lo12 = Value & 0x0FFF; 476 // inst{19-16} = Hi4; 477 // inst{11-0} = Lo12; 478 Value = (Hi4 << 16) | (Lo12); 479 return Value; 480 } 481 case ARM::fixup_t2_movt_hi16: 482 assert(STI != nullptr); 483 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 484 Value >>= 16; 485 [[fallthrough]]; 486 case ARM::fixup_t2_movw_lo16: { 487 unsigned Hi4 = (Value & 0xF000) >> 12; 488 unsigned i = (Value & 0x800) >> 11; 489 unsigned Mid3 = (Value & 0x700) >> 8; 490 unsigned Lo8 = Value & 0x0FF; 491 // inst{19-16} = Hi4; 492 // inst{26} = i; 493 // inst{14-12} = Mid3; 494 // inst{7-0} = Lo8; 495 Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8); 496 return swapHalfWords(Value, Endian == support::little); 497 } 498 case ARM::fixup_arm_thumb_upper_8_15: 499 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 500 return (Value & 0xff000000) >> 24; 501 return Value & 0xff; 502 case ARM::fixup_arm_thumb_upper_0_7: 503 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF()) 504 return (Value & 0x00ff0000) >> 16; 505 return Value & 0xff; 506 case ARM::fixup_arm_thumb_lower_8_15: 507 if (IsResolved || 
!STI->getTargetTriple().isOSBinFormatELF()) 508 return (Value & 0x0000ff00) >> 8; 509 return Value & 0xff; 510 case ARM::fixup_arm_thumb_lower_0_7: 511 return Value & 0x000000ff; 512 case ARM::fixup_arm_ldst_pcrel_12: 513 // ARM PC-relative values are offset by 8. 514 Value -= 4; 515 [[fallthrough]]; 516 case ARM::fixup_t2_ldst_pcrel_12: 517 // Offset by 4, adjusted by two due to the half-word ordering of thumb. 518 Value -= 4; 519 [[fallthrough]]; 520 case ARM::fixup_arm_ldst_abs_12: { 521 bool isAdd = true; 522 if ((int64_t)Value < 0) { 523 Value = -Value; 524 isAdd = false; 525 } 526 if (Value >= 4096) { 527 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 528 return 0; 529 } 530 Value |= isAdd << 23; 531 532 // Same addressing mode as fixup_arm_pcrel_10, 533 // but with 16-bit halfwords swapped. 534 if (Kind == ARM::fixup_t2_ldst_pcrel_12) 535 return swapHalfWords(Value, Endian == support::little); 536 537 return Value; 538 } 539 case ARM::fixup_arm_adr_pcrel_12: { 540 // ARM PC-relative values are offset by 8. 541 Value -= 8; 542 unsigned opc = 4; // bits {24-21}. Default to add: 0b0100 543 if ((int64_t)Value < 0) { 544 Value = -Value; 545 opc = 2; // 0b0010 546 } 547 if (ARM_AM::getSOImmVal(Value) == -1) { 548 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 549 return 0; 550 } 551 // Encode the immediate and shift the opcode into place. 
552 return ARM_AM::getSOImmVal(Value) | (opc << 21); 553 } 554 555 case ARM::fixup_t2_adr_pcrel_12: { 556 Value -= 4; 557 unsigned opc = 0; 558 if ((int64_t)Value < 0) { 559 Value = -Value; 560 opc = 5; 561 } 562 563 uint32_t out = (opc << 21); 564 out |= (Value & 0x800) << 15; 565 out |= (Value & 0x700) << 4; 566 out |= (Value & 0x0FF); 567 568 return swapHalfWords(out, Endian == support::little); 569 } 570 571 case ARM::fixup_arm_condbranch: 572 case ARM::fixup_arm_uncondbranch: 573 case ARM::fixup_arm_uncondbl: 574 case ARM::fixup_arm_condbl: 575 case ARM::fixup_arm_blx: 576 // These values don't encode the low two bits since they're always zero. 577 // Offset by 8 just as above. 578 if (const MCSymbolRefExpr *SRE = 579 dyn_cast<MCSymbolRefExpr>(Fixup.getValue())) 580 if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL) 581 return 0; 582 return 0xffffff & ((Value - 8) >> 2); 583 case ARM::fixup_t2_uncondbranch: { 584 Value = Value - 4; 585 if (!isInt<25>(Value)) { 586 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 587 return 0; 588 } 589 590 Value >>= 1; // Low bit is not encoded. 591 592 uint32_t out = 0; 593 bool I = Value & 0x800000; 594 bool J1 = Value & 0x400000; 595 bool J2 = Value & 0x200000; 596 J1 ^= I; 597 J2 ^= I; 598 599 out |= I << 26; // S bit 600 out |= !J1 << 13; // J1 bit 601 out |= !J2 << 11; // J2 bit 602 out |= (Value & 0x1FF800) << 5; // imm6 field 603 out |= (Value & 0x0007FF); // imm11 field 604 605 return swapHalfWords(out, Endian == support::little); 606 } 607 case ARM::fixup_t2_condbranch: { 608 Value = Value - 4; 609 if (!isInt<21>(Value)) { 610 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 611 return 0; 612 } 613 614 Value >>= 1; // Low bit is not encoded. 
615 616 uint64_t out = 0; 617 out |= (Value & 0x80000) << 7; // S bit 618 out |= (Value & 0x40000) >> 7; // J2 bit 619 out |= (Value & 0x20000) >> 4; // J1 bit 620 out |= (Value & 0x1F800) << 5; // imm6 field 621 out |= (Value & 0x007FF); // imm11 field 622 623 return swapHalfWords(out, Endian == support::little); 624 } 625 case ARM::fixup_arm_thumb_bl: { 626 if (!isInt<25>(Value - 4) || 627 (!STI->hasFeature(ARM::FeatureThumb2) && 628 !STI->hasFeature(ARM::HasV8MBaselineOps) && 629 !STI->hasFeature(ARM::HasV6MOps) && 630 !isInt<23>(Value - 4))) { 631 Ctx.reportError(Fixup.getLoc(), "Relocation out of range"); 632 return 0; 633 } 634 635 // The value doesn't encode the low bit (always zero) and is offset by 636 // four. The 32-bit immediate value is encoded as 637 // imm32 = SignExtend(S:I1:I2:imm10:imm11:0) 638 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S). 639 // The value is encoded into disjoint bit positions in the destination 640 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit, 641 // J = either J1 or J2 bit 642 // 643 // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII 644 // 645 // Note that the halfwords are stored high first, low second; so we need 646 // to transpose the fixup value here to map properly. 
647 uint32_t offset = (Value - 4) >> 1; 648 uint32_t signBit = (offset & 0x800000) >> 23; 649 uint32_t I1Bit = (offset & 0x400000) >> 22; 650 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit; 651 uint32_t I2Bit = (offset & 0x200000) >> 21; 652 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit; 653 uint32_t imm10Bits = (offset & 0x1FF800) >> 11; 654 uint32_t imm11Bits = (offset & 0x000007FF); 655 656 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits); 657 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) | 658 (uint16_t)imm11Bits); 659 return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little); 660 } 661 case ARM::fixup_arm_thumb_blx: { 662 // The value doesn't encode the low two bits (always zero) and is offset by 663 // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as 664 // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00) 665 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S). 666 // The value is encoded into disjoint bit positions in the destination 667 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit, 668 // J = either J1 or J2 bit, 0 = zero. 669 // 670 // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0 671 // 672 // Note that the halfwords are stored high first, low second; so we need 673 // to transpose the fixup value here to map properly. 
674 if (Value % 4 != 0) { 675 Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination"); 676 return 0; 677 } 678 679 uint32_t offset = (Value - 4) >> 2; 680 if (const MCSymbolRefExpr *SRE = 681 dyn_cast<MCSymbolRefExpr>(Fixup.getValue())) 682 if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL) 683 offset = 0; 684 uint32_t signBit = (offset & 0x400000) >> 22; 685 uint32_t I1Bit = (offset & 0x200000) >> 21; 686 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit; 687 uint32_t I2Bit = (offset & 0x100000) >> 20; 688 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit; 689 uint32_t imm10HBits = (offset & 0xFFC00) >> 10; 690 uint32_t imm10LBits = (offset & 0x3FF); 691 692 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits); 693 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) | 694 ((uint16_t)imm10LBits) << 1); 695 return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little); 696 } 697 case ARM::fixup_thumb_adr_pcrel_10: 698 case ARM::fixup_arm_thumb_cp: 699 // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we 700 // could have an error on our hands. 701 assert(STI != nullptr); 702 if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) { 703 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 704 if (FixupDiagnostic) { 705 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 706 return 0; 707 } 708 } 709 // Offset by 4, and don't encode the low two bits. 710 return ((Value - 4) >> 2) & 0xff; 711 case ARM::fixup_arm_thumb_cb: { 712 // CB instructions can only branch to offsets in [4, 126] in multiples of 2 713 // so ensure that the raw value LSB is zero and it lies in [2, 130]. 714 // An offset of 2 will be relaxed to a NOP. 715 if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) { 716 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 717 return 0; 718 } 719 // Offset by 4 and don't encode the lower bit, which is always 0. 
720 // FIXME: diagnose if no Thumb2 721 uint32_t Binary = (Value - 4) >> 1; 722 return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3); 723 } 724 case ARM::fixup_arm_thumb_br: 725 // Offset by 4 and don't encode the lower bit, which is always 0. 726 assert(STI != nullptr); 727 if (!STI->hasFeature(ARM::FeatureThumb2) && 728 !STI->hasFeature(ARM::HasV8MBaselineOps)) { 729 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 730 if (FixupDiagnostic) { 731 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 732 return 0; 733 } 734 } 735 return ((Value - 4) >> 1) & 0x7ff; 736 case ARM::fixup_arm_thumb_bcc: 737 // Offset by 4 and don't encode the lower bit, which is always 0. 738 assert(STI != nullptr); 739 if (!STI->hasFeature(ARM::FeatureThumb2)) { 740 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 741 if (FixupDiagnostic) { 742 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 743 return 0; 744 } 745 } 746 return ((Value - 4) >> 1) & 0xff; 747 case ARM::fixup_arm_pcrel_10_unscaled: { 748 Value = Value - 8; // ARM fixups offset by an additional word and don't 749 // need to adjust for the half-word ordering. 750 bool isAdd = true; 751 if ((int64_t)Value < 0) { 752 Value = -Value; 753 isAdd = false; 754 } 755 // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8]. 756 if (Value >= 256) { 757 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 758 return 0; 759 } 760 Value = (Value & 0xf) | ((Value & 0xf0) << 4); 761 return Value | (isAdd << 23); 762 } 763 case ARM::fixup_arm_pcrel_10: 764 Value = Value - 4; // ARM fixups offset by an additional word and don't 765 // need to adjust for the half-word ordering. 766 [[fallthrough]]; 767 case ARM::fixup_t2_pcrel_10: { 768 // Offset by 4, adjusted by two due to the half-word ordering of thumb. 
769 Value = Value - 4; 770 bool isAdd = true; 771 if ((int64_t)Value < 0) { 772 Value = -Value; 773 isAdd = false; 774 } 775 // These values don't encode the low two bits since they're always zero. 776 Value >>= 2; 777 if (Value >= 256) { 778 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 779 return 0; 780 } 781 Value |= isAdd << 23; 782 783 // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords 784 // swapped. 785 if (Kind == ARM::fixup_t2_pcrel_10) 786 return swapHalfWords(Value, Endian == support::little); 787 788 return Value; 789 } 790 case ARM::fixup_arm_pcrel_9: 791 Value = Value - 4; // ARM fixups offset by an additional word and don't 792 // need to adjust for the half-word ordering. 793 [[fallthrough]]; 794 case ARM::fixup_t2_pcrel_9: { 795 // Offset by 4, adjusted by two due to the half-word ordering of thumb. 796 Value = Value - 4; 797 bool isAdd = true; 798 if ((int64_t)Value < 0) { 799 Value = -Value; 800 isAdd = false; 801 } 802 // These values don't encode the low bit since it's always zero. 803 if (Value & 1) { 804 Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup"); 805 return 0; 806 } 807 Value >>= 1; 808 if (Value >= 256) { 809 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value"); 810 return 0; 811 } 812 Value |= isAdd << 23; 813 814 // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords 815 // swapped. 
816 if (Kind == ARM::fixup_t2_pcrel_9) 817 return swapHalfWords(Value, Endian == support::little); 818 819 return Value; 820 } 821 case ARM::fixup_arm_mod_imm: 822 Value = ARM_AM::getSOImmVal(Value); 823 if (Value >> 12) { 824 Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value"); 825 return 0; 826 } 827 return Value; 828 case ARM::fixup_t2_so_imm: { 829 Value = ARM_AM::getT2SOImmVal(Value); 830 if ((int64_t)Value < 0) { 831 Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value"); 832 return 0; 833 } 834 // Value will contain a 12-bit value broken up into a 4-bit shift in bits 835 // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate 836 // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit 837 // 10 of the upper half-word and imm3 is placed at 14:12 of the lower 838 // half-word. 839 uint64_t EncValue = 0; 840 EncValue |= (Value & 0x800) << 15; 841 EncValue |= (Value & 0x700) << 4; 842 EncValue |= (Value & 0xff); 843 return swapHalfWords(EncValue, Endian == support::little); 844 } 845 case ARM::fixup_bf_branch: { 846 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 847 if (FixupDiagnostic) { 848 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 849 return 0; 850 } 851 uint32_t out = (((Value - 4) >> 1) & 0xf) << 23; 852 return swapHalfWords(out, Endian == support::little); 853 } 854 case ARM::fixup_bf_target: 855 case ARM::fixup_bfl_target: 856 case ARM::fixup_bfc_target: { 857 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value); 858 if (FixupDiagnostic) { 859 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic); 860 return 0; 861 } 862 uint32_t out = 0; 863 uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 : 864 Kind == ARM::fixup_bfl_target ? 
                                                        0x3f800 : 0x800);
    // Scatter the halfword-scaled offset across the instruction's split
    // immediate fields: low bit into bit 11, bits 10:1 in place, and the
    // high bits (width depends on the fixup kind, per HighBitMask) at 16+.
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch targeted
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    // Only a single bit of the offset is encoded (bit 2 of Value -> bit 17).
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    // LE branches backwards, so the encoded offset is negated.
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == support::little);
  }
  }
}

/// Decide whether a fixup must be emitted as a relocation even though the
/// target may be resolvable at assembly time. This covers explicit literal
/// relocations, Thumb BL to external symbols, and ARM<->Thumb interworking
/// branches where the linker needs the destination symbol's thumb-ness.
bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  // Fixup kinds at or above FirstLiteralRelocationKind were requested as raw
  // relocations by name; always emit them as such.
  if (FixupKind >= FirstLiteralRelocationKind)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      // ARM-mode branch to a Thumb function needs the linker's help.
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      // Thumb-mode branch to an ARM function likewise.
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
947 static unsigned getFixupKindNumBytes(unsigned Kind) { 948 switch (Kind) { 949 default: 950 llvm_unreachable("Unknown fixup kind!"); 951 952 case FK_Data_1: 953 case ARM::fixup_arm_thumb_bcc: 954 case ARM::fixup_arm_thumb_cp: 955 case ARM::fixup_thumb_adr_pcrel_10: 956 case ARM::fixup_arm_thumb_upper_8_15: 957 case ARM::fixup_arm_thumb_upper_0_7: 958 case ARM::fixup_arm_thumb_lower_8_15: 959 case ARM::fixup_arm_thumb_lower_0_7: 960 return 1; 961 962 case FK_Data_2: 963 case ARM::fixup_arm_thumb_br: 964 case ARM::fixup_arm_thumb_cb: 965 case ARM::fixup_arm_mod_imm: 966 return 2; 967 968 case ARM::fixup_arm_pcrel_10_unscaled: 969 case ARM::fixup_arm_ldst_pcrel_12: 970 case ARM::fixup_arm_pcrel_10: 971 case ARM::fixup_arm_pcrel_9: 972 case ARM::fixup_arm_ldst_abs_12: 973 case ARM::fixup_arm_adr_pcrel_12: 974 case ARM::fixup_arm_uncondbl: 975 case ARM::fixup_arm_condbl: 976 case ARM::fixup_arm_blx: 977 case ARM::fixup_arm_condbranch: 978 case ARM::fixup_arm_uncondbranch: 979 return 3; 980 981 case FK_Data_4: 982 case ARM::fixup_t2_ldst_pcrel_12: 983 case ARM::fixup_t2_condbranch: 984 case ARM::fixup_t2_uncondbranch: 985 case ARM::fixup_t2_pcrel_10: 986 case ARM::fixup_t2_pcrel_9: 987 case ARM::fixup_t2_adr_pcrel_12: 988 case ARM::fixup_arm_thumb_bl: 989 case ARM::fixup_arm_thumb_blx: 990 case ARM::fixup_arm_movt_hi16: 991 case ARM::fixup_arm_movw_lo16: 992 case ARM::fixup_t2_movt_hi16: 993 case ARM::fixup_t2_movw_lo16: 994 case ARM::fixup_t2_so_imm: 995 case ARM::fixup_bf_branch: 996 case ARM::fixup_bf_target: 997 case ARM::fixup_bfl_target: 998 case ARM::fixup_bfc_target: 999 case ARM::fixup_bfcsel_else_target: 1000 case ARM::fixup_wls: 1001 case ARM::fixup_le: 1002 return 4; 1003 1004 case FK_SecRel_2: 1005 return 2; 1006 case FK_SecRel_4: 1007 return 4; 1008 } 1009 } 1010 1011 /// getFixupKindContainerSizeBytes - The number of bytes of the 1012 /// container involved in big endian. 
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_thumb_upper_8_15:
  case ARM::fixup_arm_thumb_upper_0_7:
  case ARM::fixup_arm_thumb_lower_8_15:
  case ARM::fixup_arm_thumb_lower_0_7:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}

/// Patch the fixup's resolved value into the instruction/data bytes in place.
/// The adjusted value is OR'd byte-by-byte into Data, so adjustFixupValue is
/// expected to have already split it into the instruction's bit fields.
void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();
  // Literal relocation kinds are emitted as relocations verbatim; nothing is
  // applied to the contents here.
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.
  const unsigned NumBytes = getFixupKindNumBytes(Kind);

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes. On big-endian targets the bytes are
  // indexed from the most-significant end of the full container.
  unsigned FullSizeBytes;
  if (Endian == support::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  // Bits 27:24 select the unwinding mode.
  UNWIND_ARM_MODE_MASK                         = 0x0F000000,
  UNWIND_ARM_MODE_FRAME                        = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
  UNWIND_ARM_MODE_DWARF                        = 0x04000000,

  // Bits 23:22 hold the (scaled) extra stack adjustment for frame modes.
  UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,

  // "First push" GPRs saved immediately below the r7/lr frame.
  UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,

  // "Second push" GPRs saved below the first-push registers.
  UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,

  // Bits 11:8: count field used by the FRAME_D mode (see the use below,
  // where (FloatRegCount - 1) << 8 is OR'd in).
  UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fallback and unwind using dwarf.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
  if (Instrs.empty())
    return 0;
  // Non-canonical personalities only get compact unwind if explicitly enabled.
  if (!isDarwinCanonicalPersonality(FI->Personality) &&
      !Ctxt->emitCompactUnwindNonCanonical())
    return CU::UNWIND_ARM_MODE_DWARF;

  // Start off assuming CFA is at SP+0.
  unsigned CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (const MCCFIInstruction &Inst : Instrs) {
    unsigned Reg;
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      // Record where each GPR/DPR was saved relative to the CFA; anything
      // else cannot be described compactly.
      Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  // A standard frame is r7+lr pushed together, so the CFA is 8 bytes above
  // r7; anything beyond that is an extra stack adjustment.
  int StackAdjust = CFARegisterOffset - 8;
  // lr must sit immediately below the CFA (adjusted for StackAdjust)...
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  // ...and r7 immediately below lr.
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  // Only 0/4/8/12 bytes (2 encoded bits) are representable.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  // The table is ordered top-of-stack first; each present register must sit
  // exactly 4 bytes below the previous save with no gaps.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  // NOTE(review): only D8/D10/D12/D14 are accepted, each occupying 8 bytes —
  // presumably matching the armv7k save pattern the runtime expects; confirm
  // against the compact-unwind consumer before changing.
  static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  // Record how many D registers are saved in the count field (bits 11:8).
  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

/// Create the backend matching the target triple's object format; Endian is
/// only used by the ELF backend.
static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         support::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO:
    return new ARMAsmBackendDarwin(T, STI, MRI);
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
                                Endian);
  }
}

/// Entry point for the little-endian ARM backend.
MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}

/// Entry point for the big-endian ARM backend.
MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}