//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

const MCFixupKindInfo &
LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
      {"fixup_loongarch_tls_le_hi20", 5, 20, 0},
      {"fixup_loongarch_tls_le_lo12", 10, 12, 0},
      {"fixup_loongarch_tls_le64_lo20", 5, 20, 0},
      {"fixup_loongarch_tls_le64_hi12", 10, 12, 0},
      // TODO: Add more fixup kinds.
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
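  // Such kinds are encoded as FirstLiteralRelocationKind plus the raw ELF
  // relocation type (see getFixupKind above), so they map directly to a
  // relocation record and carry no target-specific encoding information.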
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return Value;
  case LoongArch::fixup_loongarch_b16: {
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
  case LoongArch::fixup_loongarch_b21: {
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
  case LoongArch::fixup_loongarch_b26: {
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
  case LoongArch::fixup_loongarch_abs_hi20:
  case LoongArch::fixup_loongarch_tls_le_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
  case LoongArch::fixup_loongarch_tls_le_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
  case LoongArch::fixup_loongarch_tls_le64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
  case LoongArch::fixup_loongarch_tls_le64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

void LoongArchAsmBackend::applyFixup(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved,
                                     const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKind Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = Asm.getContext();

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
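  // Bytes are OR'd in least-significant first, matching LoongArch's
  // little-endian instruction encoding, and OR-ing leaves the instruction bits
  // outside the fixup field intact. For example, fixup_loongarch_b16 has
  // TargetOffset 10 and TargetSize 16, so NumBytes is 4 and the 16-bit branch
  // offset lands in bits [25:10] of the instruction word.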
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                                const MCFixup &Fixup,
                                                const MCValue &Target) {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return true;
  switch (Fixup.getTargetKind()) {
  default:
    return false;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return !Target.isAbsolute();
  }
}

bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to 4-byte boundary with
  // a 0-fill padding.
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
  // nop: andi r0, r0, 0
  // The nop encodes to 0x03400000; the bytes below are its little-endian form.
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(OSABI, Is64Bit);
}

MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit());
}