//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

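// Illustrative note (assembly sketch; `foo` is an arbitrary symbol): the lookup
// below is what lets an explicit .reloc directive name a LoongArch relocation,
// e.g.
//   .reloc ., R_LARCH_64, foo
// The relocation name is mapped to its ELF value and carried as a literal
// relocation kind (FirstLiteralRelocationKind + R_LARCH_64).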
std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

const MCFixupKindInfo &
LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
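      // For example, {"fixup_loongarch_b16", 10, 16, ...} describes a fixup
      // that patches 16 bits starting at bit 10 of the instruction word (the
      // conditional-branch offset field).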
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
      {"fixup_loongarch_tls_le_hi20", 5, 20, 0},
      {"fixup_loongarch_tls_le_lo12", 10, 12, 0},
      {"fixup_loongarch_tls_le64_lo20", 5, 20, 0},
      {"fixup_loongarch_tls_le64_hi12", 10, 12, 0},
      // TODO: Add more fixup kinds.
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return Value;
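  // Branch fixups receive a byte distance from the PC, but the instructions
  // encode a 4-byte word offset, hence the alignment checks and the implicit
  // division by 4 in the cases below.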
  case LoongArch::fixup_loongarch_b16: {
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
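  // BEQZ/BNEZ-class branches (b21) split their 21-bit word offset across two
  // fields: offs[15:0] in instruction bits [25:10] and offs[20:16] in bits
  // [4:0]; the shift/mask arithmetic below reproduces that layout.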
  case LoongArch::fixup_loongarch_b21: {
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
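  // B/BL (b26) split their 26-bit word offset the same way: offs[15:0] in
  // bits [25:10] and offs[25:16] in bits [9:0].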
  case LoongArch::fixup_loongarch_b26: {
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
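  // The absolute and TLS-LE fixups each extract one slice of an up-to-64-bit
  // value, matching the lu12i.w/ori/lu32i.d/lu52i.d materialization sequence:
  // bits [31:12], [11:0], [51:32] and [63:52] respectively.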
  case LoongArch::fixup_loongarch_abs_hi20:
  case LoongArch::fixup_loongarch_tls_le_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
  case LoongArch::fixup_loongarch_tls_le_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
  case LoongArch::fixup_loongarch_tls_le64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
  case LoongArch::fixup_loongarch_tls_le64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup,
                        MutableArrayRef<char> Data, uint64_t Value) {
  unsigned I;
  for (I = 0; I != Data.size() && Value; ++I, Value >>= 7)
    Data[I] |= uint8_t(Value & 0x7f);
  if (Value)
    Ctx.reportError(Fixup.getLoc(), "Invalid uleb128 value!");
}
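// A worked example with illustrative values: if Data holds the two-byte
// zero-padded uleb128 {0x80, 0x00} (continuation bit set on all but the last
// byte) and Value is 0x90, the loop above ORs in 0x10 and 0x01, producing
// {0x90, 0x01}, i.e. the uleb128 encoding of 0x90.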

void LoongArchAsmBackend::applyFixup(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved,
                                     const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKind Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = Asm.getContext();

  // Fixup leb128 separately.
  if (Fixup.getTargetKind() == FK_Data_leb128)
    return fixupLeb128(Ctx, Fixup, Data, Value);

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
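  // For instance, fixup_loongarch_b16 (offset 10, size 16) gives
  // NumBytes = alignTo(16 + 10, 8) / 8 = 4, i.e. the whole 32-bit instruction
  // word is touched.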

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                                const MCFixup &Fixup,
                                                const MCValue &Target,
                                                const MCSubtargetInfo *STI) {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return true;
  switch (Fixup.getTargetKind()) {
  default:
    return STI->hasFeature(LoongArch::FeatureRelax);
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return !Target.isAbsolute();
  }
}

static inline std::pair<MCFixupKind, MCFixupKind>
getRelocPairForSize(unsigned Size) {
  switch (Size) {
  default:
    llvm_unreachable("unsupported fixup size");
  case 6:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD6),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB6));
  case 8:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD8),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB8));
  case 16:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD16),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB16));
  case 32:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD32),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB32));
  case 64:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD64),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB64));
  case 128:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD_ULEB128),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB_ULEB128));
  }
}
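// For example (arbitrary labels), with linker relaxation enabled a
// same-section difference such as `.word .Lend - .Lstart` is emitted as an
// R_LARCH_ADD32/R_LARCH_SUB32 pair so the linker can recompute the difference
// after relaxing the code between the two labels.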

std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(MCLEBFragment &LF,
                                                       MCAsmLayout &Layout,
                                                       int64_t &Value) const {
  const MCExpr &Expr = LF.getValue();
  if (LF.isSigned() || !Expr.evaluateKnownAbsolute(Value, Layout))
    return std::make_pair(false, false);
  LF.getFixups().push_back(
      MCFixup::create(0, &Expr, FK_Data_leb128, Expr.getLoc()));
  return std::make_pair(true, true);
}

bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to 4-byte boundary with a
  // 0-fill padding.
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
  // nop: andi r0, r0, 0
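  // (encoding 0x03400000, written below in little-endian byte order)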
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

bool LoongArchAsmBackend::handleAddSubRelocations(const MCAsmLayout &Layout,
                                                  const MCFragment &F,
                                                  const MCFixup &Fixup,
                                                  const MCValue &Target,
                                                  uint64_t &FixedValue) const {
  std::pair<MCFixupKind, MCFixupKind> FK;
  uint64_t FixedValueA, FixedValueB;
  const MCSymbol &SA = Target.getSymA()->getSymbol();
  const MCSymbol &SB = Target.getSymB()->getSymbol();

  bool force = !SA.isInSection() || !SB.isInSection();
  if (!force) {
    const MCSection &SecA = SA.getSection();
    const MCSection &SecB = SB.getSection();

    // If SecA != SecB we need to record a relocation. Usually SecB is the same
    // section as the Fixup, in which case the relocation is recorded as PCRel;
    // if SecB differs from the Fixup's section, an error is reported. Either
    // way, just return false and let handleFixup finish this work.
    if (&SecA != &SecB)
      return false;

    // Now SecA == SecB. If linker relaxation is enabled, we need to record the
    // ADD/SUB relocations. Otherwise the FixedValue has already been computed
    // in evaluateFixup, so return true to avoid recording relocations.
    if (!STI.hasFeature(LoongArch::FeatureRelax))
      return true;
  }

  switch (Fixup.getKind()) {
  case llvm::FK_Data_1:
    FK = getRelocPairForSize(8);
    break;
  case llvm::FK_Data_2:
    FK = getRelocPairForSize(16);
    break;
  case llvm::FK_Data_4:
    FK = getRelocPairForSize(32);
    break;
  case llvm::FK_Data_8:
    FK = getRelocPairForSize(64);
    break;
  case llvm::FK_Data_leb128:
    FK = getRelocPairForSize(128);
    break;
  default:
    llvm_unreachable("unsupported fixup size");
  }
  MCValue A = MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
  MCValue B = MCValue::get(Target.getSymB());
  auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
  auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
  auto &Asm = Layout.getAssembler();
  Asm.getWriter().recordRelocation(Asm, Layout, &F, FA, A, FixedValueA);
  Asm.getWriter().recordRelocation(Asm, Layout, &F, FB, B, FixedValueB);
  FixedValue = FixedValueA - FixedValueB;
  return true;
}

std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(
      OSABI, Is64Bit, STI.hasFeature(LoongArch::FeatureRelax));
}

MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}