1 //===- LoongArch.cpp ------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputFiles.h"
10 #include "OutputSections.h"
11 #include "Symbols.h"
12 #include "SyntheticSections.h"
13 #include "Target.h"
14 #include "llvm/BinaryFormat/ELF.h"
15 #include "llvm/Support/LEB128.h"
16 
17 using namespace llvm;
18 using namespace llvm::object;
19 using namespace llvm::support::endian;
20 using namespace llvm::ELF;
21 using namespace lld;
22 using namespace lld::elf;
23 
24 namespace {
25 class LoongArch final : public TargetInfo {
26 public:
27   LoongArch(Ctx &);
28   uint32_t calcEFlags() const override;
29   int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
30   void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
31   void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
32   void writePltHeader(uint8_t *buf) const override;
33   void writePlt(uint8_t *buf, const Symbol &sym,
34                 uint64_t pltEntryAddr) const override;
35   RelType getDynRel(RelType type) const override;
36   RelExpr getRelExpr(RelType type, const Symbol &s,
37                      const uint8_t *loc) const override;
38   bool usesOnlyLowPageBits(RelType type) const override;
39   void relocate(uint8_t *loc, const Relocation &rel,
40                 uint64_t val) const override;
41   bool relaxOnce(int pass) const override;
42   RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
43   void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
44   void finalizeRelax(int passes) const override;
45 
46 private:
47   void tlsdescToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
48   void tlsdescToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
49   bool tryGotToPCRel(uint8_t *loc, const Relocation &rHi20,
50                      const Relocation &rLo12, uint64_t secAddr) const;
51 };
52 } // end anonymous namespace
53 
54 namespace {
55 enum Op {
56   SUB_W = 0x00110000,
57   SUB_D = 0x00118000,
58   BREAK = 0x002a0000,
59   SRLI_W = 0x00448000,
60   SRLI_D = 0x00450000,
61   ADDI_W = 0x02800000,
62   ADDI_D = 0x02c00000,
63   ANDI = 0x03400000,
64   ORI = 0x03800000,
65   LU12I_W = 0x14000000,
66   PCADDI = 0x18000000,
67   PCADDU12I = 0x1c000000,
68   PCALAU12I = 0x1a000000,
69   LD_W = 0x28800000,
70   LD_D = 0x28c00000,
71   JIRL = 0x4c000000,
72   B = 0x50000000,
73   BL = 0x54000000,
74 };
75 
76 enum Reg {
77   R_ZERO = 0,
78   R_RA = 1,
79   R_TP = 2,
80   R_A0 = 4,
81   R_T0 = 12,
82   R_T1 = 13,
83   R_T2 = 14,
84   R_T3 = 15,
85 };
86 } // namespace
87 
88 // Mask out the input's lowest 12 bits for use with `pcalau12i`, in sequences
89 // like `pcalau12i + addi.[wd]` or `pcalau12i + {ld,st}.*` where the `pcalau12i`
90 // produces a PC-relative intermediate value with the lowest 12 bits zeroed (the
91 // "page") for the next instruction to add in the "page offset". (`pcalau12i`
92 // stands for something like "PC ALigned Add Upper that starts from the 12th
93 // bit, Immediate".)
94 //
95 // Here a "page" is in fact just another way to refer to the 12-bit range
96 // allowed by the immediate field of the addi/ld/st instructions, and not
97 // related to the system or the kernel's actual page size. The semantics happen
98 // to match the AArch64 `adrp`, so the concept of "page" is borrowed here.
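//
// For illustration (an added example, not part of the upstream comment):
// assuming a hypothetical symbol `sym` at 0x12345678 referenced from
// pc = 0x12000804, we have page(pc) = 0x12000000, page(sym) = 0x12345000 and
// lo12(sym) = 0x678, so:
//   pcalau12i $a0, %pc_hi20(sym)       ; $a0 = page(pc) + 0x345000 = 0x12345000
//   addi.d    $a0, $a0, %pc_lo12(sym)  ; $a0 = $a0 + 0x678         = 0x12345678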
99 static uint64_t getLoongArchPage(uint64_t p) {
100   return p & ~static_cast<uint64_t>(0xfff);
101 }
102 
103 static uint32_t lo12(uint32_t val) { return val & 0xfff; }
104 
105 // Calculate the adjusted page delta between dest and PC.
106 uint64_t elf::getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type) {
107   // Note that if the sequence being relocated is `pcalau12i + addi.d + lu32i.d
108   // + lu52i.d`, they must be adjacent so that we can infer the PC of
109   // `pcalau12i` when calculating the page delta for the other two instructions
110   // (lu32i.d and lu52i.d). Compensating for all the sign extensions is a bit
111   // complicated. Just use the psABI-recommended algorithm.
112   uint64_t pcalau12i_pc;
113   switch (type) {
114   case R_LARCH_PCALA64_LO20:
115   case R_LARCH_GOT64_PC_LO20:
116   case R_LARCH_TLS_IE64_PC_LO20:
117   case R_LARCH_TLS_DESC64_PC_LO20:
118     pcalau12i_pc = pc - 8;
119     break;
120   case R_LARCH_PCALA64_HI12:
121   case R_LARCH_GOT64_PC_HI12:
122   case R_LARCH_TLS_IE64_PC_HI12:
123   case R_LARCH_TLS_DESC64_PC_HI12:
124     pcalau12i_pc = pc - 12;
125     break;
126   default:
127     pcalau12i_pc = pc;
128     break;
129   }
130   uint64_t result = getLoongArchPage(dest) - getLoongArchPage(pcalau12i_pc);
131   if (dest & 0x800)
132     result += 0x1000 - 0x1'0000'0000;
133   if (result & 0x8000'0000)
134     result += 0x1'0000'0000;
135   return result;
136 }
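// Illustrative note on getLoongArchPageDelta above (an added sketch, not part
// of the upstream source): for a hypothetical dest = 0x12345999 the paired
// lo12 field holds 0x999, which addi.d/ld.d sign-extend to 0x999 - 0x1000, so
// the `dest & 0x800` branch bumps the page delta by 0x1000 to keep
// page + (sign-extended lo12) landing on dest. The +/- 0x1'0000'0000 terms
// play the analogous role for the sign extension feeding the lu32i.d/lu52i.d
// fields of the 64-bit four-instruction sequence.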
137 
138 static uint32_t hi20(uint32_t val) { return (val + 0x800) >> 12; }
139 
140 static uint32_t insn(uint32_t op, uint32_t d, uint32_t j, uint32_t k) {
141   return op | d | (j << 5) | (k << 10);
142 }
143 
144 // Extract bits v[begin:end], where range is inclusive.
145 static uint32_t extractBits(uint64_t v, uint32_t begin, uint32_t end) {
146   return begin == 63 ? v >> end : (v & ((1ULL << (begin + 1)) - 1)) >> end;
147 }
148 
149 static uint32_t getD5(uint64_t v) { return extractBits(v, 4, 0); }
150 
151 static uint32_t getJ5(uint64_t v) { return extractBits(v, 9, 5); }
152 
153 static uint32_t setD5k16(uint32_t insn, uint32_t imm) {
154   uint32_t immLo = extractBits(imm, 15, 0);
155   uint32_t immHi = extractBits(imm, 20, 16);
156   return (insn & 0xfc0003e0) | (immLo << 10) | immHi;
157 }
158 
159 static uint32_t setD10k16(uint32_t insn, uint32_t imm) {
160   uint32_t immLo = extractBits(imm, 15, 0);
161   uint32_t immHi = extractBits(imm, 25, 16);
162   return (insn & 0xfc000000) | (immLo << 10) | immHi;
163 }
164 
165 static uint32_t setJ20(uint32_t insn, uint32_t imm) {
166   return (insn & 0xfe00001f) | (extractBits(imm, 19, 0) << 5);
167 }
168 
169 static uint32_t setJ5(uint32_t insn, uint32_t imm) {
170   return (insn & 0xfffffc1f) | (extractBits(imm, 4, 0) << 5);
171 }
172 
173 static uint32_t setK12(uint32_t insn, uint32_t imm) {
174   return (insn & 0xffc003ff) | (extractBits(imm, 11, 0) << 10);
175 }
176 
177 static uint32_t setK16(uint32_t insn, uint32_t imm) {
178   return (insn & 0xfc0003ff) | (extractBits(imm, 15, 0) << 10);
179 }
180 
181 static bool isJirl(uint32_t insn) {
182   return (insn & 0xfc000000) == JIRL;
183 }
184 
185 static void handleUleb128(Ctx &ctx, uint8_t *loc, uint64_t val) {
186   const uint32_t maxcount = 1 + 64 / 7;
187   uint32_t count;
188   const char *error = nullptr;
189   uint64_t orig = decodeULEB128(loc, &count, nullptr, &error);
190   if (count > maxcount || (count == maxcount && error))
191     Err(ctx) << getErrorLoc(ctx, loc) << "extra space for uleb128";
192   uint64_t mask = count < maxcount ? (1ULL << 7 * count) - 1 : -1ULL;
193   encodeULEB128((orig + val) & mask, loc, count);
194 }
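// Illustrative note on handleUleb128 above (an added example, not part of the
// upstream source): if `loc` holds 0xbf 0x00, the two-byte padded ULEB128
// encoding of 0x3f, and val is 5, then decodeULEB128 returns orig = 0x3f with
// count = 2, the mask is 0x3fff, and encodeULEB128 rewrites the same two bytes
// as 0xc4 0x00, the padded encoding of 0x44.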
195 
196 LoongArch::LoongArch(Ctx &ctx) : TargetInfo(ctx) {
197   // The LoongArch ISA itself does not have a limit on page sizes. According to
198   // the ISA manual, the PS (page size) field in MTLB entries and CSR.STLBPS is
199   // 6 bits wide, meaning the maximum page size is 2^63 which is equivalent to
200   // "unlimited".
201   // However, practically the maximum usable page size is constrained by the
202   // kernel implementation, and 64KiB is the biggest non-huge page size
203   // supported by Linux as of v6.4. The most widespread page size in use,
204   // though, is 16KiB.
205   defaultCommonPageSize = 16384;
206   defaultMaxPageSize = 65536;
207   write32le(trapInstr.data(), BREAK); // break 0
208 
209   copyRel = R_LARCH_COPY;
210   pltRel = R_LARCH_JUMP_SLOT;
211   relativeRel = R_LARCH_RELATIVE;
212   iRelativeRel = R_LARCH_IRELATIVE;
213 
214   if (ctx.arg.is64) {
215     symbolicRel = R_LARCH_64;
216     tlsModuleIndexRel = R_LARCH_TLS_DTPMOD64;
217     tlsOffsetRel = R_LARCH_TLS_DTPREL64;
218     tlsGotRel = R_LARCH_TLS_TPREL64;
219     tlsDescRel = R_LARCH_TLS_DESC64;
220   } else {
221     symbolicRel = R_LARCH_32;
222     tlsModuleIndexRel = R_LARCH_TLS_DTPMOD32;
223     tlsOffsetRel = R_LARCH_TLS_DTPREL32;
224     tlsGotRel = R_LARCH_TLS_TPREL32;
225     tlsDescRel = R_LARCH_TLS_DESC32;
226   }
227 
228   gotRel = symbolicRel;
229 
230   // .got.plt[0] = _dl_runtime_resolve, .got.plt[1] = link_map
231   gotPltHeaderEntriesNum = 2;
232 
233   pltHeaderSize = 32;
234   pltEntrySize = 16;
235   ipltEntrySize = 16;
236 }
237 
238 static uint32_t getEFlags(Ctx &ctx, const InputFile *f) {
239   if (ctx.arg.is64)
240     return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
241   return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
242 }
243 
244 static bool inputFileHasCode(const InputFile *f) {
245   for (const auto *sec : f->getSections())
246     if (sec && sec->flags & SHF_EXECINSTR)
247       return true;
248 
249   return false;
250 }
251 
252 uint32_t LoongArch::calcEFlags() const {
253   // If there are only binary input files (from -b binary), use a
254   // value of 0 for the ELF header flags.
255   if (ctx.objectFiles.empty())
256     return 0;
257 
258   uint32_t target = 0;
259   const InputFile *targetFile;
260   for (const InputFile *f : ctx.objectFiles) {
261     // Do not enforce ABI compatibility if the input file does not contain code.
262     // This is useful for allowing linkage with data-only object files produced
263     // with tools like objcopy, that have zero e_flags.
264     if (!inputFileHasCode(f))
265       continue;
266 
267     // Take the first non-zero e_flags as the reference.
268     uint32_t flags = getEFlags(ctx, f);
269     if (target == 0 && flags != 0) {
270       target = flags;
271       targetFile = f;
272     }
273 
274     if ((flags & EF_LOONGARCH_ABI_MODIFIER_MASK) !=
275         (target & EF_LOONGARCH_ABI_MODIFIER_MASK))
276       ErrAlways(ctx) << f
277                      << ": cannot link object files with different ABI from "
278                      << targetFile;
279 
280     // We cannot process psABI v1.x / object ABI v0 files (containing stack
281     // relocations), unlike ld.bfd.
282     //
283     // Instead of blindly accepting every v0 object and only failing at
284     // relocation processing time, just disallow interlink altogether. We
285     // don't expect significant usage of object ABI v0 in the wild (the old
286     // world may continue using object ABI v0 for a while, but as it's not
287     // binary-compatible with the upstream i.e. new-world ecosystem, it's not
288     // being considered here).
289     //
290     // Some new-world systems briefly shipped object ABI v0 binaries too,
291     // because they were built before the new ABI was finalized.
292     // These are not supported either due to the extremely small number of them,
293     // and the few impacted users are advised to simply rebuild world or
294     // reinstall a recent system.
295     if ((flags & EF_LOONGARCH_OBJABI_MASK) != EF_LOONGARCH_OBJABI_V1)
296       ErrAlways(ctx) << f << ": unsupported object file ABI version";
297   }
298 
299   return target;
300 }
301 
302 int64_t LoongArch::getImplicitAddend(const uint8_t *buf, RelType type) const {
303   switch (type) {
304   default:
305     InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
306     return 0;
307   case R_LARCH_32:
308   case R_LARCH_TLS_DTPMOD32:
309   case R_LARCH_TLS_DTPREL32:
310   case R_LARCH_TLS_TPREL32:
311     return SignExtend64<32>(read32le(buf));
312   case R_LARCH_64:
313   case R_LARCH_TLS_DTPMOD64:
314   case R_LARCH_TLS_DTPREL64:
315   case R_LARCH_TLS_TPREL64:
316     return read64le(buf);
317   case R_LARCH_RELATIVE:
318   case R_LARCH_IRELATIVE:
319     return ctx.arg.is64 ? read64le(buf) : read32le(buf);
320   case R_LARCH_NONE:
321   case R_LARCH_JUMP_SLOT:
322     // These relocations are defined as not having an implicit addend.
323     return 0;
324   case R_LARCH_TLS_DESC32:
325     return read32le(buf + 4);
326   case R_LARCH_TLS_DESC64:
327     return read64le(buf + 8);
328   }
329 }
330 
331 void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const {
332   if (ctx.arg.is64)
333     write64le(buf, ctx.in.plt->getVA());
334   else
335     write32le(buf, ctx.in.plt->getVA());
336 }
337 
338 void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
339   if (ctx.arg.writeAddends) {
340     if (ctx.arg.is64)
341       write64le(buf, s.getVA(ctx));
342     else
343       write32le(buf, s.getVA(ctx));
344   }
345 }
346 
347 void LoongArch::writePltHeader(uint8_t *buf) const {
348   // The LoongArch PLT is currently structured just like that of RISCV.
349   // Annoyingly, this means the PLT is still using `pcaddu12i` to perform
350   // PC-relative addressing (because `pcaddu12i` is the same as RISCV `auipc`),
351   // in contrast to the AArch64-like page-offset scheme with `pcalau12i` that
352   // is used everywhere else involving PC-relative operations in the LoongArch
353   // ELF psABI v2.00.
354   //
355   // The `pcrel_{hi20,lo12}` operators are illustrative only and not really
356   // supported by LoongArch assemblers.
357   //
358   //   pcaddu12i $t2, %pcrel_hi20(.got.plt)
359   //   sub.[wd]  $t1, $t1, $t3
360   //   ld.[wd]   $t3, $t2, %pcrel_lo12(.got.plt)  ; t3 = _dl_runtime_resolve
361   //   addi.[wd] $t1, $t1, -pltHeaderSize-12      ; t1 = &.plt[i] - &.plt[0]
362   //   addi.[wd] $t0, $t2, %pcrel_lo12(.got.plt)
363   //   srli.[wd] $t1, $t1, (is64?1:2)             ; t1 = &.got.plt[i] - &.got.plt[0]
364   //   ld.[wd]   $t0, $t0, Wordsize               ; t0 = link_map
365   //   jr        $t3
366   uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
367   uint32_t sub = ctx.arg.is64 ? SUB_D : SUB_W;
368   uint32_t ld = ctx.arg.is64 ? LD_D : LD_W;
369   uint32_t addi = ctx.arg.is64 ? ADDI_D : ADDI_W;
370   uint32_t srli = ctx.arg.is64 ? SRLI_D : SRLI_W;
371   write32le(buf + 0, insn(PCADDU12I, R_T2, hi20(offset), 0));
372   write32le(buf + 4, insn(sub, R_T1, R_T1, R_T3));
373   write32le(buf + 8, insn(ld, R_T3, R_T2, lo12(offset)));
374   write32le(buf + 12,
375             insn(addi, R_T1, R_T1, lo12(-ctx.target->pltHeaderSize - 12)));
376   write32le(buf + 16, insn(addi, R_T0, R_T2, lo12(offset)));
377   write32le(buf + 20, insn(srli, R_T1, R_T1, ctx.arg.is64 ? 1 : 2));
378   write32le(buf + 24, insn(ld, R_T0, R_T0, ctx.arg.wordsize));
379   write32le(buf + 28, insn(JIRL, R_ZERO, R_T3, 0));
380 }
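// Illustrative note on the header above (an added sketch, not part of the
// upstream source): unlike pcalau12i, pcaddu12i computes pc + (simm20 << 12)
// without clearing the low bits of pc, so `offset` is split directly. hi20()
// rounds by +0x800 to compensate for the sign extension that ld/addi apply to
// lo12(offset); e.g. a hypothetical offset of 0x1801 gives hi20 = 0x2 and
// lo12 = 0x801 (sign-extended to -0x7ff), and pc + 0x2000 - 0x7ff = pc + 0x1801.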
381 
382 void LoongArch::writePlt(uint8_t *buf, const Symbol &sym,
383                      uint64_t pltEntryAddr) const {
384   // See the comment in writePltHeader for the reason why pcaddu12i is used
385   // instead of the pcalau12i that's more commonly seen in the ELF psABI v2.0 days.
386   //
387   //   pcaddu12i $t3, %pcrel_hi20(f@.got.plt)
388   //   ld.[wd]   $t3, $t3, %pcrel_lo12(f@.got.plt)
389   //   jirl      $t1, $t3, 0
390   //   nop
391   uint32_t offset = sym.getGotPltVA(ctx) - pltEntryAddr;
392   write32le(buf + 0, insn(PCADDU12I, R_T3, hi20(offset), 0));
393   write32le(buf + 4,
394             insn(ctx.arg.is64 ? LD_D : LD_W, R_T3, R_T3, lo12(offset)));
395   write32le(buf + 8, insn(JIRL, R_T1, R_T3, 0));
396   write32le(buf + 12, insn(ANDI, R_ZERO, R_ZERO, 0));
397 }
398 
399 RelType LoongArch::getDynRel(RelType type) const {
400   return type == ctx.target->symbolicRel ? type
401                                          : static_cast<RelType>(R_LARCH_NONE);
402 }
403 
404 RelExpr LoongArch::getRelExpr(const RelType type, const Symbol &s,
405                               const uint8_t *loc) const {
406   switch (type) {
407   case R_LARCH_NONE:
408   case R_LARCH_MARK_LA:
409   case R_LARCH_MARK_PCREL:
410     return R_NONE;
411   case R_LARCH_32:
412   case R_LARCH_64:
413   case R_LARCH_ABS_HI20:
414   case R_LARCH_ABS_LO12:
415   case R_LARCH_ABS64_LO20:
416   case R_LARCH_ABS64_HI12:
417     return R_ABS;
418   case R_LARCH_PCALA_LO12:
419     // We could just use R_ABS, but the JIRL instruction reuses the relocation type
420     // for a different purpose. The questionable usage is part of glibc 2.37
421     // libc_nonshared.a [1], which is linked into user programs, so we have to
422     // work around it for a while, even if a new relocation type may be
423     // introduced in the future [2].
424     //
425     // [1]: https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=9f482b73f41a9a1bbfb173aad0733d1c824c788a
426     // [2]: https://github.com/loongson/la-abi-specs/pull/3
427     return isJirl(read32le(loc)) ? R_PLT : R_ABS;
428   case R_LARCH_TLS_DTPREL32:
429   case R_LARCH_TLS_DTPREL64:
430     return R_DTPREL;
431   case R_LARCH_TLS_TPREL32:
432   case R_LARCH_TLS_TPREL64:
433   case R_LARCH_TLS_LE_HI20:
434   case R_LARCH_TLS_LE_HI20_R:
435   case R_LARCH_TLS_LE_LO12:
436   case R_LARCH_TLS_LE_LO12_R:
437   case R_LARCH_TLS_LE64_LO20:
438   case R_LARCH_TLS_LE64_HI12:
439     return R_TPREL;
440   case R_LARCH_ADD6:
441   case R_LARCH_ADD8:
442   case R_LARCH_ADD16:
443   case R_LARCH_ADD32:
444   case R_LARCH_ADD64:
445   case R_LARCH_ADD_ULEB128:
446   case R_LARCH_SUB6:
447   case R_LARCH_SUB8:
448   case R_LARCH_SUB16:
449   case R_LARCH_SUB32:
450   case R_LARCH_SUB64:
451   case R_LARCH_SUB_ULEB128:
452     // The LoongArch add/sub relocs behave like the RISCV counterparts; reuse
453     // the RelExpr to avoid code duplication.
454     return RE_RISCV_ADD;
455   case R_LARCH_32_PCREL:
456   case R_LARCH_64_PCREL:
457   case R_LARCH_PCREL20_S2:
458     return R_PC;
459   case R_LARCH_B16:
460   case R_LARCH_B21:
461   case R_LARCH_B26:
462   case R_LARCH_CALL36:
463     return R_PLT_PC;
464   case R_LARCH_GOT_PC_HI20:
465   case R_LARCH_GOT64_PC_LO20:
466   case R_LARCH_GOT64_PC_HI12:
467   case R_LARCH_TLS_IE_PC_HI20:
468   case R_LARCH_TLS_IE64_PC_LO20:
469   case R_LARCH_TLS_IE64_PC_HI12:
470     return RE_LOONGARCH_GOT_PAGE_PC;
471   case R_LARCH_GOT_PC_LO12:
472   case R_LARCH_TLS_IE_PC_LO12:
473     return RE_LOONGARCH_GOT;
474   case R_LARCH_TLS_LD_PC_HI20:
475   case R_LARCH_TLS_GD_PC_HI20:
476     return RE_LOONGARCH_TLSGD_PAGE_PC;
477   case R_LARCH_PCALA_HI20:
478     // Why not RE_LOONGARCH_PAGE_PC? The majority of references don't go through
479     // the PLT anyway, so why waste time checking only to get everything relaxed
480     // back to it?
481     //
482     // This is again due to the R_LARCH_PCALA_LO12 on JIRL case, where we want
483     // both the HI20 and LO12 to potentially refer to the PLT. But in reality
484     // the HI20 reloc appears earlier, and the relocs don't contain enough
485     // information to let us properly resolve semantics per symbol.
486     // Unlike RISCV, our LO12 relocs *do not* point to their corresponding HI20
487     // relocs, hence it is nearly impossible to 100% accurately determine each
488     // HI20's "flavor" without taking big performance hits, in the presence of
489     // edge cases (e.g. HI20 without pairing LO12; paired LO12 placed so far
490     // apart that relationship is not certain anymore), and programmer mistakes
491     // (e.g. as outlined in https://github.com/loongson/la-abi-specs/pull/3).
492     //
493     // Ideally we would scan in an extra pass for all LO12s on JIRL, then mark
494     // every HI20 reloc referring to the same symbol differently; this is not
495     // feasible with the current function signature of getRelExpr that doesn't
496     // allow for such inter-pass state.
497     //
498     // So, unfortunately we have to again work around this quirk the same way as
499     // BFD: assuming every R_LARCH_PCALA_HI20 is potentially PLT-needing, only
500     // relaxing back to RE_LOONGARCH_PAGE_PC if it's known not so at a later
501     // stage.
502     return RE_LOONGARCH_PLT_PAGE_PC;
503   case R_LARCH_PCALA64_LO20:
504   case R_LARCH_PCALA64_HI12:
505     return RE_LOONGARCH_PAGE_PC;
506   case R_LARCH_GOT_HI20:
507   case R_LARCH_GOT_LO12:
508   case R_LARCH_GOT64_LO20:
509   case R_LARCH_GOT64_HI12:
510   case R_LARCH_TLS_IE_HI20:
511   case R_LARCH_TLS_IE_LO12:
512   case R_LARCH_TLS_IE64_LO20:
513   case R_LARCH_TLS_IE64_HI12:
514     return R_GOT;
515   case R_LARCH_TLS_LD_HI20:
516     return R_TLSLD_GOT;
517   case R_LARCH_TLS_GD_HI20:
518     return R_TLSGD_GOT;
519   case R_LARCH_TLS_LE_ADD_R:
520   case R_LARCH_RELAX:
521     return ctx.arg.relax ? R_RELAX_HINT : R_NONE;
522   case R_LARCH_ALIGN:
523     return R_RELAX_HINT;
524   case R_LARCH_TLS_DESC_PC_HI20:
525   case R_LARCH_TLS_DESC64_PC_LO20:
526   case R_LARCH_TLS_DESC64_PC_HI12:
527     return RE_LOONGARCH_TLSDESC_PAGE_PC;
528   case R_LARCH_TLS_DESC_PC_LO12:
529   case R_LARCH_TLS_DESC_LD:
530   case R_LARCH_TLS_DESC_HI20:
531   case R_LARCH_TLS_DESC_LO12:
532   case R_LARCH_TLS_DESC64_LO20:
533   case R_LARCH_TLS_DESC64_HI12:
534     return R_TLSDESC;
535   case R_LARCH_TLS_DESC_CALL:
536     return R_TLSDESC_CALL;
537   case R_LARCH_TLS_LD_PCREL20_S2:
538     return R_TLSLD_PC;
539   case R_LARCH_TLS_GD_PCREL20_S2:
540     return R_TLSGD_PC;
541   case R_LARCH_TLS_DESC_PCREL20_S2:
542     return R_TLSDESC_PC;
543 
544   // Other known relocs that are explicitly unimplemented:
545   //
546   // - psABI v1 relocs that need a stateful stack machine to work, and not
547   //   required when implementing psABI v2;
548   // - relocs that are not used anywhere (R_LARCH_{ADD,SUB}_24 [1], and the
549   //   two GNU vtable-related relocs).
550   //
551   // [1]: https://web.archive.org/web/20230709064026/https://github.com/loongson/LoongArch-Documentation/issues/51
552   default:
553     Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
554              << ") against symbol " << &s;
555     return R_NONE;
556   }
557 }
558 
559 bool LoongArch::usesOnlyLowPageBits(RelType type) const {
560   switch (type) {
561   default:
562     return false;
563   case R_LARCH_PCALA_LO12:
564   case R_LARCH_GOT_LO12:
565   case R_LARCH_GOT_PC_LO12:
566   case R_LARCH_TLS_IE_PC_LO12:
567   case R_LARCH_TLS_DESC_LO12:
568   case R_LARCH_TLS_DESC_PC_LO12:
569     return true;
570   }
571 }
572 
573 void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
574                          uint64_t val) const {
575   switch (rel.type) {
576   case R_LARCH_32_PCREL:
577     checkInt(ctx, loc, val, 32, rel);
578     [[fallthrough]];
579   case R_LARCH_32:
580   case R_LARCH_TLS_DTPREL32:
581     write32le(loc, val);
582     return;
583   case R_LARCH_64:
584   case R_LARCH_TLS_DTPREL64:
585   case R_LARCH_64_PCREL:
586     write64le(loc, val);
587     return;
588 
589   // Relocs intended for `pcaddi`.
590   case R_LARCH_PCREL20_S2:
591   case R_LARCH_TLS_LD_PCREL20_S2:
592   case R_LARCH_TLS_GD_PCREL20_S2:
593   case R_LARCH_TLS_DESC_PCREL20_S2:
594     checkInt(ctx, loc, val, 22, rel);
595     checkAlignment(ctx, loc, val, 4, rel);
596     write32le(loc, setJ20(read32le(loc), val >> 2));
597     return;
598 
599   case R_LARCH_B16:
600     checkInt(ctx, loc, val, 18, rel);
601     checkAlignment(ctx, loc, val, 4, rel);
602     write32le(loc, setK16(read32le(loc), val >> 2));
603     return;
604 
605   case R_LARCH_B21:
606     checkInt(ctx, loc, val, 23, rel);
607     checkAlignment(ctx, loc, val, 4, rel);
608     write32le(loc, setD5k16(read32le(loc), val >> 2));
609     return;
610 
611   case R_LARCH_B26:
612     checkInt(ctx, loc, val, 28, rel);
613     checkAlignment(ctx, loc, val, 4, rel);
614     write32le(loc, setD10k16(read32le(loc), val >> 2));
615     return;
616 
617   case R_LARCH_CALL36: {
618     // This relocation is designed for adjacent pcaddu18i+jirl pairs that are
619     // patched at the same time. Because of the sign extension of these insns'
620     // immediate fields, the relocation range is [-128G - 0x20000, +128G -
621     // 0x20000) (and it must of course be 4-byte aligned).
622     if (((int64_t)val + 0x20000) != llvm::SignExtend64(val + 0x20000, 38))
623       reportRangeError(ctx, loc, rel, Twine(val), llvm::minIntN(38) - 0x20000,
624                        llvm::maxIntN(38) - 0x20000);
625     checkAlignment(ctx, loc, val, 4, rel);
626     // Since jirl performs sign extension on the offset immediate, add (1<<17)
627     // to the original val to get the correct hi20.
628     uint32_t hi20 = extractBits(val + (1 << 17), 37, 18);
629     // Despite the name, the lower part is actually 18 bits, 4-byte aligned.
630     uint32_t lo16 = extractBits(val, 17, 2);
631     write32le(loc, setJ20(read32le(loc), hi20));
632     write32le(loc + 4, setK16(read32le(loc + 4), lo16));
633     return;
634   }
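  // Illustrative note on R_LARCH_CALL36 above (an added example, not part of
  // the upstream source): for a hypothetical displacement val = 0x30000, the
  // rounding term gives hi20 = (0x30000 + 0x20000) >> 18 = 1 and lo16 = 0xc000;
  // jirl sign-extends lo16 and shifts it left by 2, yielding -0x10000, while
  // pcaddu18i contributes 1 << 18 = 0x40000, for a total of 0x30000.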
635 
636   // Relocs intended for `addi`, `ld` or `st`.
637   case R_LARCH_PCALA_LO12:
638     // We have to again inspect the insn word to handle the R_LARCH_PCALA_LO12
639     // on JIRL case: firstly JIRL wants its immediate's 2 lowest zeroes
640     // removed by us (in contrast to regular R_LARCH_PCALA_LO12), secondly
641     // its immediate slot width is different too (16, not 12).
642     // In this case, process like an R_LARCH_B16, but without overflow checking
643     // and only taking the value's lowest 12 bits.
644     if (isJirl(read32le(loc))) {
645       checkAlignment(ctx, loc, val, 4, rel);
646       val = SignExtend64<12>(val);
647       write32le(loc, setK16(read32le(loc), val >> 2));
648       return;
649     }
650     [[fallthrough]];
651   case R_LARCH_ABS_LO12:
652   case R_LARCH_GOT_PC_LO12:
653   case R_LARCH_GOT_LO12:
654   case R_LARCH_TLS_LE_LO12:
655   case R_LARCH_TLS_IE_PC_LO12:
656   case R_LARCH_TLS_IE_LO12:
657   case R_LARCH_TLS_LE_LO12_R:
658   case R_LARCH_TLS_DESC_PC_LO12:
659   case R_LARCH_TLS_DESC_LO12:
660     write32le(loc, setK12(read32le(loc), extractBits(val, 11, 0)));
661     return;
662 
663   // Relocs intended for `lu12i.w` or `pcalau12i`.
664   case R_LARCH_ABS_HI20:
665   case R_LARCH_PCALA_HI20:
666   case R_LARCH_GOT_PC_HI20:
667   case R_LARCH_GOT_HI20:
668   case R_LARCH_TLS_LE_HI20:
669   case R_LARCH_TLS_IE_PC_HI20:
670   case R_LARCH_TLS_IE_HI20:
671   case R_LARCH_TLS_LD_PC_HI20:
672   case R_LARCH_TLS_LD_HI20:
673   case R_LARCH_TLS_GD_PC_HI20:
674   case R_LARCH_TLS_GD_HI20:
675   case R_LARCH_TLS_DESC_PC_HI20:
676   case R_LARCH_TLS_DESC_HI20:
677     write32le(loc, setJ20(read32le(loc), extractBits(val, 31, 12)));
678     return;
679   case R_LARCH_TLS_LE_HI20_R:
680     write32le(loc, setJ20(read32le(loc), extractBits(val + 0x800, 31, 12)));
681     return;
682 
683   // Relocs intended for `lu32i.d`.
684   case R_LARCH_ABS64_LO20:
685   case R_LARCH_PCALA64_LO20:
686   case R_LARCH_GOT64_PC_LO20:
687   case R_LARCH_GOT64_LO20:
688   case R_LARCH_TLS_LE64_LO20:
689   case R_LARCH_TLS_IE64_PC_LO20:
690   case R_LARCH_TLS_IE64_LO20:
691   case R_LARCH_TLS_DESC64_PC_LO20:
692   case R_LARCH_TLS_DESC64_LO20:
693     write32le(loc, setJ20(read32le(loc), extractBits(val, 51, 32)));
694     return;
695 
696   // Relocs intended for `lu52i.d`.
697   case R_LARCH_ABS64_HI12:
698   case R_LARCH_PCALA64_HI12:
699   case R_LARCH_GOT64_PC_HI12:
700   case R_LARCH_GOT64_HI12:
701   case R_LARCH_TLS_LE64_HI12:
702   case R_LARCH_TLS_IE64_PC_HI12:
703   case R_LARCH_TLS_IE64_HI12:
704   case R_LARCH_TLS_DESC64_PC_HI12:
705   case R_LARCH_TLS_DESC64_HI12:
706     write32le(loc, setK12(read32le(loc), extractBits(val, 63, 52)));
707     return;
708 
709   case R_LARCH_ADD6:
710     *loc = (*loc & 0xc0) | ((*loc + val) & 0x3f);
711     return;
712   case R_LARCH_ADD8:
713     *loc += val;
714     return;
715   case R_LARCH_ADD16:
716     write16le(loc, read16le(loc) + val);
717     return;
718   case R_LARCH_ADD32:
719     write32le(loc, read32le(loc) + val);
720     return;
721   case R_LARCH_ADD64:
722     write64le(loc, read64le(loc) + val);
723     return;
724   case R_LARCH_ADD_ULEB128:
725     handleUleb128(ctx, loc, val);
726     return;
727   case R_LARCH_SUB6:
728     *loc = (*loc & 0xc0) | ((*loc - val) & 0x3f);
729     return;
730   case R_LARCH_SUB8:
731     *loc -= val;
732     return;
733   case R_LARCH_SUB16:
734     write16le(loc, read16le(loc) - val);
735     return;
736   case R_LARCH_SUB32:
737     write32le(loc, read32le(loc) - val);
738     return;
739   case R_LARCH_SUB64:
740     write64le(loc, read64le(loc) - val);
741     return;
742   case R_LARCH_SUB_ULEB128:
743     handleUleb128(ctx, loc, -val);
744     return;
745 
746   case R_LARCH_MARK_LA:
747   case R_LARCH_MARK_PCREL:
748     // no-op
749     return;
750 
751   case R_LARCH_TLS_LE_ADD_R:
752   case R_LARCH_RELAX:
753     return; // Ignored (for now)
754 
755   case R_LARCH_TLS_DESC_LD:
756     return; // nothing to do.
757   case R_LARCH_TLS_DESC32:
758     write32le(loc + 4, val);
759     return;
760   case R_LARCH_TLS_DESC64:
761     write64le(loc + 8, val);
762     return;
763 
764   default:
765     llvm_unreachable("unknown relocation");
766   }
767 }
768 
769 static bool relaxable(ArrayRef<Relocation> relocs, size_t i) {
770   return i + 1 < relocs.size() && relocs[i + 1].type == R_LARCH_RELAX;
771 }
772 
773 static bool isPairRelaxable(ArrayRef<Relocation> relocs, size_t i) {
774   return relaxable(relocs, i) && relaxable(relocs, i + 2) &&
775          relocs[i].offset + 4 == relocs[i + 2].offset;
776 }
777 
778 // Relax code sequence.
779 // From:
780 //   pcalau12i     $a0, %pc_hi20(sym) | %ld_pc_hi20(sym)  | %gd_pc_hi20(sym)
781 //                    | %desc_pc_hi20(sym)
782 //   addi.w/d $a0, $a0, %pc_lo12(sym) | %got_pc_lo12(sym) | %got_pc_lo12(sym)
783 //                    | %desc_pc_lo12(sym)
784 // To:
785 //   pcaddi        $a0, %pc_lo12(sym) | %got_pc_lo12(sym) | %got_pc_lo12(sym)
786 //                    | %desc_pcrel_20(sym)
787 //
788 // From:
789 //   pcalau12i $a0, %got_pc_hi20(sym_got)
790 //   ld.w/d $a0, $a0, %got_pc_lo12(sym_got)
791 // To:
792 //   pcaddi $a0, %got_pc_hi20(sym_got)
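//
// Added note (not part of the upstream comment): the single-instruction form
// is only reachable when the 4-byte-aligned displacement to the target fits in
// a signed 22-bit value, i.e. the target lies within roughly +/-2MiB of the
// pcalau12i, matching the si20<<2 reach of pcaddi; see the isInt<22> check
// below.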
793 static void relaxPCHi20Lo12(Ctx &ctx, const InputSection &sec, size_t i,
794                             uint64_t loc, Relocation &rHi20, Relocation &rLo12,
795                             uint32_t &remove) {
796   // check if the relocations are relaxable sequences.
797   if (!((rHi20.type == R_LARCH_PCALA_HI20 &&
798          rLo12.type == R_LARCH_PCALA_LO12) ||
799         (rHi20.type == R_LARCH_GOT_PC_HI20 &&
800          rLo12.type == R_LARCH_GOT_PC_LO12) ||
801         (rHi20.type == R_LARCH_TLS_GD_PC_HI20 &&
802          rLo12.type == R_LARCH_GOT_PC_LO12) ||
803         (rHi20.type == R_LARCH_TLS_LD_PC_HI20 &&
804          rLo12.type == R_LARCH_GOT_PC_LO12) ||
805         (rHi20.type == R_LARCH_TLS_DESC_PC_HI20 &&
806          rLo12.type == R_LARCH_TLS_DESC_PC_LO12)))
807     return;
808 
809   // GOT references to absolute symbols can't be relaxed to use pcaddi in
810   // position-independent code, because these instructions produce a relative
811   // address.
812   // Meanwhile, skip undefined, preemptible and STT_GNU_IFUNC symbols, because
813   // these symbols may be resolved at runtime.
814   // Moreover, relaxation can only occur if the addends of both relocations are
815   // zero for GOT references.
816   if (rHi20.type == R_LARCH_GOT_PC_HI20 &&
817       (!rHi20.sym || rHi20.sym != rLo12.sym || !rHi20.sym->isDefined() ||
818        rHi20.sym->isPreemptible || rHi20.sym->isGnuIFunc() ||
819        (ctx.arg.isPic && !cast<Defined>(*rHi20.sym).section) ||
820        rHi20.addend != 0 || rLo12.addend != 0))
821     return;
822 
823   uint64_t dest = 0;
824   if (rHi20.expr == RE_LOONGARCH_PLT_PAGE_PC)
825     dest = rHi20.sym->getPltVA(ctx);
826   else if (rHi20.expr == RE_LOONGARCH_PAGE_PC ||
827            rHi20.expr == RE_LOONGARCH_GOT_PAGE_PC)
828     dest = rHi20.sym->getVA(ctx);
829   else if (rHi20.expr == RE_LOONGARCH_TLSGD_PAGE_PC)
830     dest = ctx.in.got->getGlobalDynAddr(*rHi20.sym);
831   else if (rHi20.expr == RE_LOONGARCH_TLSDESC_PAGE_PC)
832     dest = ctx.in.got->getTlsDescAddr(*rHi20.sym);
833   else {
834     Err(ctx) << getErrorLoc(ctx, (const uint8_t *)loc) << "unknown expr ("
835              << rHi20.expr << ") against symbol " << rHi20.sym
836              << "in relaxPCHi20Lo12";
837     return;
838   }
839   dest += rHi20.addend;
840 
841   const int64_t displace = dest - loc;
841   // Bail out if the displacement is misaligned or out of range for pcaddi.
843   if ((displace & 0x3) != 0 || !isInt<22>(displace))
844     return;
845 
846   // Note: If we can ensure that the .o files generated by LLVM only contain
847   // relaxable instruction sequences with R_LARCH_RELAX, then we do not need to
848   // decode instructions. The relaxable instruction sequences imply the
849   // following constraints:
850   // * For relocation pairs related to got_pc, the opcodes of instructions
851   // must be pcalau12i + ld.w/d. In other cases, the opcodes must be pcalau12i +
852   // addi.w/d.
853   // * The destination register of pcalau12i is guaranteed to be used only by
854   // the immediately following instruction.
855   const uint32_t currInsn = read32le(sec.content().data() + rHi20.offset);
856   const uint32_t nextInsn = read32le(sec.content().data() + rLo12.offset);
857   // Check if the two instructions use the same register.
858   if (getD5(currInsn) != getJ5(nextInsn) || getJ5(nextInsn) != getD5(nextInsn))
859     return;
860 
861   sec.relaxAux->relocTypes[i] = R_LARCH_RELAX;
862   if (rHi20.type == R_LARCH_TLS_GD_PC_HI20)
863     sec.relaxAux->relocTypes[i + 2] = R_LARCH_TLS_GD_PCREL20_S2;
864   else if (rHi20.type == R_LARCH_TLS_LD_PC_HI20)
865     sec.relaxAux->relocTypes[i + 2] = R_LARCH_TLS_LD_PCREL20_S2;
866   else if (rHi20.type == R_LARCH_TLS_DESC_PC_HI20)
867     sec.relaxAux->relocTypes[i + 2] = R_LARCH_TLS_DESC_PCREL20_S2;
868   else
869     sec.relaxAux->relocTypes[i + 2] = R_LARCH_PCREL20_S2;
870   sec.relaxAux->writes.push_back(insn(PCADDI, getD5(nextInsn), 0, 0));
871   remove = 4;
872 }
873 
874 // Relax code sequence.
875 // From:
876 //   pcaddu18i $ra, %call36(foo)
877 //   jirl $ra, $ra, 0
878 // To:
879 //   b/bl foo
880 static void relaxCall36(Ctx &ctx, const InputSection &sec, size_t i,
881                         uint64_t loc, Relocation &r, uint32_t &remove) {
882   const uint64_t dest =
883       (r.expr == R_PLT_PC ? r.sym->getPltVA(ctx) : r.sym->getVA(ctx)) +
884       r.addend;
885 
886   const int64_t displace = dest - loc;
886   // Bail out if the displacement is misaligned or out of range for b[l].
888   if ((displace & 0x3) != 0 || !isInt<28>(displace))
889     return;
890 
891   const uint32_t nextInsn = read32le(sec.content().data() + r.offset + 4);
892   if (getD5(nextInsn) == R_RA) {
893     // convert jirl to bl
894     sec.relaxAux->relocTypes[i] = R_LARCH_B26;
895     sec.relaxAux->writes.push_back(insn(BL, 0, 0, 0));
896     remove = 4;
897   } else if (getD5(nextInsn) == R_ZERO) {
898     // convert jirl to b
899     sec.relaxAux->relocTypes[i] = R_LARCH_B26;
900     sec.relaxAux->writes.push_back(insn(B, 0, 0, 0));
901     remove = 4;
902   }
903 }
904 
905 // Relax code sequence.
906 // From:
907 //   lu12i.w $rd, %le_hi20_r(sym)
908 //   add.w/d $rd, $rd, $tp, %le_add_r(sym)
909 //   addi/ld/st.w/d $rd, $rd, %le_lo12_r(sym)
910 // To:
911 //   addi/ld/st.w/d $rd, $tp, %le_lo12_r(sym)
912 static void relaxTlsLe(Ctx &ctx, const InputSection &sec, size_t i,
913                        uint64_t loc, Relocation &r, uint32_t &remove) {
914   uint64_t val = r.sym->getVA(ctx, r.addend);
915   // Check if the val exceeds the range of addi/ld/st.
916   if (!isInt<12>(val))
917     return;
918   uint32_t currInsn = read32le(sec.content().data() + r.offset);
919   switch (r.type) {
920   case R_LARCH_TLS_LE_HI20_R:
921   case R_LARCH_TLS_LE_ADD_R:
922     sec.relaxAux->relocTypes[i] = R_LARCH_RELAX;
923     remove = 4;
924     break;
925   case R_LARCH_TLS_LE_LO12_R:
926     sec.relaxAux->writes.push_back(setJ5(currInsn, R_TP));
927     sec.relaxAux->relocTypes[i] = R_LARCH_TLS_LE_LO12_R;
928     break;
929   }
930 }
931 
932 static bool relax(Ctx &ctx, InputSection &sec) {
933   const uint64_t secAddr = sec.getVA();
934   const MutableArrayRef<Relocation> relocs = sec.relocs();
935   auto &aux = *sec.relaxAux;
936   bool changed = false;
937   ArrayRef<SymbolAnchor> sa = ArrayRef(aux.anchors);
938   uint64_t delta = 0;
939 
940   std::fill_n(aux.relocTypes.get(), relocs.size(), R_LARCH_NONE);
941   aux.writes.clear();
942   for (auto [i, r] : llvm::enumerate(relocs)) {
943     const uint64_t loc = secAddr + r.offset - delta;
944     uint32_t &cur = aux.relocDeltas[i], remove = 0;
945     switch (r.type) {
946     case R_LARCH_ALIGN: {
947       const uint64_t addend =
948           r.sym->isUndefined() ? Log2_64(r.addend) + 1 : r.addend;
949       const uint64_t allBytes = (1ULL << (addend & 0xff)) - 4;
950       const uint64_t align = 1ULL << (addend & 0xff);
951       const uint64_t maxBytes = addend >> 8;
952       const uint64_t off = loc & (align - 1);
953       const uint64_t curBytes = off == 0 ? 0 : align - off;
954       // All bytes beyond the alignment boundary should be removed.
955       // If more bytes would be emitted than the maximum allowed, remove them all.
956       if (maxBytes != 0 && curBytes > maxBytes)
957         remove = allBytes;
958       else
959         remove = allBytes - curBytes;
960       // If we can't satisfy this alignment, we've found a bad input.
961       if (LLVM_UNLIKELY(static_cast<int32_t>(remove) < 0)) {
962         Err(ctx) << getErrorLoc(ctx, (const uint8_t *)loc)
963                  << "insufficient padding bytes for " << r.type << ": "
964                  << allBytes << " bytes available for "
965                  << "requested alignment of " << align << " bytes";
966         remove = 0;
967       }
968       break;
969     }
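    // Illustrative note on the R_LARCH_ALIGN handling above (an added example,
    // not part of the upstream source): assuming the branch where r.addend is
    // used directly, an addend of 0x0c04 decodes to align = 1 << 4 = 16,
    // allBytes = 12 and maxBytes = 12; if the relocated location is 8 bytes
    // past a 16-byte boundary then curBytes = 8, and 12 - 8 = 4 padding bytes
    // are removed.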
970     case R_LARCH_PCALA_HI20:
971     case R_LARCH_GOT_PC_HI20:
972     case R_LARCH_TLS_GD_PC_HI20:
973     case R_LARCH_TLS_LD_PC_HI20:
974       // The overflow check for i+2 will be carried out in isPairRelaxable.
975       if (isPairRelaxable(relocs, i))
976         relaxPCHi20Lo12(ctx, sec, i, loc, r, relocs[i + 2], remove);
977       break;
978     case R_LARCH_TLS_DESC_PC_HI20:
979       if (r.expr == RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC ||
980           r.expr == R_RELAX_TLS_GD_TO_LE) {
981         if (relaxable(relocs, i))
982           remove = 4;
983       } else if (isPairRelaxable(relocs, i))
984         relaxPCHi20Lo12(ctx, sec, i, loc, r, relocs[i + 2], remove);
985       break;
986     case R_LARCH_CALL36:
987       if (relaxable(relocs, i))
988         relaxCall36(ctx, sec, i, loc, r, remove);
989       break;
990     case R_LARCH_TLS_LE_HI20_R:
991     case R_LARCH_TLS_LE_ADD_R:
992     case R_LARCH_TLS_LE_LO12_R:
993       if (relaxable(relocs, i))
994         relaxTlsLe(ctx, sec, i, loc, r, remove);
995       break;
996     case R_LARCH_TLS_IE_PC_HI20:
997       if (relaxable(relocs, i) && r.expr == R_RELAX_TLS_IE_TO_LE &&
998           isUInt<12>(r.sym->getVA(ctx, r.addend)))
999         remove = 4;
1000       break;
1001     case R_LARCH_TLS_DESC_PC_LO12:
1002       if (relaxable(relocs, i) &&
1003           (r.expr == RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC ||
1004            r.expr == R_RELAX_TLS_GD_TO_LE))
1005         remove = 4;
1006       break;
1007     case R_LARCH_TLS_DESC_LD:
1008       if (relaxable(relocs, i) && r.expr == R_RELAX_TLS_GD_TO_LE &&
1009           isUInt<12>(r.sym->getVA(ctx, r.addend)))
1010         remove = 4;
1011       break;
1012     }
1013 
1014     // All anchors whose offsets are <= r.offset are preceded by the previous
1015     // relocation whose `relocDeltas` value equals `delta`.
1016     // Decrease their st_value and update their st_size.
1017     for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1)) {
1018       if (sa[0].end)
1019         sa[0].d->size = sa[0].offset - delta - sa[0].d->value;
1020       else
1021         sa[0].d->value = sa[0].offset - delta;
1022     }
1023     delta += remove;
1024     if (delta != cur) {
1025       cur = delta;
1026       changed = true;
1027     }
1028   }
1029 
1030   for (const SymbolAnchor &a : sa) {
1031     if (a.end)
1032       a.d->size = a.offset - delta - a.d->value;
1033     else
1034       a.d->value = a.offset - delta;
1035   }
1036   // Inform assignAddresses that the size has changed.
1037   if (!isUInt<32>(delta))
1038     Fatal(ctx) << "section size decrease is too large: " << delta;
1039   sec.bytesDropped = delta;
1040   return changed;
1041 }
1042 
1043 // Convert TLS IE to LE in the normal or medium code model.
1044 // Original code sequence:
1045 //  * pcalau12i $a0, %ie_pc_hi20(sym)
1046 //  * ld.d      $a0, $a0, %ie_pc_lo12(sym)
1047 //
1048 // The code sequence converted is as follows:
1049 //  * lu12i.w   $a0, %le_hi20(sym)      # le_hi20 != 0, otherwise NOP
1050 //  * ori       $a0, src, %le_lo12(sym) # le_hi20 != 0, src = $a0,
1051 //                                      # otherwise,    src = $zero
1052 //
1053 // When relaxation is enabled, redundant NOPs can be removed.
1054 static void tlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) {
1055   assert(isInt<32>(val) &&
1056          "val exceeds the range of medium code model in tlsIeToLe");
1057 
1058   bool isUInt12 = isUInt<12>(val);
1059   const uint32_t currInsn = read32le(loc);
1060   switch (rel.type) {
1061   case R_LARCH_TLS_IE_PC_HI20:
1062     if (isUInt12)
1063       write32le(loc, insn(ANDI, R_ZERO, R_ZERO, 0)); // nop
1064     else
1065       write32le(loc, insn(LU12I_W, getD5(currInsn), extractBits(val, 31, 12),
1066                           0)); // lu12i.w $a0, %le_hi20
1067     break;
1068   case R_LARCH_TLS_IE_PC_LO12:
1069     if (isUInt12)
1070       write32le(loc, insn(ORI, getD5(currInsn), R_ZERO,
1071                           val)); // ori $a0, $zero, %le_lo12
1072     else
1073       write32le(loc, insn(ORI, getD5(currInsn), getJ5(currInsn),
1074                           lo12(val))); // ori $a0, $a0, %le_lo12
1075     break;
1076   }
1077 }
1078 
1079 // Convert TLSDESC GD/LD to IE.
1080 // In normal or medium code model, there are two forms of code sequences:
1081 //  * pcalau12i  $a0, %desc_pc_hi20(sym_desc)
1082 //  * addi.d     $a0, $a0, %desc_pc_lo12(sym_desc)
1083 //  * ld.d       $ra, $a0, %desc_ld(sym_desc)
1084 //  * jirl       $ra, $ra, %desc_call(sym_desc)
1085 //  ------
1086 //  * pcaddi $a0, %desc_pcrel_20(a)
1087 //  * load $ra, $a0, %desc_ld(a)
1088 //  * jirl $ra, $ra, %desc_call(a)
1089 //
1090 // The code sequence obtained is as follows:
1091 //  * pcalau12i $a0, %ie_pc_hi20(sym_ie)
1092 //  * ld.[wd]   $a0, $a0, %ie_pc_lo12(sym_ie)
1093 //
1094 // For simplicity, in both tlsdescToIe and tlsdescToLe we always convert the
1095 // preceding instructions to NOPs, because both forms of the code sequence
1096 // (corresponding to the relocation combinations
1097 // R_LARCH_TLS_DESC_PC_HI20+R_LARCH_TLS_DESC_PC_LO12 and
1098 // R_LARCH_TLS_DESC_PCREL20_S2) are handled the same way.
1099 //
1100 // When relaxation is enabled, redundant NOPs can be removed.
1101 void LoongArch::tlsdescToIe(uint8_t *loc, const Relocation &rel,
1102                             uint64_t val) const {
1103   switch (rel.type) {
1104   case R_LARCH_TLS_DESC_PC_HI20:
1105   case R_LARCH_TLS_DESC_PC_LO12:
1106   case R_LARCH_TLS_DESC_PCREL20_S2:
1107     write32le(loc, insn(ANDI, R_ZERO, R_ZERO, 0)); // nop
1108     break;
1109   case R_LARCH_TLS_DESC_LD:
1110     write32le(loc, insn(PCALAU12I, R_A0, 0, 0)); // pcalau12i $a0, %ie_pc_hi20
1111     relocateNoSym(loc, R_LARCH_TLS_IE_PC_HI20, val);
1112     break;
1113   case R_LARCH_TLS_DESC_CALL:
1114     write32le(loc, insn(ctx.arg.is64 ? LD_D : LD_W, R_A0, R_A0,
1115                         0)); // ld.[wd] $a0, $a0, %ie_pc_lo12
1116     relocateNoSym(loc, R_LARCH_TLS_IE_PC_LO12, val);
1117     break;
1118   default:
1119     llvm_unreachable("unsupported relocation for TLSDESC to IE");
1120   }
1121 }
1122 
1123 // Convert TLSDESC GD/LD to LE.
1124 // The code sequence obtained in the normal or medium code model is as follows:
1125 //  * lu12i.w   $a0, %le_hi20(sym)      # le_hi20 != 0, otherwise NOP
1126 //  * ori       $a0, src, %le_lo12(sym) # le_hi20 != 0, src = $a0,
1127 //                                      # otherwise,    src = $zero
1128 // See the comment in tlsdescToIe for detailed information.
1129 void LoongArch::tlsdescToLe(uint8_t *loc, const Relocation &rel,
1130                             uint64_t val) const {
1131   assert(isInt<32>(val) &&
1132          "val exceeds the range of medium code model in tlsdescToLe");
1133 
1134   bool isUInt12 = isUInt<12>(val);
1135   switch (rel.type) {
1136   case R_LARCH_TLS_DESC_PC_HI20:
1137   case R_LARCH_TLS_DESC_PC_LO12:
1138   case R_LARCH_TLS_DESC_PCREL20_S2:
1139     write32le(loc, insn(ANDI, R_ZERO, R_ZERO, 0)); // nop
1140     break;
1141   case R_LARCH_TLS_DESC_LD:
1142     if (isUInt12)
1143       write32le(loc, insn(ANDI, R_ZERO, R_ZERO, 0)); // nop
1144     else
1145       write32le(loc, insn(LU12I_W, R_A0, extractBits(val, 31, 12),
1146                           0)); // lu12i.w $a0, %le_hi20
1147     break;
1148   case R_LARCH_TLS_DESC_CALL:
1149     if (isUInt12)
1150       write32le(loc, insn(ORI, R_A0, R_ZERO, val)); // ori $a0, $zero, %le_lo12
1151     else
1152       write32le(loc,
1153                 insn(ORI, R_A0, R_A0, lo12(val))); // ori $a0, $a0, %le_lo12
1154     break;
1155   default:
1156     llvm_unreachable("unsupported relocation for TLSDESC to LE");
1157   }
1158 }
1159 
1160 // Try GOT indirection to PC relative optimization.
1161 // From:
1162 //  * pcalau12i $a0, %got_pc_hi20(sym_got)
1163 //  * ld.w/d    $a0, $a0, %got_pc_lo12(sym_got)
1164 // To:
1165 //  * pcalau12i $a0, %pc_hi20(sym)
1166 //  * addi.w/d  $a0, $a0, %pc_lo12(sym)
1167 //
1168 // Note: Although the optimization has been performed, the GOT entries still
1169 // exist, similarly to AArch64. Eliminating the entries would increase code
1170 // complexity.
1171 bool LoongArch::tryGotToPCRel(uint8_t *loc, const Relocation &rHi20,
1172                               const Relocation &rLo12, uint64_t secAddr) const {
1173   // Check if the relocations apply to consecutive instructions.
1174   if (rHi20.offset + 4 != rLo12.offset)
1175     return false;
1176 
1177   // Check if the relocations reference the same symbol and skip undefined,
1178   // preemptible and STT_GNU_IFUNC symbols.
1179   if (!rHi20.sym || rHi20.sym != rLo12.sym || !rHi20.sym->isDefined() ||
1180       rHi20.sym->isPreemptible || rHi20.sym->isGnuIFunc())
1181     return false;
1182 
1183   // GOT references to absolute symbols can't be relaxed to use PCALAU12I/ADDI
1184   // in position-independent code because these instructions produce a relative
1185   // address.
1186   if ((ctx.arg.isPic && !cast<Defined>(*rHi20.sym).section))
1187     return false;
1188 
1189   // Check if the addends of both relocations are zero.
1190   if (rHi20.addend != 0 || rLo12.addend != 0)
1191     return false;
1192 
1193   const uint32_t currInsn = read32le(loc);
1194   const uint32_t nextInsn = read32le(loc + 4);
1195   const uint32_t ldOpcode = ctx.arg.is64 ? LD_D : LD_W;
1196   // Check if the first instruction is PCALAU12I and the second instruction is
1197   // LD.
1198   if ((currInsn & 0xfe000000) != PCALAU12I ||
1199       (nextInsn & 0xffc00000) != ldOpcode)
1200     return false;
1201 
1202   // Check if the two instructions use the same register.
1203   if (getD5(currInsn) != getJ5(nextInsn) || getJ5(nextInsn) != getD5(nextInsn))
1204     return false;
1205 
1206   Symbol &sym = *rHi20.sym;
1207   uint64_t symLocal = sym.getVA(ctx);
1208   const int64_t displace = symLocal - getLoongArchPage(secAddr + rHi20.offset);
1209   // Check if the symbol address is in
1210   // [(PC & ~0xfff) - 2GiB - 0x800, (PC & ~0xfff) + 2GiB - 0x800).
1211   const int64_t underflow = -0x80000000LL - 0x800;
1212   const int64_t overflow = 0x80000000LL - 0x800;
1213   if (!(displace >= underflow && displace < overflow))
1214     return false;
1215 
1216   Relocation newRHi20 = {RE_LOONGARCH_PAGE_PC, R_LARCH_PCALA_HI20, rHi20.offset,
1217                          rHi20.addend, &sym};
1218   Relocation newRLo12 = {R_ABS, R_LARCH_PCALA_LO12, rLo12.offset, rLo12.addend,
1219                          &sym};
1220   uint64_t pageDelta =
1221       getLoongArchPageDelta(symLocal, secAddr + rHi20.offset, rHi20.type);
1222   // pcalau12i $a0, %pc_hi20
1223   write32le(loc, insn(PCALAU12I, getD5(currInsn), 0, 0));
1224   relocate(loc, newRHi20, pageDelta);
1225   // addi.w/d $a0, $a0, %pc_lo12
1226   write32le(loc + 4, insn(ctx.arg.is64 ? ADDI_D : ADDI_W, getD5(nextInsn),
1227                           getJ5(nextInsn), 0));
1228   relocate(loc + 4, newRLo12, SignExtend64(symLocal, 64));
1229   return true;
1230 }
1231 
1232 // During TLSDESC GD_TO_IE, the converted code sequence always includes an
1233 // instruction related to the Lo12 relocation (ld.[wd]). To obtain the correct
1234 // val in `getRelocTargetVA`, the expr of this instruction should be adjusted to
1235 // R_RELAX_TLS_GD_TO_IE_ABS, while expr of other instructions related to the
1236 // Hi20 relocation (pcalau12i) should be adjusted to
1237 // RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC. Specifically, in the normal or
1238 // medium code model, the instruction with relocation R_LARCH_TLS_DESC_CALL is
1239 // the candidate of Lo12 relocation.
1240 RelExpr LoongArch::adjustTlsExpr(RelType type, RelExpr expr) const {
1241   if (expr == R_RELAX_TLS_GD_TO_IE) {
1242     if (type != R_LARCH_TLS_DESC_CALL)
1243       return RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC;
1244     return R_RELAX_TLS_GD_TO_IE_ABS;
1245   }
1246   return expr;
1247 }
1248 
1249 static bool pairForGotRels(ArrayRef<Relocation> relocs) {
1250   // Check if R_LARCH_GOT_PC_HI20 and R_LARCH_GOT_PC_LO12 always appear in
1251   // pairs.
1252   size_t i = 0;
1253   const size_t size = relocs.size();
1254   for (; i != size; ++i) {
1255     if (relocs[i].type == R_LARCH_GOT_PC_HI20) {
1256       if (i + 1 < size && relocs[i + 1].type == R_LARCH_GOT_PC_LO12) {
1257         ++i;
1258         continue;
1259       }
1260       if (relaxable(relocs, i) && i + 2 < size &&
1261           relocs[i + 2].type == R_LARCH_GOT_PC_LO12) {
1262         i += 2;
1263         continue;
1264       }
1265       break;
1266     } else if (relocs[i].type == R_LARCH_GOT_PC_LO12) {
1267       break;
1268     }
1269   }
1270   return i == size;
1271 }
1272 
1273 void LoongArch::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
1274   const unsigned bits = ctx.arg.is64 ? 64 : 32;
1275   uint64_t secAddr = sec.getOutputSection()->addr;
1276   if (auto *s = dyn_cast<InputSection>(&sec))
1277     secAddr += s->outSecOff;
1278   else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
1279     secAddr += ehIn->getParent()->outSecOff;
1280   bool isExtreme = false, isRelax = false;
1281   const MutableArrayRef<Relocation> relocs = sec.relocs();
1282   const bool isPairForGotRels = pairForGotRels(relocs);
1283   for (size_t i = 0, size = relocs.size(); i != size; ++i) {
1284     Relocation &rel = relocs[i];
1285     uint8_t *loc = buf + rel.offset;
1286     uint64_t val = SignExtend64(
1287         sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset), bits);
1288 
1289     switch (rel.expr) {
1290     case R_RELAX_HINT:
1291       continue;
1292     case R_RELAX_TLS_IE_TO_LE:
1293       if (rel.type == R_LARCH_TLS_IE_PC_HI20) {
1294         // LoongArch does not support IE to LE optimization in the extreme code
1295         // model. In this case, the relocs are as follows:
1296         //
1297         //  * i   -- R_LARCH_TLS_IE_PC_HI20
1298         //  * i+1 -- R_LARCH_TLS_IE_PC_LO12
1299         //  * i+2 -- R_LARCH_TLS_IE64_PC_LO20
1300         //  * i+3 -- R_LARCH_TLS_IE64_PC_HI12
1301         isExtreme =
1302             i + 2 < size && relocs[i + 2].type == R_LARCH_TLS_IE64_PC_LO20;
1303       }
1304       if (isExtreme) {
1305         rel.expr = getRelExpr(rel.type, *rel.sym, loc);
1306         val = SignExtend64(sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset),
1307                            bits);
1308         relocateNoSym(loc, rel.type, val);
1309       } else {
1310         isRelax = relaxable(relocs, i);
1311         if (isRelax && rel.type == R_LARCH_TLS_IE_PC_HI20 && isUInt<12>(val))
1312           continue;
1313         tlsIeToLe(loc, rel, val);
1314       }
1315       continue;
1316     case RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC:
1317       if (rel.type == R_LARCH_TLS_DESC_PC_HI20) {
1318         // LoongArch does not support TLSDESC GD/LD to LE/IE optimization in the
1319         // extreme code model. In these cases, the relocs are as follows:
1320         //
1321         //  * i   -- R_LARCH_TLS_DESC_PC_HI20
1322         //  * i+1 -- R_LARCH_TLS_DESC_PC_LO12
1323         //  * i+2 -- R_LARCH_TLS_DESC64_PC_LO20
1324         //  * i+3 -- R_LARCH_TLS_DESC64_PC_HI12
1325         isExtreme =
1326             i + 2 < size && relocs[i + 2].type == R_LARCH_TLS_DESC64_PC_LO20;
1327       }
1328       [[fallthrough]];
1329     case R_RELAX_TLS_GD_TO_IE_ABS:
1330       if (isExtreme) {
1331         if (rel.type == R_LARCH_TLS_DESC_CALL)
1332           continue;
1333         rel.expr = getRelExpr(rel.type, *rel.sym, loc);
1334         val = SignExtend64(sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset),
1335                            bits);
1336         relocateNoSym(loc, rel.type, val);
1337       } else {
1338         isRelax = relaxable(relocs, i);
1339         if (isRelax && (rel.type == R_LARCH_TLS_DESC_PC_HI20 ||
1340                         rel.type == R_LARCH_TLS_DESC_PC_LO12))
1341           continue;
1342         tlsdescToIe(loc, rel, val);
1343       }
1344       continue;
1345     case R_RELAX_TLS_GD_TO_LE:
1346       if (rel.type == R_LARCH_TLS_DESC_PC_HI20) {
1347         isExtreme =
1348             i + 2 < size && relocs[i + 2].type == R_LARCH_TLS_DESC64_PC_LO20;
1349       }
1350       if (isExtreme) {
1351         if (rel.type == R_LARCH_TLS_DESC_CALL)
1352           continue;
1353         rel.expr = getRelExpr(rel.type, *rel.sym, loc);
1354         val = SignExtend64(sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset),
1355                            bits);
1356         relocateNoSym(loc, rel.type, val);
1357       } else {
1358         isRelax = relaxable(relocs, i);
1359         if (isRelax && (rel.type == R_LARCH_TLS_DESC_PC_HI20 ||
1360                         rel.type == R_LARCH_TLS_DESC_PC_LO12 ||
1361                         (rel.type == R_LARCH_TLS_DESC_LD && isUInt<12>(val))))
1362           continue;
1363         tlsdescToLe(loc, rel, val);
1364       }
1365       continue;
1366     case RE_LOONGARCH_GOT_PAGE_PC:
1367       // In LoongArch, we try the GOT-indirection-to-PC-relative optimization in
1368       // the normal or medium code model, with or without an R_LARCH_RELAX
1369       // relocation. Moreover, if the original code sequence can be relaxed to a
1370       // single instruction `pcaddi`, the first instruction will be removed and
1371       // it will not reach here.
1372       if (isPairForGotRels && rel.type == R_LARCH_GOT_PC_HI20) {
1373         bool isRelax = relaxable(relocs, i);
1374         const Relocation lo12Rel = isRelax ? relocs[i + 2] : relocs[i + 1];
1375         if (lo12Rel.type == R_LARCH_GOT_PC_LO12 &&
1376             tryGotToPCRel(loc, rel, lo12Rel, secAddr)) {
1377           // isRelax: skip relocations R_LARCH_RELAX, R_LARCH_GOT_PC_LO12
1378           // !isRelax: skip relocation R_LARCH_GOT_PC_LO12
1379           i += isRelax ? 2 : 1;
1380           continue;
1381         }
1382       }
1383       break;
1384     default:
1385       break;
1386     }
1387     relocate(loc, rel, val);
1388   }
1389 }
1390 
1391 // When relaxing just R_LARCH_ALIGN, relocDeltas is usually changed only once in
1392 // the absence of a linker script. For call and load/store R_LARCH_RELAX, code
1393 // shrinkage may reduce displacement and make more relocations eligible for
1394 // relaxation. Code shrinkage may increase displacement to a call/load/store
1395 // target at a higher fixed address, invalidating an earlier relaxation. Any
1396 // change in section sizes can have a cascading effect and require another
1397 // relaxation pass.
1398 bool LoongArch::relaxOnce(int pass) const {
1399   if (ctx.arg.relocatable)
1400     return false;
1401 
1402   if (pass == 0)
1403     initSymbolAnchors(ctx);
1404 
1405   SmallVector<InputSection *, 0> storage;
1406   bool changed = false;
1407   for (OutputSection *osec : ctx.outputSections) {
1408     if (!(osec->flags & SHF_EXECINSTR))
1409       continue;
1410     for (InputSection *sec : getInputSections(*osec, storage))
1411       changed |= relax(ctx, *sec);
1412   }
1413   return changed;
1414 }
1415 
1416 void LoongArch::finalizeRelax(int passes) const {
1417   Log(ctx) << "relaxation passes: " << passes;
1418   SmallVector<InputSection *, 0> storage;
1419   for (OutputSection *osec : ctx.outputSections) {
1420     if (!(osec->flags & SHF_EXECINSTR))
1421       continue;
1422     for (InputSection *sec : getInputSections(*osec, storage)) {
1423       RelaxAux &aux = *sec->relaxAux;
1424       if (!aux.relocDeltas)
1425         continue;
1426 
1427       MutableArrayRef<Relocation> rels = sec->relocs();
1428       ArrayRef<uint8_t> old = sec->content();
1429       size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1];
1430       size_t writesIdx = 0;
1431       uint8_t *p = ctx.bAlloc.Allocate<uint8_t>(newSize);
1432       uint64_t offset = 0;
1433       int64_t delta = 0;
1434       sec->content_ = p;
1435       sec->size = newSize;
1436       sec->bytesDropped = 0;
1437 
1438       // Update section content: remove NOPs for R_LARCH_ALIGN and rewrite
1439       // instructions for relaxed relocations.
1440       for (size_t i = 0, e = rels.size(); i != e; ++i) {
1441         uint32_t remove = aux.relocDeltas[i] - delta;
1442         delta = aux.relocDeltas[i];
1443         if (remove == 0 && aux.relocTypes[i] == R_LARCH_NONE)
1444           continue;
1445 
1446         // Copy from last location to the current relocated location.
1447         Relocation &r = rels[i];
1448         uint64_t size = r.offset - offset;
1449         memcpy(p, old.data() + offset, size);
1450         p += size;
1451 
1452         int64_t skip = 0;
1453         if (RelType newType = aux.relocTypes[i]) {
1454           switch (newType) {
1455           case R_LARCH_RELAX:
1456             break;
1457           case R_LARCH_PCREL20_S2:
1458             skip = 4;
1459             write32le(p, aux.writes[writesIdx++]);
1460             // RelExpr is needed for relocating.
1461             r.expr = r.sym->hasFlag(NEEDS_PLT) ? R_PLT_PC : R_PC;
1462             break;
1463           case R_LARCH_B26:
1464           case R_LARCH_TLS_LE_LO12_R:
1465             skip = 4;
1466             write32le(p, aux.writes[writesIdx++]);
1467             break;
1468           case R_LARCH_TLS_GD_PCREL20_S2:
1469             // Note: R_LARCH_TLS_LD_PCREL20_S2 must also use R_TLSGD_PC instead
1470             // of R_TLSLD_PC due to historical reasons. In fact, right now TLSLD
1471             // behaves exactly like TLSGD on LoongArch.
1472             //
1473             // This reason has also been mentioned in mold commit:
1474             // https://github.com/rui314/mold/commit/5dfa1cf07c03bd57cb3d493b652ef22441bcd71c
1475           case R_LARCH_TLS_LD_PCREL20_S2:
1476             skip = 4;
1477             write32le(p, aux.writes[writesIdx++]);
1478             r.expr = R_TLSGD_PC;
1479             break;
1480           case R_LARCH_TLS_DESC_PCREL20_S2:
1481             skip = 4;
1482             write32le(p, aux.writes[writesIdx++]);
1483             r.expr = R_TLSDESC_PC;
1484             break;
1485           default:
1486             llvm_unreachable("unsupported type");
1487           }
1488         }
1489 
1490         p += skip;
1491         offset = r.offset + skip + remove;
1492       }
1493       memcpy(p, old.data() + offset, old.size() - offset);
1494 
1495       // Subtract the previous relocDeltas value from the relocation offset.
1496       // For a pair of R_LARCH_XXX/R_LARCH_RELAX with the same offset, decrease
1497       // their r_offset by the same delta.
1498       delta = 0;
1499       for (size_t i = 0, e = rels.size(); i != e;) {
1500         uint64_t cur = rels[i].offset;
1501         do {
1502           rels[i].offset -= delta;
1503           if (aux.relocTypes[i] != R_LARCH_NONE)
1504             rels[i].type = aux.relocTypes[i];
1505         } while (++i != e && rels[i].offset == cur);
1506         delta = aux.relocDeltas[i - 1];
1507       }
1508     }
1509   }
1510 }
1511 
1512 void elf::setLoongArchTargetInfo(Ctx &ctx) {
1513   ctx.target.reset(new LoongArch(ctx));
1514 }
1515