//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
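// For example, getAArch64Page(0x12F45A) == 0x12F000: the 4 KiB granule is
// fixed by the relocation arithmetic, not by the run-time page size.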
uint64_t elf::getAArch64Page(uint64_t expr) {
  return expr & ~static_cast<uint64_t>(0xFFF);
}

namespace {
class AArch64 : public TargetInfo {
public:
  AArch64();
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  bool usesOnlyLowPageBits(RelType type) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
  void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;

private:
  void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
};

struct AArch64Relaxer {
  bool safeToRelaxAdrpLdr = false;

  AArch64Relaxer(ArrayRef<Relocation> relocs);
  bool tryRelaxAdrpAdd(const Relocation &adrpRel, const Relocation &addRel,
                       uint64_t secAddr, uint8_t *buf) const;
  bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
                       uint64_t secAddr, uint8_t *buf) const;
};
} // namespace

AArch64::AArch64() {
  copyRel = R_AARCH64_COPY;
  relativeRel = R_AARCH64_RELATIVE;
  iRelativeRel = R_AARCH64_IRELATIVE;
  gotRel = R_AARCH64_GLOB_DAT;
  pltRel = R_AARCH64_JUMP_SLOT;
  symbolicRel = R_AARCH64_ABS64;
  tlsDescRel = R_AARCH64_TLSDESC;
  tlsGotRel = R_AARCH64_TLS_TPREL64;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  defaultMaxPageSize = 65536;

  // Align to the 2 MiB page size (known as a superpage or huge page).
  // FreeBSD automatically promotes 2 MiB-aligned allocations.
  defaultImageBase = 0x200000;

  needsThunks = true;
}

RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
                            const uint8_t *loc) const {
  switch (type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_ABS32:
  case R_AARCH64_ABS64:
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_MOVW_SABS_G0:
  case R_AARCH64_MOVW_SABS_G1:
  case R_AARCH64_MOVW_SABS_G2:
  case R_AARCH64_MOVW_UABS_G0:
  case R_AARCH64_MOVW_UABS_G0_NC:
  case R_AARCH64_MOVW_UABS_G1:
  case R_AARCH64_MOVW_UABS_G1_NC:
  case R_AARCH64_MOVW_UABS_G2:
  case R_AARCH64_MOVW_UABS_G2_NC:
  case R_AARCH64_MOVW_UABS_G3:
    return R_ABS;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    return R_AARCH64_TLSDESC_PAGE;
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSDESC_ADD_LO12:
    return R_TLSDESC;
  case R_AARCH64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    return R_TPREL;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return R_PLT_PC;
  case R_AARCH64_PLT32:
    const_cast<Symbol &>(s).thunkAccessed = true;
    return R_PLT_PC;
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_LD_PREL_LO19:
  case R_AARCH64_MOVW_PREL_G0:
  case R_AARCH64_MOVW_PREL_G0_NC:
  case R_AARCH64_MOVW_PREL_G1:
  case R_AARCH64_MOVW_PREL_G1_NC:
  case R_AARCH64_MOVW_PREL_G2:
  case R_AARCH64_MOVW_PREL_G2_NC:
  case R_AARCH64_MOVW_PREL_G3:
    return R_PC;
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    return R_AARCH64_PAGE_PC;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return R_GOT;
  case R_AARCH64_LD64_GOTPAGE_LO15:
    return R_AARCH64_GOT_PAGE;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    return R_AARCH64_GOT_PAGE_PC;
  case R_AARCH64_NONE:
    return R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}

RelExpr AArch64::adjustTlsExpr(RelType type, RelExpr expr) const {
  if (expr == R_RELAX_TLS_GD_TO_IE) {
    if (type == R_AARCH64_TLSDESC_ADR_PAGE21)
      return R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC;
    return R_RELAX_TLS_GD_TO_IE_ABS;
  }
  return expr;
}

bool AArch64::usesOnlyLowPageBits(RelType type) const {
  switch (type) {
  default:
    return false;
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return true;
  }
}

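// R_AARCH64_ABS64 is the only absolute relocation here that can be
// represented as a dynamic relocation; for anything else we return
// R_AARCH64_NONE, and the caller reports an error if a dynamic relocation
// would have been required.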
RelType AArch64::getDynRel(RelType type) const {
  if (type == R_AARCH64_ABS64)
    return type;
  return R_AARCH64_NONE;
}

int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  case R_AARCH64_TLSDESC:
    return read64(buf + 8);
  case R_AARCH64_NONE:
  case R_AARCH64_GLOB_DAT:
  case R_AARCH64_JUMP_SLOT:
    return 0;
  case R_AARCH64_PREL32:
    return SignExtend64<32>(read32(buf));
  case R_AARCH64_ABS64:
  case R_AARCH64_PREL64:
  case R_AARCH64_RELATIVE:
  case R_AARCH64_IRELATIVE:
  case R_AARCH64_TLS_TPREL64:
    return read64(buf);
  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  }
}

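// Entries in .got.plt initially point at the PLT header so that the first
// call through plt[n] enters the lazy resolver.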
void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write64(buf, in.plt->getVA());
}

void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  if (config->writeAddends)
    write64(buf, s.getVA());
}

void AArch64::writePltHeader(uint8_t *buf) const {
  const uint8_t pltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp    x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp   x16, Page(&(.got.plt[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr    x17, [x16, Offset(&(.got.plt[2]))]
      0x10, 0x02, 0x00, 0x91, // add    x16, x16, Offset(&(.got.plt[2]))
      0x20, 0x02, 0x1f, 0xd6, // br     x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(buf, pltData, sizeof(pltData));

  uint64_t got = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();
  relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(got + 16) - getAArch64Page(plt + 4));
  relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
  relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
}

void AArch64::writePlt(uint8_t *buf, const Symbol &sym,
                       uint64_t pltEntryAddr) const {
  const uint8_t inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(buf, inst, sizeof(inst));

  uint64_t gotPltEntryAddr = sym.getGotPltVA();
  relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
  relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
  relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
}

bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                         uint64_t branchAddr, const Symbol &s,
                         int64_t a) const {
  // If s is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction. If it is hidden, its
  // binding has been converted to local, so we just check isUndefined() here.
  // An undefined non-weak symbol will already have been reported as an error.
  if (s.isUndefined() && !s.isInPlt())
    return false;
  // "ELF for the Arm 64-bit Architecture", section "Call and Jump
  // relocations", only permits range extension thunks for the
  // R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocation types.
  if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
      type != R_AARCH64_PLT32)
    return false;
  uint64_t dst = expr == R_PLT_PC ? s.getPltVA() : s.getVA(a);
  return !inBranchRange(type, branchAddr, dst);
}

uint32_t AArch64::getThunkSectionSpacing() const {
  // See the comment in Arch/ARM.cpp for a more detailed explanation of
  // getThunkSectionSpacing(). For AArch64, the only branches we are permitted
  // to thunk have a range of +/- 128 MiB.
  return (128 * 1024 * 1024) - 0x30000;
}

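// A branch immediate is signed, so the forward reach is one instruction
// (4 bytes) shorter than the backward reach: for example, a 26-bit immediate
// scaled by 4 covers [-2^27, 2^27 - 4] bytes relative to the branch.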
bool AArch64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
      type != R_AARCH64_PLT32)
    return true;
  // The AArch64 call and unconditional branch instructions have a range of
  // +/- 128 MiB. The PLT32 relocation supports a range up to +/- 2 GiB.
  uint64_t range =
      type == R_AARCH64_PLT32 ? (UINT64_C(1) << 31) : (128 * 1024 * 1024);
  if (dst > src) {
    // Immediate of branch is signed.
    range -= 4;
    return dst - src <= range;
  }
  return src - dst <= range;
}

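// Write the 21-bit ADR/ADRP immediate: the low two bits go into immlo (bits
// 30:29) and the remaining 19 bits into immhi (bits 23:5).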
static void write32AArch64Addr(uint8_t *l, uint64_t imm) {
  uint32_t immLo = (imm & 0x3) << 29;
  uint32_t immHi = (imm & 0x1FFFFC) << 3;
  uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(l, (read32le(l) & ~mask) | immLo | immHi);
}

// Return bits [start, end] of val, shifted right by start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t val, int start, int end) {
  uint64_t mask = ((uint64_t)1 << (end + 1 - start)) - 1;
  return (val >> start) & mask;
}

static void or32le(uint8_t *p, int32_t v) { write32le(p, read32le(p) | v); }

// Update the immediate field in an AArch64 ldr, str, or add instruction.
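// The unsigned 12-bit immediate occupies bits 21:10 of these instructions.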
static void or32AArch64Imm(uint8_t *l, uint64_t imm) {
  or32le(l, (imm & 0xFFF) << 10);
}

// Update the immediate field in an AArch64 movk, movn or movz instruction
// for a signed relocation, and update the opcode of a movn or movz instruction
// to match the sign of the operand.
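// For example, materializing -3 with a G0 relocation rewrites the instruction
// as movn Xd, #2: movn writes the bitwise NOT of its operand, and ~2 == -3.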
static void writeSMovWImm(uint8_t *loc, uint32_t imm) {
  uint32_t inst = read32le(loc);
  // Opcode field is bits 30, 29, with 10 = movz, 00 = movn and 11 = movk.
  if (!(inst & (1 << 29))) {
    // movn or movz.
    if (imm & 0x10000) {
      // Change opcode to movn, which takes an inverted operand.
      imm ^= 0xFFFF;
      inst &= ~(1 << 30);
    } else {
      // Change opcode to movz.
      inst |= 1 << 30;
    }
  }
  write32le(loc, inst | ((imm & 0xFFFF) << 5));
}

void AArch64::relocate(uint8_t *loc, const Relocation &rel,
                       uint64_t val) const {
  switch (rel.type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt(loc, val, 16, rel);
    write16(loc, val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt(loc, val, 32, rel);
    write32(loc, val);
    break;
  case R_AARCH64_PLT32:
    checkInt(loc, val, 32, rel);
    write32(loc, val);
    break;
  case R_AARCH64_ABS64:
    // AArch64 relocations to tagged symbols have extended semantics, as
    // described here:
    // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative.
    // tl;dr: encode the symbol's special addend in the place, which is an
    // offset to the point where the logical tag is derived from. As a quick
    // hack, if the addend is within the symbol's bounds, there is no need to
    // encode the tag derivation offset.
    if (rel.sym && rel.sym->isTagged() &&
        (rel.addend < 0 ||
         rel.addend >= static_cast<int64_t>(rel.sym->getSize())))
      write64(loc, -rel.addend);
    else
      write64(loc, val);
    break;
  case R_AARCH64_PREL64:
    write64(loc, val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    or32AArch64Imm(loc, val);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    checkInt(loc, val, 33, rel);
    [[fallthrough]];
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    write32AArch64Addr(loc, val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt(loc, val, 21, rel);
    write32AArch64Addr(loc, val);
    break;
  case R_AARCH64_JUMP26:
    // Normally we would just write the bits of the immediate field, however
    // when patching instructions for the CPU errata fix --fix-cortex-a53-843419
    // we want to replace a non-branch instruction with a branch immediate
    // instruction. By writing all the bits of the instruction including the
    // opcode and the immediate (0b000101 followed by imm26) we can do this
    // transformation by placing an R_AARCH64_JUMP26 relocation at the offset
    // of the instruction we want to patch.
    write32le(loc, 0x14000000);
    [[fallthrough]];
  case R_AARCH64_CALL26:
    checkInt(loc, val, 28, rel);
    or32le(loc, (val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
  case R_AARCH64_LD_PREL_LO19:
    checkAlignment(loc, val, 4, rel);
    checkInt(loc, val, 21, rel);
    or32le(loc, (val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    or32AArch64Imm(loc, getBits(val, 0, 11));
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    checkAlignment(loc, val, 2, rel);
    or32AArch64Imm(loc, getBits(val, 1, 11));
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    checkAlignment(loc, val, 4, rel);
    or32AArch64Imm(loc, getBits(val, 2, 11));
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12:
    checkAlignment(loc, val, 8, rel);
    or32AArch64Imm(loc, getBits(val, 3, 11));
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
    checkAlignment(loc, val, 16, rel);
    or32AArch64Imm(loc, getBits(val, 4, 11));
    break;
  case R_AARCH64_LD64_GOTPAGE_LO15:
    checkAlignment(loc, val, 8, rel);
    or32AArch64Imm(loc, getBits(val, 3, 14));
    break;
  case R_AARCH64_MOVW_UABS_G0:
    checkUInt(loc, val, 16, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G0_NC:
    or32le(loc, (val & 0xFFFF) << 5);
    break;
  case R_AARCH64_MOVW_UABS_G1:
    checkUInt(loc, val, 32, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G1_NC:
    or32le(loc, (val & 0xFFFF0000) >> 11);
    break;
  case R_AARCH64_MOVW_UABS_G2:
    checkUInt(loc, val, 48, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G2_NC:
    or32le(loc, (val & 0xFFFF00000000) >> 27);
    break;
  case R_AARCH64_MOVW_UABS_G3:
    or32le(loc, (val & 0xFFFF000000000000) >> 43);
    break;
  case R_AARCH64_MOVW_PREL_G0:
  case R_AARCH64_MOVW_SABS_G0:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    checkInt(loc, val, 17, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G0_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    writeSMovWImm(loc, val);
    break;
  case R_AARCH64_MOVW_PREL_G1:
  case R_AARCH64_MOVW_SABS_G1:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    checkInt(loc, val, 33, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G1_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    writeSMovWImm(loc, val >> 16);
    break;
  case R_AARCH64_MOVW_PREL_G2:
  case R_AARCH64_MOVW_SABS_G2:
  case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    checkInt(loc, val, 49, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G2_NC:
    writeSMovWImm(loc, val >> 32);
    break;
  case R_AARCH64_MOVW_PREL_G3:
    writeSMovWImm(loc, val >> 48);
    break;
  case R_AARCH64_TSTBR14:
    checkInt(loc, val, 16, rel);
    or32le(loc, (val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkUInt(loc, val, 24, rel);
    or32AArch64Imm(loc, val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
    or32AArch64Imm(loc, val);
    break;
  case R_AARCH64_TLSDESC:
    // For R_AARCH64_TLSDESC the addend is stored in the second 64-bit word.
    write64(loc + 8, val);
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}

void AArch64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  // TLSDESC Global-Dynamic relocations are of the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
  //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And the sequence can be optimized to:
  //   movz    x0, #0x0, lsl #16
  //   movk    x0, #0x10
  //   nop
  //   nop
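  // movz sets bits 31:16 of the TP offset and movk fills in bits 15:0, which
  // is why the offset is range-checked to 32 bits below.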
  checkUInt(loc, val, 32, rel);

  switch (rel.type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(loc, 0xd2a00000 | (((val >> 16) & 0xffff) << 5)); // movz
    return;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(loc, 0xf2800000 | ((val & 0xffff) << 5)); // movk
    return;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}

void AArch64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  // TLSDESC Global-Dynamic relocations are of the form:
  //   adrp    x0, :tlsdesc:v             [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr     x1, [x0, #:tlsdesc_lo12:v] [R_AARCH64_TLSDESC_LD64_LO12]
  //   add     x0, x0, :tlsdesc_lo12:v    [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                       [R_AARCH64_TLSDESC_CALL]
  //   blr     x1
  // And the sequence can be optimized to:
  //   adrp    x0, :gottprel:v
  //   ldr     x0, [x0, :gottprel_lo12:v]
  //   nop
  //   nop

  switch (rel.type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(loc, 0xd503201f); // nop
    break;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(loc, 0x90000000); // adrp
    relocateNoSym(loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, val);
    break;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(loc, 0xf9400000); // ldr
    relocateNoSym(loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, val);
    break;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
  }
}

void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  checkUInt(loc, val, 32, rel);

  if (rel.type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
    // Generate MOVZ.
    uint32_t regNo = read32le(loc) & 0x1f;
    write32le(loc, (0xd2a00000 | regNo) | (((val >> 16) & 0xffff) << 5));
    return;
  }
  if (rel.type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
    // Generate MOVK.
    uint32_t regNo = read32le(loc) & 0x1f;
    write32le(loc, (0xf2800000 | regNo) | ((val & 0xffff) << 5));
    return;
  }
  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}

AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {
  if (!config->relax)
    return;
  // Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
  // always appear in pairs.
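  // A stray relocation of either type means the code does not follow the
  // expected adrp+ldr pattern, so relaxation is conservatively disabled.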
  size_t i = 0;
  const size_t size = relocs.size();
  for (; i != size; ++i) {
    if (relocs[i].type == R_AARCH64_ADR_GOT_PAGE) {
      if (i + 1 < size && relocs[i + 1].type == R_AARCH64_LD64_GOT_LO12_NC) {
        ++i;
        continue;
      }
      break;
    } else if (relocs[i].type == R_AARCH64_LD64_GOT_LO12_NC) {
      break;
    }
  }
  safeToRelaxAdrpLdr = i == size;
}

bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
                                     const Relocation &addRel, uint64_t secAddr,
                                     uint8_t *buf) const {
  // When the address of sym is within the range of ADR, we may relax
  // ADRP xn, sym
  // ADD  xn, xn, :lo12: sym
  // to
  // NOP
  // ADR xn, sym
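  // Note that the NOP replaces the ADRP and the ADR is written over the ADD,
  // so instruction offsets within the section are unchanged.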
  if (!config->relax || adrpRel.type != R_AARCH64_ADR_PREL_PG_HI21 ||
      addRel.type != R_AARCH64_ADD_ABS_LO12_NC)
    return false;
  // Check if the relocations apply to consecutive instructions.
  if (adrpRel.offset + 4 != addRel.offset)
    return false;
  if (adrpRel.sym != addRel.sym)
    return false;
  if (adrpRel.addend != 0 || addRel.addend != 0)
    return false;

  uint32_t adrpInstr = read32le(buf + adrpRel.offset);
  uint32_t addInstr = read32le(buf + addRel.offset);
  // Check if the first instruction is ADRP and the second instruction is ADD.
  if ((adrpInstr & 0x9f000000) != 0x90000000 ||
      (addInstr & 0xffc00000) != 0x91000000)
    return false;
  uint32_t adrpDestReg = adrpInstr & 0x1f;
  uint32_t addDestReg = addInstr & 0x1f;
  uint32_t addSrcReg = (addInstr >> 5) & 0x1f;
  if (adrpDestReg != addDestReg || adrpDestReg != addSrcReg)
    return false;

  Symbol &sym = *adrpRel.sym;
  // Check if the address difference is within the +/- 1 MiB range of ADR.
  int64_t val = sym.getVA() - (secAddr + addRel.offset);
  if (val < -1024 * 1024 || val >= 1024 * 1024)
    return false;

  Relocation adrRel = {R_ABS, R_AARCH64_ADR_PREL_LO21, addRel.offset,
                       /*addend=*/0, &sym};
  // nop
  write32le(buf + adrpRel.offset, 0xd503201f);
  // adr x_<dest_reg>
  write32le(buf + adrRel.offset, 0x10000000 | adrpDestReg);
  target->relocate(buf + adrRel.offset, adrRel, val);
  return true;
}

bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
                                     const Relocation &ldrRel, uint64_t secAddr,
                                     uint8_t *buf) const {
  if (!safeToRelaxAdrpLdr)
    return false;

  // When the definition of sym is not preemptible, we may be able to relax
  // ADRP xn, :got: sym
  // LDR xn, [ xn :got_lo12: sym]
  // to
  // ADRP xn, sym
  // ADD xn, xn, :lo12: sym
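  // This removes the load from the GOT: the ADRP+ADD pair computes the
  // symbol's address directly.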

  if (adrpRel.type != R_AARCH64_ADR_GOT_PAGE ||
      ldrRel.type != R_AARCH64_LD64_GOT_LO12_NC)
    return false;
  // Check if the relocations apply to consecutive instructions.
  if (adrpRel.offset + 4 != ldrRel.offset)
    return false;
  // Check if the relocations reference the same symbol and
  // skip undefined, preemptible and STT_GNU_IFUNC symbols.
  if (!adrpRel.sym || adrpRel.sym != ldrRel.sym || !adrpRel.sym->isDefined() ||
      adrpRel.sym->isPreemptible || adrpRel.sym->isGnuIFunc())
    return false;
  // Check if the addends of both relocations are zero.
  if (adrpRel.addend != 0 || ldrRel.addend != 0)
    return false;
  uint32_t adrpInstr = read32le(buf + adrpRel.offset);
  uint32_t ldrInstr = read32le(buf + ldrRel.offset);
  // Check if the first instruction is ADRP and the second instruction is LDR.
  if ((adrpInstr & 0x9f000000) != 0x90000000 ||
      (ldrInstr & 0x3b000000) != 0x39000000)
    return false;
  // Check the value of the sf bit.
  if (!(ldrInstr >> 31))
    return false;
  uint32_t adrpDestReg = adrpInstr & 0x1f;
  uint32_t ldrDestReg = ldrInstr & 0x1f;
  uint32_t ldrSrcReg = (ldrInstr >> 5) & 0x1f;
  // Check if ADRP and LDR use the same register.
  if (adrpDestReg != ldrDestReg || adrpDestReg != ldrSrcReg)
    return false;

  Symbol &sym = *adrpRel.sym;
  // GOT references to absolute symbols can't be relaxed to use ADRP/ADD in
  // position-independent code because these instructions produce a relative
  // address.
  if (config->isPic && !cast<Defined>(sym).section)
    return false;
  // Check if the address difference is within the +/- 4 GiB range of ADRP.
  int64_t val =
      getAArch64Page(sym.getVA()) - getAArch64Page(secAddr + adrpRel.offset);
  if (val != llvm::SignExtend64(val, 33))
    return false;

  Relocation adrpSymRel = {R_AARCH64_PAGE_PC, R_AARCH64_ADR_PREL_PG_HI21,
                           adrpRel.offset, /*addend=*/0, &sym};
  Relocation addRel = {R_ABS, R_AARCH64_ADD_ABS_LO12_NC, ldrRel.offset,
                       /*addend=*/0, &sym};

  // adrp x_<dest_reg>
  write32le(buf + adrpSymRel.offset, 0x90000000 | adrpDestReg);
  // add x_<dest reg>, x_<dest reg>
  write32le(buf + addRel.offset, 0x91000000 | adrpDestReg | (adrpDestReg << 5));

  target->relocate(buf + adrpSymRel.offset, adrpSymRel,
                   SignExtend64(getAArch64Page(sym.getVA()) -
                                    getAArch64Page(secAddr + adrpSymRel.offset),
                                64));
  target->relocate(buf + addRel.offset, addRel, SignExtend64(sym.getVA(), 64));
  tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf);
  return true;
}

// Tagged symbols have upper address bits that are added by the dynamic loader,
// and thus need the full 64-bit GOT entry. Do not relax such symbols.
static bool needsGotForMemtag(const Relocation &rel) {
  return rel.sym->isTagged() && needsGot(rel.expr);
}

void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
  uint64_t secAddr = sec.getOutputSection()->addr;
  if (auto *s = dyn_cast<InputSection>(&sec))
    secAddr += s->outSecOff;
  else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
    secAddr += ehIn->getParent()->outSecOff;
  AArch64Relaxer relaxer(sec.relocs());
  for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
    const Relocation &rel = sec.relocs()[i];
    uint8_t *loc = buf + rel.offset;
    const uint64_t val =
        sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
                             secAddr + rel.offset, *rel.sym, rel.expr);

    if (needsGotForMemtag(rel)) {
      relocate(loc, rel, val);
      continue;
    }

    switch (rel.expr) {
    case R_AARCH64_GOT_PAGE_PC:
      if (i + 1 < size &&
          relaxer.tryRelaxAdrpLdr(rel, sec.relocs()[i + 1], secAddr, buf)) {
        ++i;
        continue;
      }
      break;
    case R_AARCH64_PAGE_PC:
      if (i + 1 < size &&
          relaxer.tryRelaxAdrpAdd(rel, sec.relocs()[i + 1], secAddr, buf)) {
        ++i;
        continue;
      }
      break;
    case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    case R_RELAX_TLS_GD_TO_IE_ABS:
      relaxTlsGdToIe(loc, rel, val);
      continue;
    case R_RELAX_TLS_GD_TO_LE:
      relaxTlsGdToLe(loc, rel, val);
      continue;
    case R_RELAX_TLS_IE_TO_LE:
      relaxTlsIeToLe(loc, rel, val);
      continue;
    default:
      break;
    }
    relocate(loc, rel, val);
  }
}

// AArch64 may use security features in variant PLT sequences. These are:
// Pointer Authentication (PAC), introduced in armv8.3-a, and Branch Target
// Identification (BTI), introduced in armv8.5-a. The additional instructions
// used in the variant PLT sequences are encoded in the hint space, so they
// can be deployed on older architectures, which treat them as nops.
// PAC and BTI can be combined, leading to the following combinations:
// writePltHeader
// writePltHeaderBti (no PAC header needed)
// writePlt
// writePltBti (BTI only)
// writePltPac (PAC only)
// writePltBtiPac (BTI and PAC)
//
// When PAC is enabled the dynamic loader encrypts the address that it places
// in the .got.plt using the pacia1716 instruction, which encrypts the value
// in x17 using the modifier in x16. The static linker places autia1716 before
// the indirect branch to x17 to authenticate the address in x17 with the
// modifier in x16. This makes it more difficult for an attacker to modify the
// value in the .got.plt.
//
// When BTI is enabled all indirect branches must land on a bti instruction.
// The static linker must place a bti instruction at the start of any PLT entry
// that may be the target of an indirect branch. As the PLT entries call the
// lazy resolver indirectly, the PLT header must have a bti instruction at its
// start. In general a bti instruction is not needed for a PLT entry, as
// indirect calls are resolved to the function address and not the PLT entry
// for the function. There are a small number of cases where the PLT address
// can escape, such as taking the address of a function or ifunc via a
// non-GOT-generating relocation when a shared library refers to that symbol.
//
// We use the bti c variant of the instruction, which permits indirect branches
// (br) via x16/x17 and indirect function calls (blr) via any register. The ABI
// guarantees that all indirect branches from code requiring BTI protection
// will go via x16/x17.

namespace {
class AArch64BtiPac final : public AArch64 {
public:
  AArch64BtiPac();
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;

private:
  bool btiHeader; // bti instruction needed in PLT header and entry
  bool pacEntry;  // autia1716 instruction needed in PLT entry
};
} // namespace

AArch64BtiPac::AArch64BtiPac() {
  btiHeader = (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
  // A BTI (Branch Target Identification) PLT entry is only required if the
  // address of the PLT entry can be taken by the program, which permits an
  // indirect jump to the PLT entry. This can happen when the address of the
  // PLT entry for a function is canonicalised due to the address of the
  // function in an executable being taken by a shared library, or when a
  // non-preemptible ifunc is referenced by non-GOT-generating,
  // non-PLT-generating relocations.
  // The PAC PLT entries require dynamic loader support and this isn't known
  // from properties in the objects, so we use the command line flag.
  pacEntry = config->zPacPlt;

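  // A BTI or PAC entry needs one extra instruction, so entries grow from 16
  // to 24 bytes; entries that end up without the extra instruction are padded
  // with a nop so that all entries keep a uniform size.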
  if (btiHeader || pacEntry) {
    pltEntrySize = 24;
    ipltEntrySize = 24;
  }
}

void AArch64BtiPac::writePltHeader(uint8_t *buf) const {
  const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
  const uint8_t pltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp    x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp   x16, Page(&(.got.plt[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr    x17, [x16, Offset(&(.got.plt[2]))]
      0x10, 0x02, 0x00, 0x91, // add    x16, x16, Offset(&(.got.plt[2]))
      0x20, 0x02, 0x1f, 0xd6, // br     x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop

  uint64_t got = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();

  if (btiHeader) {
    // PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C
    // instruction.
    memcpy(buf, btiData, sizeof(btiData));
    buf += sizeof(btiData);
    plt += sizeof(btiData);
  }
  memcpy(buf, pltData, sizeof(pltData));

  relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(got + 16) - getAArch64Page(plt + 8));
  relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
  relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
  if (!btiHeader)
    // We didn't add the BTI c instruction so round out size with NOP.
    memcpy(buf + sizeof(pltData), nopData, sizeof(nopData));
}

void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {
  // The PLT entry is of the form:
  // [btiData] addrInst (pacBr | stdBr) [nopData]
  const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
  const uint8_t addrInst[] = {
      0x10, 0x00, 0x00, 0x90,  // adrp x16, Page(&(.got.plt[n]))
      0x11, 0x02, 0x40, 0xf9,  // ldr  x17, [x16, Offset(&(.got.plt[n]))]
      0x10, 0x02, 0x00, 0x91   // add  x16, x16, Offset(&(.got.plt[n]))
  };
  const uint8_t pacBr[] = {
      0x9f, 0x21, 0x03, 0xd5,  // autia1716
      0x20, 0x02, 0x1f, 0xd6   // br   x17
  };
  const uint8_t stdBr[] = {
      0x20, 0x02, 0x1f, 0xd6,  // br   x17
      0x1f, 0x20, 0x03, 0xd5   // nop
  };
  const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop

  // NEEDS_COPY indicates a non-ifunc canonical PLT entry whose address may
  // escape to shared objects. isInIplt indicates a non-preemptible ifunc. Its
  // address may escape if referenced by a direct relocation. If relative
  // vtables are used and the vtable is in a shared object, the offsets will
  // be to the PLT entry. The condition is conservative.
  bool hasBti = btiHeader &&
                (sym.hasFlag(NEEDS_COPY) || sym.isInIplt || sym.thunkAccessed);
  if (hasBti) {
    memcpy(buf, btiData, sizeof(btiData));
    buf += sizeof(btiData);
    pltEntryAddr += sizeof(btiData);
  }

  uint64_t gotPltEntryAddr = sym.getGotPltVA();
  memcpy(buf, addrInst, sizeof(addrInst));
  relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
  relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
  relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);

  if (pacEntry)
    memcpy(buf + sizeof(addrInst), pacBr, sizeof(pacBr));
  else
    memcpy(buf + sizeof(addrInst), stdBr, sizeof(stdBr));
  if (!hasBti)
    // We didn't add the BTI c instruction so round out size with NOP.
    memcpy(buf + sizeof(addrInst) + sizeof(stdBr), nopData, sizeof(nopData));
}

static TargetInfo *getTargetInfo() {
  if ((config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ||
      config->zPacPlt) {
    static AArch64BtiPac t;
    return &t;
  }
  static AArch64 t;
  return &t;
}

TargetInfo *elf::getAArch64TargetInfo() { return getTargetInfo(); }

template <class ELFT>
static void
addTaggedSymbolReferences(InputSectionBase &sec,
                          DenseMap<Symbol *, unsigned> &referenceCount) {
  assert(sec.type == SHT_AARCH64_MEMTAG_GLOBALS_STATIC);

  const RelsOrRelas<ELFT> rels = sec.relsOrRelas<ELFT>();
  if (rels.areRelocsRel())
    error("non-RELA relocations are not allowed with memtag globals");

  for (const typename ELFT::Rela &rel : rels.relas) {
    Symbol &sym = sec.getFile<ELFT>()->getRelocTargetSym(rel);
    // Linker-synthesized symbols such as __executable_start may be referenced
    // as tagged in input objfiles, and we don't want them to be tagged. A
    // cheap way to exclude them is the type check, as their type is
    // STT_NOTYPE rather than STT_OBJECT. In addition, this saves us from
    // checking untaggable symbols, like functions or TLS symbols.
    if (sym.type != STT_OBJECT)
      continue;
    // STB_LOCAL symbols can't be referenced from outside the object file, and
    // thus don't need to be checked for references from other object files.
    if (sym.binding == STB_LOCAL) {
      sym.setIsTagged(true);
      continue;
    }
    ++referenceCount[&sym];
  }
  sec.markDead();
}

// A tagged symbol must be denoted as being tagged by all references and the
// chosen definition. For simplicity, here, it must also be denoted as tagged
// for all definitions. Otherwise:
//
//  1. A tagged definition can be used by an untagged declaration, in which case
//     the untagged access may be PC-relative, causing a tag mismatch at
//     runtime.
//  2. An untagged definition can be used by a tagged declaration, where the
//     compiler has taken advantage of the increased alignment of the tagged
//     declaration, but the alignment at runtime is wrong, causing a fault.
//
// Ideally, this isn't a problem, as any TU that imports or exports tagged
// symbols should also be built with tagging. But, to handle these cases, we
// demote the symbol to be untagged.
void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
  assert(hasMemtag());

  // First, collect all symbols that are marked as tagged, and count how many
  // times they're marked as tagged.
  DenseMap<Symbol *, unsigned> taggedSymbolReferenceCount;
  for (InputFile *file : files) {
    if (file->kind() != InputFile::ObjKind)
      continue;
    for (InputSectionBase *section : file->getSections()) {
      if (!section || section->type != SHT_AARCH64_MEMTAG_GLOBALS_STATIC ||
          section == &InputSection::discarded)
        continue;
      invokeELFT(addTaggedSymbolReferences, *section,
                 taggedSymbolReferenceCount);
    }
  }

  // Now, go through all the symbols. If the number of declarations +
  // definitions of a symbol exceeds the number of times it's marked as
  // tagged, it means we have an objfile that uses the untagged variant of the
  // symbol.
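  // For example, a symbol that appears in three objfiles' symbol tables but
  // was marked tagged by only two of them hits zero early, is erased from the
  // map, and is thereby demoted to untagged.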
  for (InputFile *file : files) {
    if (file->kind() != InputFile::BinaryKind &&
        file->kind() != InputFile::ObjKind)
      continue;

    for (Symbol *symbol : file->getSymbols()) {
      // See `addTaggedSymbolReferences` for more details.
      if (symbol->type != STT_OBJECT ||
          symbol->binding == STB_LOCAL)
        continue;
      auto it = taggedSymbolReferenceCount.find(symbol);
      if (it == taggedSymbolReferenceCount.end())
        continue;
      unsigned &remainingAllowedTaggedRefs = it->second;
      if (remainingAllowedTaggedRefs == 0) {
        taggedSymbolReferenceCount.erase(it);
        continue;
      }
      --remainingAllowedTaggedRefs;
    }
  }

  // `addTaggedSymbolReferences` has already checked that we have RELA
  // relocations; the only other way to get written addends is with
  // --apply-dynamic-relocs.
  if (!taggedSymbolReferenceCount.empty() && config->writeAddends)
    error("--apply-dynamic-relocs cannot be used with MTE globals");

  // Now, `taggedSymbolReferenceCount` should only contain symbols that are
  // defined as tagged exactly as many times as they're referenced, meaning all
  // uses are tagged.
  for (auto &[symbol, remainingTaggedRefs] : taggedSymbolReferenceCount) {
    assert(remainingTaggedRefs == 0 &&
           "Symbol is defined as tagged more times than it's used");
    symbol->setIsTagged(true);
  }
}
1085