//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
uint64_t elf::getAArch64Page(uint64_t expr) {
  return expr & ~static_cast<uint64_t>(0xFFF);
}
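
// For illustration: getAArch64Page(0x12345ABC) == 0x12345000. An ADRP/ADD (or
// ADRP/LDR) pair then splits an address into a page delta materialized by the
// ADRP and a low 12-bit part materialized by the :lo12: instruction,
// independent of the run-time page size.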

namespace {
class AArch64 : public TargetInfo {
public:
  AArch64();
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  bool usesOnlyLowPageBits(RelType type) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  RelExpr adjustTlsExpr(RelType type, RelExpr expr) const override;
  void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;

private:
  void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
  void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
};

struct AArch64Relaxer {
  bool safeToRelaxAdrpLdr = false;

  AArch64Relaxer(ArrayRef<Relocation> relocs);
  bool tryRelaxAdrpAdd(const Relocation &adrpRel, const Relocation &addRel,
                       uint64_t secAddr, uint8_t *buf) const;
  bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
                       uint64_t secAddr, uint8_t *buf) const;
};
} // namespace

AArch64::AArch64() {
  copyRel = R_AARCH64_COPY;
  relativeRel = R_AARCH64_RELATIVE;
  iRelativeRel = R_AARCH64_IRELATIVE;
  gotRel = R_AARCH64_GLOB_DAT;
  pltRel = R_AARCH64_JUMP_SLOT;
  symbolicRel = R_AARCH64_ABS64;
  tlsDescRel = R_AARCH64_TLSDESC;
  tlsGotRel = R_AARCH64_TLS_TPREL64;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  defaultMaxPageSize = 65536;

  // Align to the 2 MiB page size (known as a superpage or huge page).
  // FreeBSD automatically promotes 2 MiB-aligned allocations.
  defaultImageBase = 0x200000;

  needsThunks = true;
}

RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
                            const uint8_t *loc) const {
  switch (type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_ABS32:
  case R_AARCH64_ABS64:
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_MOVW_SABS_G0:
  case R_AARCH64_MOVW_SABS_G1:
  case R_AARCH64_MOVW_SABS_G2:
  case R_AARCH64_MOVW_UABS_G0:
  case R_AARCH64_MOVW_UABS_G0_NC:
  case R_AARCH64_MOVW_UABS_G1:
  case R_AARCH64_MOVW_UABS_G1_NC:
  case R_AARCH64_MOVW_UABS_G2:
  case R_AARCH64_MOVW_UABS_G2_NC:
  case R_AARCH64_MOVW_UABS_G3:
    return R_ABS;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    return R_AARCH64_TLSDESC_PAGE;
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSDESC_ADD_LO12:
    return R_TLSDESC;
  case R_AARCH64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    return R_TPREL;
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return R_PLT_PC;
  case R_AARCH64_PLT32:
    const_cast<Symbol &>(s).thunkAccessed = true;
    return R_PLT_PC;
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_LD_PREL_LO19:
  case R_AARCH64_MOVW_PREL_G0:
  case R_AARCH64_MOVW_PREL_G0_NC:
  case R_AARCH64_MOVW_PREL_G1:
  case R_AARCH64_MOVW_PREL_G1_NC:
  case R_AARCH64_MOVW_PREL_G2:
  case R_AARCH64_MOVW_PREL_G2_NC:
  case R_AARCH64_MOVW_PREL_G3:
    return R_PC;
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    return R_AARCH64_PAGE_PC;
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return R_GOT;
  case R_AARCH64_LD64_GOTPAGE_LO15:
    return R_AARCH64_GOT_PAGE;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    return R_AARCH64_GOT_PAGE_PC;
  case R_AARCH64_GOTPCREL32:
    return R_GOT_PC;
  case R_AARCH64_NONE:
    return R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}
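
// For illustration: the common address materialization
//   adrp x0, sym            // R_AARCH64_ADR_PREL_PG_HI21 -> R_AARCH64_PAGE_PC
//   add  x0, x0, :lo12:sym  // R_AARCH64_ADD_ABS_LO12_NC  -> R_ABS
// pairs a PC-relative page computation with an absolute low part; the low
// part may be treated as absolute because only its bottom 12 bits are used
// (see usesOnlyLowPageBits below).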

RelExpr AArch64::adjustTlsExpr(RelType type, RelExpr expr) const {
  if (expr == R_RELAX_TLS_GD_TO_IE) {
    if (type == R_AARCH64_TLSDESC_ADR_PAGE21)
      return R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC;
    return R_RELAX_TLS_GD_TO_IE_ABS;
  }
  return expr;
}

bool AArch64::usesOnlyLowPageBits(RelType type) const {
  switch (type) {
  default:
    return false;
  case R_AARCH64_ADD_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_LD64_LO12:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    return true;
  }
}

RelType AArch64::getDynRel(RelType type) const {
  if (type == R_AARCH64_ABS64)
    return type;
  return R_AARCH64_NONE;
}

int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  case R_AARCH64_TLSDESC:
    return read64(buf + 8);
  case R_AARCH64_NONE:
  case R_AARCH64_GLOB_DAT:
  case R_AARCH64_JUMP_SLOT:
    return 0;
  case R_AARCH64_PREL32:
    return SignExtend64<32>(read32(buf));
  case R_AARCH64_ABS64:
  case R_AARCH64_PREL64:
  case R_AARCH64_RELATIVE:
  case R_AARCH64_IRELATIVE:
  case R_AARCH64_TLS_TPREL64:
    return read64(buf);
  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  }
}

void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write64(buf, in.plt->getVA());
}

void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  if (config->writeAddends)
    write64(buf, s.getVA());
}

void AArch64::writePltHeader(uint8_t *buf) const {
  const uint8_t pltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp  x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[2]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[2]))
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  memcpy(buf, pltData, sizeof(pltData));

  uint64_t got = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();
  relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(got + 16) - getAArch64Page(plt + 4));
  relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
  relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
}

void AArch64::writePlt(uint8_t *buf, const Symbol &sym,
                       uint64_t pltEntryAddr) const {
  const uint8_t inst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[n]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[n]))
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  memcpy(buf, inst, sizeof(inst));

  uint64_t gotPltEntryAddr = sym.getGotPltVA();
  relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
  relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
  relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
}
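
// For illustration (addresses hypothetical): with .got.plt[n] at 0x30468 and
// the PLT entry at 0x10340, the entry encodes
//   adrp x16, 0x30000       // Page(0x30468) - Page(0x10340) = +0x20000
//   ldr  x17, [x16, #0x468]
//   add  x16, x16, #0x468
//   br   x17
// so x17 receives the contents of the GOT slot and x16 its address (which the
// lazy resolver uses to identify the entry).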

bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                         uint64_t branchAddr, const Symbol &s,
                         int64_t a) const {
  // If s is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction. If it is hidden, its
  // binding has been converted to local, so we just check isUndefined() here.
  // An undefined non-weak symbol will have been reported as an error.
  if (s.isUndefined() && !s.isInPlt())
    return false;
  // ELF for the ARM 64-bit Architecture, section "Call and Jump relocations",
  // only permits range extension thunks for the R_AARCH64_CALL26 and
  // R_AARCH64_JUMP26 relocation types.
  if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
      type != R_AARCH64_PLT32)
    return false;
  uint64_t dst = expr == R_PLT_PC ? s.getPltVA() : s.getVA(a);
  return !inBranchRange(type, branchAddr, dst);
}

uint32_t AArch64::getThunkSectionSpacing() const {
  // See comment in Arch/ARM.cpp for a more detailed explanation of
  // getThunkSectionSpacing(). For AArch64 the only branches we are permitted
  // to thunk have a range of +/- 128 MiB.
  return (128 * 1024 * 1024) - 0x30000;
}

bool AArch64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
      type != R_AARCH64_PLT32)
    return true;
  // The AArch64 call and unconditional branch instructions have a range of
  // +/- 128 MiB. The PLT32 relocation supports a range up to +/- 2 GiB.
  uint64_t range =
      type == R_AARCH64_PLT32 ? (UINT64_C(1) << 31) : (128 * 1024 * 1024);
  if (dst > src) {
    // Immediate of branch is signed.
    range -= 4;
    return dst - src <= range;
  }
  return src - dst <= range;
}

static void write32AArch64Addr(uint8_t *l, uint64_t imm) {
  uint32_t immLo = (imm & 0x3) << 29;
  uint32_t immHi = (imm & 0x1FFFFC) << 3;
  uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(l, (read32le(l) & ~mask) | immLo | immHi);
}

// Return bits [start, end] of val, shifted right by start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t val, int start, int end) {
  uint64_t mask = ((uint64_t)1 << (end + 1 - start)) - 1;
  return (val >> start) & mask;
}

static void or32le(uint8_t *p, int32_t v) { write32le(p, read32le(p) | v); }

// Update the immediate field in an AArch64 ldr, str, or add instruction.
static void or32AArch64Imm(uint8_t *l, uint64_t imm) {
  or32le(l, (imm & 0xFFF) << 10);
}

// Update the immediate field in an AArch64 movk, movn or movz instruction
// for a signed relocation, and update the opcode of a movn or movz instruction
// to match the sign of the operand.
static void writeSMovWImm(uint8_t *loc, uint32_t imm) {
  uint32_t inst = read32le(loc);
  // Opcode field is bits 30, 29, with 10 = movz, 00 = movn and 11 = movk.
  if (!(inst & (1 << 29))) {
    // movn or movz.
    if (imm & 0x10000) {
      // Change opcode to movn, which takes an inverted operand.
      imm ^= 0xFFFF;
      inst &= ~(1 << 30);
    } else {
      // Change opcode to movz.
      inst |= 1 << 30;
    }
  }
  write32le(loc, inst | ((imm & 0xFFFF) << 5));
}
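
// For illustration: relocating a movz/movn with a signed G0 value of -3
// (low bits 0x1FFFD), bit 16 of the shifted value is set, so the opcode is
// rewritten to movn and the operand inverted: movn xN, #2 yields ~2 == -3.
// For +3 the opcode becomes movz with operand 3. movk instructions are left
// unchanged.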

void AArch64::relocate(uint8_t *loc, const Relocation &rel,
                       uint64_t val) const {
  switch (rel.type) {
  case R_AARCH64_ABS16:
  case R_AARCH64_PREL16:
    checkIntUInt(loc, val, 16, rel);
    write16(loc, val);
    break;
  case R_AARCH64_ABS32:
  case R_AARCH64_PREL32:
    checkIntUInt(loc, val, 32, rel);
    write32(loc, val);
    break;
  case R_AARCH64_PLT32:
  case R_AARCH64_GOTPCREL32:
    checkInt(loc, val, 32, rel);
    write32(loc, val);
    break;
  case R_AARCH64_ABS64:
    // AArch64 relocations to tagged symbols have extended semantics, as
    // described here:
    // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative.
    // tl;dr: encode the symbol's special addend in the place, which is an
    // offset to the point where the logical tag is derived from. As a quick
    // hack, if the addend is within the symbol's bounds, there is no need to
    // encode the tag derivation offset.
    if (rel.sym && rel.sym->isTagged() &&
        (rel.addend < 0 ||
         rel.addend >= static_cast<int64_t>(rel.sym->getSize())))
      write64(loc, -rel.addend);
    else
      write64(loc, val);
    break;
  case R_AARCH64_PREL64:
    write64(loc, val);
    break;
  case R_AARCH64_ADD_ABS_LO12_NC:
    or32AArch64Imm(loc, val);
    break;
  case R_AARCH64_ADR_GOT_PAGE:
  case R_AARCH64_ADR_PREL_PG_HI21:
  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    checkInt(loc, val, 33, rel);
    [[fallthrough]];
  case R_AARCH64_ADR_PREL_PG_HI21_NC:
    write32AArch64Addr(loc, val >> 12);
    break;
  case R_AARCH64_ADR_PREL_LO21:
    checkInt(loc, val, 21, rel);
    write32AArch64Addr(loc, val);
    break;
  case R_AARCH64_JUMP26:
    // Normally we would just write the bits of the immediate field, however
    // when patching instructions for the CPU errata workaround
    // --fix-cortex-a53-843419 we want to replace a non-branch instruction
    // with a branch immediate instruction. By writing all the bits of the
    // instruction including the opcode and the immediate (0 001 | 01 imm26)
    // we can do this transformation by placing a R_AARCH64_JUMP26 relocation
    // at the offset of the instruction we want to patch.
    write32le(loc, 0x14000000);
    [[fallthrough]];
  case R_AARCH64_CALL26:
    checkInt(loc, val, 28, rel);
    or32le(loc, (val & 0x0FFFFFFC) >> 2);
    break;
  case R_AARCH64_CONDBR19:
  case R_AARCH64_LD_PREL_LO19:
    checkAlignment(loc, val, 4, rel);
    checkInt(loc, val, 21, rel);
    or32le(loc, (val & 0x1FFFFC) << 3);
    break;
  case R_AARCH64_LDST8_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    or32AArch64Imm(loc, getBits(val, 0, 11));
    break;
  case R_AARCH64_LDST16_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    checkAlignment(loc, val, 2, rel);
    or32AArch64Imm(loc, getBits(val, 1, 11));
    break;
  case R_AARCH64_LDST32_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    checkAlignment(loc, val, 4, rel);
    or32AArch64Imm(loc, getBits(val, 2, 11));
    break;
  case R_AARCH64_LDST64_ABS_LO12_NC:
  case R_AARCH64_LD64_GOT_LO12_NC:
  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
  case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_LD64_LO12:
    checkAlignment(loc, val, 8, rel);
    or32AArch64Imm(loc, getBits(val, 3, 11));
    break;
  case R_AARCH64_LDST128_ABS_LO12_NC:
  case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
    checkAlignment(loc, val, 16, rel);
    or32AArch64Imm(loc, getBits(val, 4, 11));
    break;
  case R_AARCH64_LD64_GOTPAGE_LO15:
    checkAlignment(loc, val, 8, rel);
    or32AArch64Imm(loc, getBits(val, 3, 14));
    break;
  case R_AARCH64_MOVW_UABS_G0:
    checkUInt(loc, val, 16, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G0_NC:
    or32le(loc, (val & 0xFFFF) << 5);
    break;
  case R_AARCH64_MOVW_UABS_G1:
    checkUInt(loc, val, 32, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G1_NC:
    or32le(loc, (val & 0xFFFF0000) >> 11);
    break;
  case R_AARCH64_MOVW_UABS_G2:
    checkUInt(loc, val, 48, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_UABS_G2_NC:
    or32le(loc, (val & 0xFFFF00000000) >> 27);
    break;
  case R_AARCH64_MOVW_UABS_G3:
    or32le(loc, (val & 0xFFFF000000000000) >> 43);
    break;
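
  // For illustration: the MOVW groups each select a 16-bit chunk of the value
  // (G0 bits 0-15, G1 bits 16-31, G2 bits 32-47, G3 bits 48-63). For example,
  // materializing 0x0123456789ABCDEF with a movz/movk sequence uses the
  // chunks #0xCDEF (G0), #0x89AB (G1), #0x4567 (G2) and #0x0123 (G3). The
  // signed (SABS/PREL/TPREL) variants below additionally flip movz/movn to
  // match the operand's sign; see writeSMovWImm.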
  case R_AARCH64_MOVW_PREL_G0:
  case R_AARCH64_MOVW_SABS_G0:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    checkInt(loc, val, 17, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G0_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    writeSMovWImm(loc, val);
    break;
  case R_AARCH64_MOVW_PREL_G1:
  case R_AARCH64_MOVW_SABS_G1:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    checkInt(loc, val, 33, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G1_NC:
  case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    writeSMovWImm(loc, val >> 16);
    break;
  case R_AARCH64_MOVW_PREL_G2:
  case R_AARCH64_MOVW_SABS_G2:
  case R_AARCH64_TLSLE_MOVW_TPREL_G2:
    checkInt(loc, val, 49, rel);
    [[fallthrough]];
  case R_AARCH64_MOVW_PREL_G2_NC:
    writeSMovWImm(loc, val >> 32);
    break;
  case R_AARCH64_MOVW_PREL_G3:
    writeSMovWImm(loc, val >> 48);
    break;
  case R_AARCH64_TSTBR14:
    checkInt(loc, val, 16, rel);
    or32le(loc, (val & 0xFFFC) << 3);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
    checkUInt(loc, val, 24, rel);
    or32AArch64Imm(loc, val >> 12);
    break;
  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
  case R_AARCH64_TLSDESC_ADD_LO12:
    or32AArch64Imm(loc, val);
    break;
  case R_AARCH64_TLSDESC:
    // For R_AARCH64_TLSDESC the addend is stored in the second 64-bit word.
    write64(loc + 8, val);
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}

void AArch64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  // TLSDESC Global-Dynamic relocations are of the form:
  //   adrp x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr  x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12]
  //   add  x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                     [R_AARCH64_TLSDESC_CALL]
  //   blr  x1
  // It can be relaxed to:
  //   movz x0, #0x0, lsl #16
  //   movk x0, #0x10
  //   nop
  //   nop
  checkUInt(loc, val, 32, rel);

  switch (rel.type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(loc, 0xd503201f); // nop
    return;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(loc, 0xd2a00000 | (((val >> 16) & 0xffff) << 5)); // movz
    return;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(loc, 0xf2800000 | ((val & 0xffff) << 5)); // movk
    return;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
  }
}

void AArch64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  // TLSDESC Global-Dynamic relocations are of the form:
  //   adrp x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
  //   ldr  x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12]
  //   add  x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12]
  //   .tlsdesccall                     [R_AARCH64_TLSDESC_CALL]
  //   blr  x1
  // It can be relaxed to:
  //   adrp x0, :gottprel:v
  //   ldr  x0, [x0, :gottprel_lo12:v]
  //   nop
  //   nop

  switch (rel.type) {
  case R_AARCH64_TLSDESC_ADD_LO12:
  case R_AARCH64_TLSDESC_CALL:
    write32le(loc, 0xd503201f); // nop
    break;
  case R_AARCH64_TLSDESC_ADR_PAGE21:
    write32le(loc, 0x90000000); // adrp
    relocateNoSym(loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, val);
    break;
  case R_AARCH64_TLSDESC_LD64_LO12:
    write32le(loc, 0xf9400000); // ldr
    relocateNoSym(loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, val);
    break;
  default:
    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
  }
}
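
// For illustration: relaxing IE to LE replaces the GOT load of the TP offset
// with an immediate move. With a (hypothetical) tprel offset of 0x12345:
//   adrp x0, :gottprel:v             ->  movz x0, #0x1, lsl #16
//   ldr  x0, [x0, :gottprel_lo12:v]  ->  movk x0, #0x2345
// preserving the destination register of the original instructions.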
void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
  checkUInt(loc, val, 32, rel);

  if (rel.type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
    // Generate MOVZ.
    uint32_t regNo = read32le(loc) & 0x1f;
    write32le(loc, (0xd2a00000 | regNo) | (((val >> 16) & 0xffff) << 5));
    return;
  }
  if (rel.type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
    // Generate MOVK.
    uint32_t regNo = read32le(loc) & 0x1f;
    write32le(loc, (0xf2800000 | regNo) | ((val & 0xffff) << 5));
    return;
  }
  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}

AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {
  if (!config->relax)
    return;
  // Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
  // always appear in pairs.
  size_t i = 0;
  const size_t size = relocs.size();
  for (; i != size; ++i) {
    if (relocs[i].type == R_AARCH64_ADR_GOT_PAGE) {
      if (i + 1 < size && relocs[i + 1].type == R_AARCH64_LD64_GOT_LO12_NC) {
        ++i;
        continue;
      }
      break;
    } else if (relocs[i].type == R_AARCH64_LD64_GOT_LO12_NC) {
      break;
    }
  }
  safeToRelaxAdrpLdr = i == size;
}
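
// For illustration: for a relocation stream such as
//   [ADR_GOT_PAGE, LD64_GOT_LO12_NC, ADR_GOT_PAGE, CALL26]
// the scan above stops at the second ADR_GOT_PAGE (it has no
// LD64_GOT_LO12_NC partner), so safeToRelaxAdrpLdr remains false and no GOT
// relaxation is attempted for the section.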

bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
                                     const Relocation &addRel, uint64_t secAddr,
                                     uint8_t *buf) const {
  // If the address of sym is within the range of ADR, we may relax
  //   ADRP xn, sym
  //   ADD  xn, xn, :lo12: sym
  // to
  //   NOP
  //   ADR  xn, sym
  if (!config->relax || adrpRel.type != R_AARCH64_ADR_PREL_PG_HI21 ||
      addRel.type != R_AARCH64_ADD_ABS_LO12_NC)
    return false;
  // Check if the relocations apply to consecutive instructions.
  if (adrpRel.offset + 4 != addRel.offset)
    return false;
  if (adrpRel.sym != addRel.sym)
    return false;
  if (adrpRel.addend != 0 || addRel.addend != 0)
    return false;

  uint32_t adrpInstr = read32le(buf + adrpRel.offset);
  uint32_t addInstr = read32le(buf + addRel.offset);
  // Check if the first instruction is ADRP and the second instruction is ADD.
  if ((adrpInstr & 0x9f000000) != 0x90000000 ||
      (addInstr & 0xffc00000) != 0x91000000)
    return false;
  uint32_t adrpDestReg = adrpInstr & 0x1f;
  uint32_t addDestReg = addInstr & 0x1f;
  uint32_t addSrcReg = (addInstr >> 5) & 0x1f;
  if (adrpDestReg != addDestReg || adrpDestReg != addSrcReg)
    return false;

  Symbol &sym = *adrpRel.sym;
  // Check if the address difference is within the +/- 1 MiB range.
  int64_t val = sym.getVA() - (secAddr + addRel.offset);
  if (val < -1024 * 1024 || val >= 1024 * 1024)
    return false;

  Relocation adrRel = {R_ABS, R_AARCH64_ADR_PREL_LO21, addRel.offset,
                       /*addend=*/0, &sym};
  // nop
  write32le(buf + adrpRel.offset, 0xd503201f);
  // adr x_<dest_reg>
  write32le(buf + adrRel.offset, 0x10000000 | adrpDestReg);
  target->relocate(buf + adrRel.offset, adrRel, val);
  return true;
}
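
// For illustration (addresses hypothetical): with sym at 0x21000 and the ADD
// at 0x20004, val = 0xFFC, which fits in ADR's 21-bit signed immediate, so
//   adrp x1, 0x21000 ; add x1, x1, #0x0
// becomes
//   nop              ; adr x1, 0x21000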

bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
                                     const Relocation &ldrRel, uint64_t secAddr,
                                     uint8_t *buf) const {
  if (!safeToRelaxAdrpLdr)
    return false;

  // If the definition of sym is not preemptible, we may
  // be able to relax
  //   ADRP xn, :got: sym
  //   LDR  xn, [xn, :got_lo12: sym]
  // to
  //   ADRP xn, sym
  //   ADD  xn, xn, :lo12: sym

  if (adrpRel.type != R_AARCH64_ADR_GOT_PAGE ||
      ldrRel.type != R_AARCH64_LD64_GOT_LO12_NC)
    return false;
  // Check if the relocations apply to consecutive instructions.
  if (adrpRel.offset + 4 != ldrRel.offset)
    return false;
  // Check if the relocations reference the same symbol and
  // skip undefined, preemptible and STT_GNU_IFUNC symbols.
  if (!adrpRel.sym || adrpRel.sym != ldrRel.sym || !adrpRel.sym->isDefined() ||
      adrpRel.sym->isPreemptible || adrpRel.sym->isGnuIFunc())
    return false;
  // Check if the addends of both relocations are zero.
  if (adrpRel.addend != 0 || ldrRel.addend != 0)
    return false;
  uint32_t adrpInstr = read32le(buf + adrpRel.offset);
  uint32_t ldrInstr = read32le(buf + ldrRel.offset);
  // Check if the first instruction is ADRP and the second instruction is LDR.
  if ((adrpInstr & 0x9f000000) != 0x90000000 ||
      (ldrInstr & 0x3b000000) != 0x39000000)
    return false;
  // Check the value of the sf bit.
  if (!(ldrInstr >> 31))
    return false;
  uint32_t adrpDestReg = adrpInstr & 0x1f;
  uint32_t ldrDestReg = ldrInstr & 0x1f;
  uint32_t ldrSrcReg = (ldrInstr >> 5) & 0x1f;
  // Check if ADRP and LDR use the same register.
  if (adrpDestReg != ldrDestReg || adrpDestReg != ldrSrcReg)
    return false;

  Symbol &sym = *adrpRel.sym;
  // GOT references to absolute symbols can't be relaxed to use ADRP/ADD in
  // position-independent code because these instructions produce a relative
  // address.
  if (config->isPic && !cast<Defined>(sym).section)
    return false;
  // Check if the address difference is within the +/- 4 GiB range.
  int64_t val =
      getAArch64Page(sym.getVA()) - getAArch64Page(secAddr + adrpRel.offset);
  if (val != llvm::SignExtend64(val, 33))
    return false;

  Relocation adrpSymRel = {R_AARCH64_PAGE_PC, R_AARCH64_ADR_PREL_PG_HI21,
                           adrpRel.offset, /*addend=*/0, &sym};
  Relocation addRel = {R_ABS, R_AARCH64_ADD_ABS_LO12_NC, ldrRel.offset,
                       /*addend=*/0, &sym};

  // adrp x_<dest_reg>
  write32le(buf + adrpSymRel.offset, 0x90000000 | adrpDestReg);
  // add x_<dest_reg>, x_<dest_reg>
  write32le(buf + addRel.offset, 0x91000000 | adrpDestReg | (adrpDestReg << 5));

  target->relocate(buf + adrpSymRel.offset, adrpSymRel,
                   SignExtend64(getAArch64Page(sym.getVA()) -
                                    getAArch64Page(secAddr + adrpSymRel.offset),
                                64));
  target->relocate(buf + addRel.offset, addRel, SignExtend64(sym.getVA(), 64));
  tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf);
  return true;
}

// Tagged symbols have upper address bits that are added by the dynamic loader,
// and thus need the full 64-bit GOT entry. Do not relax such symbols.
static bool needsGotForMemtag(const Relocation &rel) {
  return rel.sym->isTagged() && needsGot(rel.expr);
}
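
// In other words: even when a tagged symbol is non-preemptible and in range,
// an adrp+ldr GOT sequence must keep the GOT indirection; relaxing it to
// adrp+add would compute a PC-relative address without the memory tag that
// the dynamic loader stores in the GOT entry.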

void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
  uint64_t secAddr = sec.getOutputSection()->addr;
  if (auto *s = dyn_cast<InputSection>(&sec))
    secAddr += s->outSecOff;
  else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
    secAddr += ehIn->getParent()->outSecOff;
  AArch64Relaxer relaxer(sec.relocs());
  for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
    const Relocation &rel = sec.relocs()[i];
    uint8_t *loc = buf + rel.offset;
    const uint64_t val =
        sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
                             secAddr + rel.offset, *rel.sym, rel.expr);

    if (needsGotForMemtag(rel)) {
      relocate(loc, rel, val);
      continue;
    }

    switch (rel.expr) {
    case R_AARCH64_GOT_PAGE_PC:
      if (i + 1 < size &&
          relaxer.tryRelaxAdrpLdr(rel, sec.relocs()[i + 1], secAddr, buf)) {
        ++i;
        continue;
      }
      break;
    case R_AARCH64_PAGE_PC:
      if (i + 1 < size &&
          relaxer.tryRelaxAdrpAdd(rel, sec.relocs()[i + 1], secAddr, buf)) {
        ++i;
        continue;
      }
      break;
    case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    case R_RELAX_TLS_GD_TO_IE_ABS:
      relaxTlsGdToIe(loc, rel, val);
      continue;
    case R_RELAX_TLS_GD_TO_LE:
      relaxTlsGdToLe(loc, rel, val);
      continue;
    case R_RELAX_TLS_IE_TO_LE:
      relaxTlsIeToLe(loc, rel, val);
      continue;
    default:
      break;
    }
    relocate(loc, rel, val);
  }
}

// AArch64 may use security features in variant PLT sequences. These are:
// Pointer Authentication (PAC), introduced in armv8.3-a, and Branch Target
// Indicator (BTI), introduced in armv8.5-a. The additional instructions used
// in the variant PLT sequences are encoded in the hint space so they can be
// deployed on older architectures, which treat the instructions as a nop.
// PAC and BTI can be combined, leading to the following combinations:
// writePltHeader
// writePltHeaderBti (no PAC Header needed)
// writePlt
// writePltBti (BTI only)
// writePltPac (PAC only)
// writePltBtiPac (BTI and PAC)
//
// When PAC is enabled the dynamic loader signs the address that it places in
// the .got.plt using the pacia1716 instruction, which signs the value in x17
// using the modifier in x16. The static linker places autia1716 before the
// indirect branch to x17 to authenticate the address in x17 with the modifier
// in x16. This makes it more difficult for an attacker to modify the value in
// the .got.plt.
//
// When BTI is enabled all indirect branches must land on a bti instruction.
// The static linker must place a bti instruction at the start of any PLT entry
// that may be the target of an indirect branch. As the PLT entries call the
// lazy resolver indirectly, the PLT header must have a bti instruction at its
// start. In general a bti instruction is not needed for a PLT entry, as
// indirect calls are resolved to the function address and not the PLT entry
// for the function. There are a small number of cases where the PLT address
// can escape, such as taking the address of a function or ifunc via a
// non-GOT-generating relocation when a shared library refers to that symbol.
//
// We use the bti c variant of the instruction, which permits indirect branches
// (br) via x16/x17 and indirect function calls (blr) via any register. The ABI
// guarantees that all indirect branches from code requiring BTI protection
// will go via x16/x17.

namespace {
class AArch64BtiPac final : public AArch64 {
public:
  AArch64BtiPac();
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;

private:
  bool btiHeader; // bti instruction needed in PLT Header and Entry
  bool pacEntry;  // autia1716 instruction needed in PLT Entry
};
} // namespace

AArch64BtiPac::AArch64BtiPac() {
  btiHeader = (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
  // A BTI (Branch Target Indicator) PLT entry is only required if the address
  // of the PLT entry can be taken by the program, which permits an indirect
  // jump to the PLT entry. This can happen when the address of the PLT entry
  // for a function is canonicalised due to the address of the function in an
  // executable being taken by a shared library, or due to a non-preemptible
  // ifunc being referenced by non-GOT-generating, non-PLT-generating
  // relocations.
  // The PAC PLT entries require dynamic loader support and this isn't known
  // from properties in the objects, so we use the command line flag.
  pacEntry = config->zPacPlt;

  if (btiHeader || pacEntry) {
    pltEntrySize = 24;
    ipltEntrySize = 24;
  }
}
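
// For illustration: with BTI or PAC enabled, every PLT entry below is 24
// bytes. A BTI-only entry is
//   bti c; adrp x16, ...; ldr x17, ...; add x16, ...; br x17; nop
// while a PAC-only entry is
//   adrp x16, ...; ldr x17, ...; add x16, ...; autia1716; br x17; nop
// (see writePlt below for when each form is chosen).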

void AArch64BtiPac::writePltHeader(uint8_t *buf) const {
  const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
  const uint8_t pltData[] = {
      0xf0, 0x7b, 0xbf, 0xa9, // stp  x16, x30, [sp,#-16]!
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[2]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[2]))]
      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.got.plt[2]))
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5, // nop
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop

  uint64_t got = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();

  if (btiHeader) {
    // PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C
    // instruction.
    memcpy(buf, btiData, sizeof(btiData));
    buf += sizeof(btiData);
    plt += sizeof(btiData);
  }
  memcpy(buf, pltData, sizeof(pltData));

  relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(got + 16) - getAArch64Page(plt + 8));
  relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
  relocateNoSym(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
  if (!btiHeader)
    // We didn't add the BTI c instruction so round out size with NOP.
    memcpy(buf + sizeof(pltData), nopData, sizeof(nopData));
}
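
// Note: the header above is always 32 bytes (pltHeaderSize). Its 28 bytes of
// pltData are padded to 32 either by the leading bti c (when BTI is in use)
// or by the trailing nop appended when no bti was emitted.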

void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {
  // The PLT entry is of the form:
  // [btiData] addrInst (pacBr | stdBr) [nopData]
  const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
  const uint8_t addrInst[] = {
      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.got.plt[n]))
      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.got.plt[n]))]
      0x10, 0x02, 0x00, 0x91  // add  x16, x16, Offset(&(.got.plt[n]))
  };
  const uint8_t pacBr[] = {
      0x9f, 0x21, 0x03, 0xd5, // autia1716
      0x20, 0x02, 0x1f, 0xd6  // br   x17
  };
  const uint8_t stdBr[] = {
      0x20, 0x02, 0x1f, 0xd6, // br   x17
      0x1f, 0x20, 0x03, 0xd5  // nop
  };
  const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop

  // NEEDS_COPY indicates a non-ifunc canonical PLT entry whose address may
  // escape to shared objects. isInIplt indicates a non-preemptible ifunc. Its
  // address may escape if referenced by a direct relocation. If relative
  // vtables are used, and the vtable is in a shared object, the offsets will
  // be to the PLT entry. The condition is conservative.
  bool hasBti = btiHeader &&
                (sym.hasFlag(NEEDS_COPY) || sym.isInIplt || sym.thunkAccessed);
  if (hasBti) {
    memcpy(buf, btiData, sizeof(btiData));
    buf += sizeof(btiData);
    pltEntryAddr += sizeof(btiData);
  }

  uint64_t gotPltEntryAddr = sym.getGotPltVA();
  memcpy(buf, addrInst, sizeof(addrInst));
  relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
  relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
  relocateNoSym(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);

  if (pacEntry)
    memcpy(buf + sizeof(addrInst), pacBr, sizeof(pacBr));
  else
    memcpy(buf + sizeof(addrInst), stdBr, sizeof(stdBr));
  if (!hasBti)
    // We didn't add the BTI c instruction so round out size with NOP.
    memcpy(buf + sizeof(addrInst) + sizeof(stdBr), nopData, sizeof(nopData));
}

static TargetInfo *getTargetInfo() {
  if ((config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ||
      config->zPacPlt) {
    static AArch64BtiPac t;
    return &t;
  }
  static AArch64 t;
  return &t;
}

TargetInfo *elf::getAArch64TargetInfo() { return getTargetInfo(); }

template <class ELFT>
static void
addTaggedSymbolReferences(InputSectionBase &sec,
                          DenseMap<Symbol *, unsigned> &referenceCount) {
  assert(sec.type == SHT_AARCH64_MEMTAG_GLOBALS_STATIC);

  const RelsOrRelas<ELFT> rels = sec.relsOrRelas<ELFT>();
  if (rels.areRelocsRel())
    error("non-RELA relocations are not allowed with memtag globals");

  for (const typename ELFT::Rela &rel : rels.relas) {
    Symbol &sym = sec.getFile<ELFT>()->getRelocTargetSym(rel);
    // Linker-synthesized symbols such as __executable_start may be referenced
    // as tagged in input object files, and we don't want them to be tagged. A
    // cheap way to exclude them is the type check, as their type is
    // STT_NOTYPE. In addition, this saves us from checking untaggable symbols,
    // like functions or TLS symbols.
    if (sym.type != STT_OBJECT)
      continue;
    // STB_LOCAL symbols can't be referenced from outside the object file, and
    // thus don't need to be checked for references from other object files.
    if (sym.binding == STB_LOCAL) {
      sym.setIsTagged(true);
      continue;
    }
    ++referenceCount[&sym];
  }
  sec.markDead();
}
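
// For illustration: if symbol g is referenced from three object files but only
// two of their SHT_AARCH64_MEMTAG_GLOBALS_STATIC sections mark it as tagged,
// the count collected above (2) is exhausted before the symbol-table walk in
// createTaggedSymbols finishes, so g is demoted to untagged rather than
// risking a tag mismatch at runtime.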

// A tagged symbol must be denoted as being tagged by all references and the
// chosen definition. For simplicity, here, it must also be denoted as tagged
// for all definitions. Otherwise:
//
//  1. A tagged definition can be used by an untagged declaration, in which
//     case the untagged access may be PC-relative, causing a tag mismatch at
//     runtime.
//  2. An untagged definition can be used by a tagged declaration, where the
//     compiler has taken advantage of the increased alignment of the tagged
//     declaration, but the alignment at runtime is wrong, causing a fault.
//
// Ideally, this isn't a problem, as any TU that imports or exports tagged
// symbols should also be built with tagging. But, to handle these cases, we
// demote the symbol to be untagged.
void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
  assert(hasMemtag());

  // First, collect all symbols that are marked as tagged, and count how many
  // times they're marked as tagged.
  DenseMap<Symbol *, unsigned> taggedSymbolReferenceCount;
  for (InputFile *file : files) {
    if (file->kind() != InputFile::ObjKind)
      continue;
    for (InputSectionBase *section : file->getSections()) {
      if (!section || section->type != SHT_AARCH64_MEMTAG_GLOBALS_STATIC ||
          section == &InputSection::discarded)
        continue;
      invokeELFT(addTaggedSymbolReferences, *section,
                 taggedSymbolReferenceCount);
    }
  }

  // Now, go through all the symbols. If the number of declarations and
  // definitions of a symbol exceeds the number of times it's marked as tagged,
  // it means we have an object file that uses the untagged variant of the
  // symbol.
  for (InputFile *file : files) {
    if (file->kind() != InputFile::BinaryKind &&
        file->kind() != InputFile::ObjKind)
      continue;

    for (Symbol *symbol : file->getSymbols()) {
      // See `addTaggedSymbolReferences` for more details.
      if (symbol->type != STT_OBJECT || symbol->binding == STB_LOCAL)
        continue;
      auto it = taggedSymbolReferenceCount.find(symbol);
      if (it == taggedSymbolReferenceCount.end())
        continue;
      unsigned &remainingAllowedTaggedRefs = it->second;
      if (remainingAllowedTaggedRefs == 0) {
        taggedSymbolReferenceCount.erase(it);
        continue;
      }
      --remainingAllowedTaggedRefs;
    }
  }

  // `addTaggedSymbolReferences` has already checked that we have RELA
  // relocations; the only other way to get written addends is with
  // --apply-dynamic-relocs.
  if (!taggedSymbolReferenceCount.empty() && config->writeAddends)
    error("--apply-dynamic-relocs cannot be used with MTE globals");

  // Now `taggedSymbolReferenceCount` should only contain symbols that are
  // marked as tagged exactly as many times as they are referenced, meaning all
  // uses are tagged.
  for (auto &[symbol, remainingTaggedRefs] : taggedSymbolReferenceCount) {
    assert(remainingTaggedRefs == 0 &&
           "Symbol is defined as tagged more times than it's used");
    symbol->setIsTagged(true);
  }
}