//===- X86_64.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"

using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct X86_64 : TargetInfo {
  X86_64();

  int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
                            const relocation_info) const override;
  void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
                   uint64_t relocVA) const override;

  void writeStub(uint8_t *buf, const Symbol &) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
                            uint64_t entryAddr) const override;

  void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
  const RelocAttrs &getRelocAttrs(uint8_t type) const override;
  uint64_t getPageSize() const override { return 4 * 1024; }
};

} // namespace

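// Attributes for each X86_64_RELOC_* type, indexed by r_type. The generic
// relocation-parsing code consults these bits to validate relocation records
// and to decide how each one is handled.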
const RelocAttrs &X86_64::getRelocAttrs(uint8_t type) const {
  static const std::array<RelocAttrs, 10> relocAttrsArray{{
#define B(x) RelocAttrBits::x
      {"UNSIGNED",
       B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
      {"SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
      {"BRANCH", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
      {"GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
      {"GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
      {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
      {"SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
      {"SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
      {"SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
      {"TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
#undef B
  }};
  assert(type < relocAttrsArray.size() && "invalid relocation type");
  if (type >= relocAttrsArray.size())
    return invalidRelocAttrs;
  return relocAttrsArray[type];
}

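// X86_64_RELOC_SIGNED_{1,2,4} are pcrel relocations whose 4-byte fixup is
// followed by 1, 2 or 4 bytes of immediate data within the same instruction,
// so the PC the hardware uses lies that many bytes past the end of the fixup.
// getEmbeddedAddend() and relocateOne() both apply this offset so the two
// stay in sync.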
static int pcrelOffset(uint8_t type) {
  switch (type) {
  case X86_64_RELOC_SIGNED_1:
    return 1;
  case X86_64_RELOC_SIGNED_2:
    return 2;
  case X86_64_RELOC_SIGNED_4:
    return 4;
  default:
    return 0;
  }
}

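// Reads the addend that the compiler baked into the instruction stream at the
// relocation site. The SIGNED_{1,2,4} bias is folded in here so that it pairs
// up with the PC adjustment relocateOne() makes for the same types.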
int64_t X86_64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
                                  relocation_info rel) const {
  auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
  const uint8_t *loc = buf + offset + rel.r_address;

  switch (rel.r_length) {
  case 2:
    return static_cast<int32_t>(read32le(loc)) + pcrelOffset(rel.r_type);
  case 3:
    return read64le(loc) + pcrelOffset(rel.r_type);
  default:
    llvm_unreachable("invalid r_length");
  }
}

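// Patches the fixup at `loc` with the final value. For pcrel relocations the
// reference point is the end of the 4-byte fixup plus any SIGNED_{1,2,4}
// bias; 32-bit fixups are range-checked before being written.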
void X86_64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
                         uint64_t relocVA) const {
  if (r.pcrel) {
    uint64_t pc = relocVA + 4 + pcrelOffset(r.type);
    value -= pc;
  }

  switch (r.length) {
  case 2:
    if (r.type == X86_64_RELOC_UNSIGNED)
      checkUInt(r, value, 32);
    else
      checkInt(r, value, 32);
    write32le(loc, value);
    break;
  case 3:
    write64le(loc, value);
    break;
  default:
    llvm_unreachable("invalid r_length");
  }
}

// The following methods emit a number of assembly sequences with RIP-relative
// addressing. Note that RIP-relative addressing on X86-64 has the RIP pointing
// to the next instruction, not the current instruction, so we always have to
// account for the current instruction's size when calculating offsets.
// writeRipRelative helps with that.
//
// bufAddr:  The virtual address corresponding to buf[0].
// bufOff:   The offset within buf of the next instruction.
// destAddr: The destination address that the current instruction references.
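//
// For example, the 6-byte stub `jmpq *disp32(%rip)` below ends up with
// disp32 = destAddr - (bufAddr + 6): writeStub() passes bufOff == sizeof(stub),
// so the RIP used is the address just past the end of the jmp.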
static void writeRipRelative(SymbolDiagnostic d, uint8_t *buf, uint64_t bufAddr,
                             uint64_t bufOff, uint64_t destAddr) {
  uint64_t rip = bufAddr + bufOff;
  checkInt(d, destAddr - rip, 32);
  // For the instructions we care about, the RIP-relative address is always
  // stored in the last 4 bytes of the instruction.
  write32le(buf + bufOff - 4, destAddr - rip);
}

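// A stub for a lazily bound symbol: one indirect jump through the symbol's
// __la_symbol_ptr slot. The slot initially points back into the stub helper,
// so the first call goes through dyld_stub_binder, which then overwrites the
// slot with the symbol's real address.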
static constexpr uint8_t stub[] = {
    0xff, 0x25, 0, 0, 0, 0, // jmpq *__la_symbol_ptr(%rip)
};

void X86_64::writeStub(uint8_t *buf, const Symbol &sym) const {
  memcpy(buf, stub, 2); // just copy the two nonzero bytes
  uint64_t stubAddr = in.stubs->addr + sym.stubsIndex * sizeof(stub);
  writeRipRelative({&sym, "stub"}, buf, stubAddr, sizeof(stub),
                   in.lazyPointers->addr + sym.stubsIndex * LP64::wordSize);
}

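// The stub helper is a shared header followed by one entry per lazily bound
// symbol. The header pushes a pointer to this image's ImageLoaderCache and
// tail-calls dyld_stub_binder through the GOT; each entry (further below)
// pushes its symbol's lazy-bind offset and jumps back to this header.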
static constexpr uint8_t stubHelperHeader[] = {
    0x4c, 0x8d, 0x1d, 0, 0, 0, 0, // 0x0: leaq ImageLoaderCache(%rip), %r11
    0x41, 0x53,                   // 0x7: pushq %r11
    0xff, 0x25, 0,    0, 0, 0,    // 0x9: jmpq *dyld_stub_binder@GOT(%rip)
    0x90,                         // 0xf: nop
};

void X86_64::writeStubHelperHeader(uint8_t *buf) const {
  memcpy(buf, stubHelperHeader, sizeof(stubHelperHeader));
  SymbolDiagnostic d = {nullptr, "stub helper header"};
  writeRipRelative(d, buf, in.stubHelper->addr, 7,
                   in.imageLoaderCache->getVA());
  writeRipRelative(d, buf, in.stubHelper->addr, 0xf,
                   in.got->addr +
                       in.stubHelper->stubBinder->gotIndex * LP64::wordSize);
}

static constexpr uint8_t stubHelperEntry[] = {
    0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
    0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
};

void X86_64::writeStubHelperEntry(uint8_t *buf, const DylibSymbol &sym,
                                  uint64_t entryAddr) const {
  memcpy(buf, stubHelperEntry, sizeof(stubHelperEntry));
  write32le(buf + 1, sym.lazyBindOffset);
  writeRipRelative({&sym, "stub helper"}, buf, entryAddr,
                   sizeof(stubHelperEntry), in.stubHelper->addr);
}

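// Relaxes a GOT_LOAD when the linker decides the target does not need a GOT
// entry: `movq sym@GOTPCREL(%rip), %reg` (opcode 0x8b) becomes
// `leaq sym(%rip), %reg` (opcode 0x8d). MOV and LEA share the same ModRM
// encoding, so only the opcode byte (two bytes before the disp32 fixup) needs
// to change.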
void X86_64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
  // Convert MOVQ to LEAQ
  if (loc[-2] != 0x8b)
    error(getRelocAttrs(type).name + " reloc requires MOVQ instruction");
  loc[-2] = 0x8d;
}

X86_64::X86_64() : TargetInfo(LP64()) {
  cpuType = CPU_TYPE_X86_64;
  cpuSubtype = CPU_SUBTYPE_X86_64_ALL;

  stubSize = sizeof(stub);
  stubHelperHeaderSize = sizeof(stubHelperHeader);
  stubHelperEntrySize = sizeof(stubHelperEntry);
}

TargetInfo *macho::createX86_64TargetInfo() {
  static X86_64 t;
  return &t;
}