//===---- MachO_x86_64.cpp - JIT linker implementation for MachO/x86-64 ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachO/x86-64 jit-link implementation.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
#include "llvm/ExecutionEngine/JITLink/x86_64.h"

#include "MachOLinkGraphBuilder.h"
#include "PerGraphGOTAndPLTStubsBuilder.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;
using namespace llvm::jitlink;

namespace {

class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder {
public:
  MachOLinkGraphBuilder_x86_64(const object::MachOObjectFile &Obj)
      : MachOLinkGraphBuilder(Obj, Triple("x86_64-apple-darwin"),
                              x86_64::getEdgeKindName) {}

private:
  enum MachONormalizedRelocationType : unsigned {
    MachOBranch32,
    MachOPointer32,
    MachOPointer64,
    MachOPointer64Anon,
    MachOPCRel32,
    MachOPCRel32Minus1,
    MachOPCRel32Minus2,
    MachOPCRel32Minus4,
    MachOPCRel32Anon,
    MachOPCRel32Minus1Anon,
    MachOPCRel32Minus2Anon,
    MachOPCRel32Minus4Anon,
    MachOPCRel32GOTLoad,
    MachOPCRel32GOT,
    MachOPCRel32TLV,
    MachOSubtractor32,
    MachOSubtractor64,
  };

  static Expected<MachONormalizedRelocationType>
  getRelocKind(const MachO::relocation_info &RI) {
    switch (RI.r_type) {
    case MachO::X86_64_RELOC_UNSIGNED:
      if (!RI.r_pcrel) {
        if (RI.r_length == 3)
          return RI.r_extern ? MachOPointer64 : MachOPointer64Anon;
        else if (RI.r_extern && RI.r_length == 2)
          return MachOPointer32;
      }
      break;
    case MachO::X86_64_RELOC_SIGNED:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32 : MachOPCRel32Anon;
      break;
    case MachO::X86_64_RELOC_BRANCH:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOBranch32;
      break;
    case MachO::X86_64_RELOC_GOT_LOAD:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOPCRel32GOTLoad;
      break;
    case MachO::X86_64_RELOC_GOT:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOPCRel32GOT;
      break;
    case MachO::X86_64_RELOC_SUBTRACTOR:
      if (!RI.r_pcrel && RI.r_extern) {
        if (RI.r_length == 2)
          return MachOSubtractor32;
        else if (RI.r_length == 3)
          return MachOSubtractor64;
      }
      break;
    case MachO::X86_64_RELOC_SIGNED_1:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32Minus1 : MachOPCRel32Minus1Anon;
      break;
    case MachO::X86_64_RELOC_SIGNED_2:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32Minus2 : MachOPCRel32Minus2Anon;
      break;
    case MachO::X86_64_RELOC_SIGNED_4:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32Minus4 : MachOPCRel32Minus4Anon;
      break;
    case MachO::X86_64_RELOC_TLV:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOPCRel32TLV;
      break;
    }

    return make_error<JITLinkError>(
        "Unsupported x86-64 relocation: address=" +
        formatv("{0:x8}", RI.r_address) +
        ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
        ", kind=" + formatv("{0:x1}", RI.r_type) +
        ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
        ", extern=" + (RI.r_extern ? "true" : "false") +
        ", length=" + formatv("{0:d}", RI.r_length));
  }

  using PairRelocInfo = std::tuple<Edge::Kind, Symbol *, uint64_t>;

  // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
  // returns the edge kind and addend to be used.
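  //
  // A SUBTRACTOR/UNSIGNED pair encodes the expression
  // "UNSIGNED-target - SUBTRACTOR-target + constant", where the constant is
  // stored in the fixup location itself.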
  Expected<PairRelocInfo> parsePairRelocation(
      Block &BlockToFix, MachONormalizedRelocationType SubtractorKind,
      const MachO::relocation_info &SubRI, JITTargetAddress FixupAddress,
      const char *FixupContent, object::relocation_iterator &UnsignedRelItr,
      object::relocation_iterator &RelEnd) {
    using namespace support;

    assert(((SubtractorKind == MachOSubtractor32 && SubRI.r_length == 2) ||
            (SubtractorKind == MachOSubtractor64 && SubRI.r_length == 3)) &&
           "Subtractor kind should match length");
    assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
    assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");

    if (UnsignedRelItr == RelEnd)
      return make_error<JITLinkError>("x86_64 SUBTRACTOR without paired "
                                      "UNSIGNED relocation");

    auto UnsignedRI = getRelocationInfo(UnsignedRelItr);

    if (SubRI.r_address != UnsignedRI.r_address)
      return make_error<JITLinkError>("x86_64 SUBTRACTOR and paired UNSIGNED "
                                      "point to different addresses");

    if (SubRI.r_length != UnsignedRI.r_length)
      return make_error<JITLinkError>("length of x86_64 SUBTRACTOR and paired "
                                      "UNSIGNED reloc must match");

    Symbol *FromSymbol;
    if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
      FromSymbol = FromSymbolOrErr->GraphSymbol;
    else
      return FromSymbolOrErr.takeError();

    // Read the current fixup value.
    uint64_t FixupValue = 0;
    if (SubRI.r_length == 3)
      FixupValue = *(const little64_t *)FixupContent;
    else
      FixupValue = *(const little32_t *)FixupContent;

    // Find 'ToSymbol' using symbol number or address, depending on whether the
    // paired UNSIGNED relocation is extern.
    Symbol *ToSymbol = nullptr;
    if (UnsignedRI.r_extern) {
      // Find target symbol by symbol index.
      if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
        ToSymbol = ToSymbolOrErr->GraphSymbol;
      else
        return ToSymbolOrErr.takeError();
    } else {
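      // Non-extern UNSIGNED relocations give a 1-based section index rather
      // than a symbol index; the stored constant is an address in that
      // section, so fold the located symbol's address out of it below.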
      auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
      if (!ToSymbolSec)
        return ToSymbolSec.takeError();
      ToSymbol = getSymbolByAddress(ToSymbolSec->Address);
      assert(ToSymbol && "No symbol for section");
      FixupValue -= ToSymbol->getAddress();
    }

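    // JITLink Delta edges compute Target - FixupAddress + Addend, and NegDelta
    // edges compute FixupAddress - Target + Addend, so pick the target symbol
    // and edge kind according to which operand's block contains the fixup.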
    Edge::Kind DeltaKind;
    Symbol *TargetSymbol;
    uint64_t Addend;
    if (&BlockToFix == &FromSymbol->getAddressable()) {
      TargetSymbol = ToSymbol;
      DeltaKind = (SubRI.r_length == 3) ? x86_64::Delta64 : x86_64::Delta32;
      Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
      // FIXME: handle extern 'from'.
    } else if (&BlockToFix == &ToSymbol->getAddressable()) {
      TargetSymbol = FromSymbol;
      DeltaKind =
          (SubRI.r_length == 3) ? x86_64::NegDelta64 : x86_64::NegDelta32;
      Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
    } else {
      // BlockToFix was neither FromSymbol nor ToSymbol.
      return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
                                      "either 'A' or 'B' (or a symbol in one "
                                      "of their alt-entry chains)");
    }

    return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
  }

  Error addRelocations() override {
    using namespace support;
    auto &Obj = getObject();

    LLVM_DEBUG(dbgs() << "Processing relocations:\n");

    for (auto &S : Obj.sections()) {

      JITTargetAddress SectionAddress = S.getAddress();

      // Skip relocations for virtual sections.
      if (S.isVirtual()) {
        if (S.relocation_begin() != S.relocation_end())
          return make_error<JITLinkError>("Virtual section contains "
                                          "relocations");
        continue;
      }

      // Skip relocations for debug symbols.
      {
        auto &NSec =
            getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
        if (!NSec.GraphSection) {
          LLVM_DEBUG({
            dbgs() << "  Skipping relocations for MachO section "
                   << NSec.SegName << "/" << NSec.SectName
                   << " which has no associated graph section\n";
          });
          continue;
        }
      }

      // Add relocations for section.
      for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
           RelItr != RelEnd; ++RelItr) {

        MachO::relocation_info RI = getRelocationInfo(RelItr);

        // Find the address of the value to fix up.
        JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;

        LLVM_DEBUG({
          auto &NSec =
              getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
          dbgs() << "  " << NSec.SectName << " + "
                 << formatv("{0:x8}", RI.r_address) << ":\n";
        });

        // Find the block that the fixup points to.
        Block *BlockToFix = nullptr;
        {
          auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
          if (!SymbolToFixOrErr)
            return SymbolToFixOrErr.takeError();
          BlockToFix = &SymbolToFixOrErr->getBlock();
        }

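        // r_length is the log2 of the fixup size in bytes, so 1 << r_length
        // bytes starting at FixupAddress must lie inside the block's content.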
        if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
            BlockToFix->getAddress() + BlockToFix->getContent().size())
          return make_error<JITLinkError>(
              "Relocation extends past end of fixup block");

        // Get a pointer to the fixup content.
        const char *FixupContent = BlockToFix->getContent().data() +
                                   (FixupAddress - BlockToFix->getAddress());

        size_t FixupOffset = FixupAddress - BlockToFix->getAddress();

        // The target symbol and addend will be populated by the switch below.
        Symbol *TargetSymbol = nullptr;
        uint64_t Addend = 0;

        // Sanity check the relocation kind.
        auto MachORelocKind = getRelocKind(RI);
        if (!MachORelocKind)
          return MachORelocKind.takeError();

        Edge::Kind Kind = Edge::Invalid;

        switch (*MachORelocKind) {
        case MachOBranch32:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent;
          Kind = x86_64::BranchPCRel32;
          break;
        case MachOPCRel32:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
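          // MachO stores PC-relative addends relative to the end of the 4-byte
          // immediate (the next instruction), while Delta32 edges are relative
          // to the fixup address itself, hence the -4 adjustment.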
          Addend = *(const little32_t *)FixupContent - 4;
          Kind = x86_64::Delta32;
          break;
        case MachOPCRel32GOTLoad:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent;
          Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable;
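          // The relaxable GOT load needs the three instruction bytes (REX
          // prefix, opcode, ModRM) that precede the 32-bit immediate to be in
          // this block, so a fixup within the first 3 bytes is malformed.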
          if (FixupOffset < 3)
            return make_error<JITLinkError>("GOTLD at invalid offset " +
                                            formatv("{0}", FixupOffset));
          break;
        case MachOPCRel32GOT:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent - 4;
          Kind = x86_64::RequestGOTAndTransformToDelta32;
          break;
        case MachOPCRel32TLV:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent;
          Kind = x86_64::RequestTLVPAndTransformToPCRel32TLVPLoadRelaxable;
          break;
        case MachOPointer32:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const ulittle32_t *)FixupContent;
          Kind = x86_64::Pointer32;
          break;
        case MachOPointer64:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const ulittle64_t *)FixupContent;
          Kind = x86_64::Pointer64;
          break;
        case MachOPointer64Anon: {
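          // Non-extern ("anonymous") pointer: the fixup content already holds
          // the target address, so recover the containing symbol and keep the
          // remaining difference as the addend.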
          JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
          if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
            TargetSymbol = &*TargetSymbolOrErr;
          else
            return TargetSymbolOrErr.takeError();
          Addend = TargetAddress - TargetSymbol->getAddress();
          Kind = x86_64::Pointer64;
          break;
        }
        case MachOPCRel32Minus1:
        case MachOPCRel32Minus2:
        case MachOPCRel32Minus4:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent - 4;
          Kind = x86_64::Delta32;
          break;
        case MachOPCRel32Anon: {
          JITTargetAddress TargetAddress =
              FixupAddress + 4 + *(const little32_t *)FixupContent;
          if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
            TargetSymbol = &*TargetSymbolOrErr;
          else
            return TargetSymbolOrErr.takeError();
          Addend = TargetAddress - TargetSymbol->getAddress() - 4;
          Kind = x86_64::Delta32;
          break;
        }
        case MachOPCRel32Minus1Anon:
        case MachOPCRel32Minus2Anon:
        case MachOPCRel32Minus4Anon: {
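          // SIGNED_{1,2,4} relocations are used when 1, 2 or 4 instruction
          // bytes follow the 32-bit immediate; the kind's distance from
          // MachOPCRel32Minus1Anon recovers that trailing byte count.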
          JITTargetAddress Delta =
              4 + static_cast<JITTargetAddress>(
                      1ULL << (*MachORelocKind - MachOPCRel32Minus1Anon));
          JITTargetAddress TargetAddress =
              FixupAddress + Delta + *(const little32_t *)FixupContent;
          if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
            TargetSymbol = &*TargetSymbolOrErr;
          else
            return TargetSymbolOrErr.takeError();
          Addend = TargetAddress - TargetSymbol->getAddress() - Delta;
          Kind = x86_64::Delta32;
          break;
        }
        case MachOSubtractor32:
        case MachOSubtractor64: {
          // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
          // parsePairRelocation handles the paired reloc, and returns the
          // edge kind to be used (either Delta32/Delta64, or
          // NegDelta32/NegDelta64, depending on the direction of the
          // subtraction) along with the addend.
          auto PairInfo =
              parsePairRelocation(*BlockToFix, *MachORelocKind, RI,
                                  FixupAddress, FixupContent, ++RelItr, RelEnd);
          if (!PairInfo)
            return PairInfo.takeError();
          std::tie(Kind, TargetSymbol, Addend) = *PairInfo;
          assert(TargetSymbol && "No target symbol from parsePairRelocation?");
          break;
        }
        }

        LLVM_DEBUG({
          dbgs() << "    ";
          Edge GE(Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
                  Addend);
          printEdge(dbgs(), *BlockToFix, GE, x86_64::getEdgeKindName(Kind));
          dbgs() << "\n";
        });
        BlockToFix->addEdge(Kind, FixupAddress - BlockToFix->getAddress(),
                            *TargetSymbol, Addend);
      }
    }
    return Error::success();
  }
};

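// Builds GOT entries and PLT-style jump stubs for this graph: each GOT-request
// edge gets an anonymous pointer slot in a "$__GOT" section, and each branch
// to an external symbol gets a jump stub in "$__STUBS" that loads its target
// from the corresponding GOT entry.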
class PerGraphGOTAndPLTStubsBuilder_MachO_x86_64
    : public PerGraphGOTAndPLTStubsBuilder<
          PerGraphGOTAndPLTStubsBuilder_MachO_x86_64> {
public:

  using PerGraphGOTAndPLTStubsBuilder<
      PerGraphGOTAndPLTStubsBuilder_MachO_x86_64>::
      PerGraphGOTAndPLTStubsBuilder;

  bool isGOTEdgeToFix(Edge &E) const {
    return E.getKind() == x86_64::RequestGOTAndTransformToDelta32 ||
           E.getKind() ==
               x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable;
  }

  Symbol &createGOTEntry(Symbol &Target) {
    return x86_64::createAnonymousPointer(G, getGOTSection(), &Target);
  }

  void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
    // Fix the edge kind.
    switch (E.getKind()) {
    case x86_64::RequestGOTAndTransformToDelta32:
      E.setKind(x86_64::Delta32);
      break;
    case x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
      E.setKind(x86_64::PCRel32GOTLoadRelaxable);
      break;
    default:
      llvm_unreachable("Not a GOT transform edge");
    }
    // Fix the target, leave the addend as-is.
    E.setTarget(GOTEntry);
  }

  bool isExternalBranchEdge(Edge &E) {
    return E.getKind() == x86_64::BranchPCRel32 && E.getTarget().isExternal();
  }

  Symbol &createPLTStub(Symbol &Target) {
    return x86_64::createAnonymousPointerJumpStub(G, getStubsSection(),
                                                  getGOTEntry(Target));
  }

  void fixPLTEdge(Edge &E, Symbol &Stub) {
    assert(E.getKind() == x86_64::BranchPCRel32 && "Not a Branch32 edge?");
    assert(E.getAddend() == 0 &&
           "BranchPCRel32 edge has unexpected addend value");

    // Set the edge kind to BranchPCRel32ToPtrJumpStubRelaxable. We will use
    // this to check for stub optimization opportunities in the
    // optimizeMachO_x86_64_GOTAndStubs pass below.
    E.setKind(x86_64::BranchPCRel32ToPtrJumpStubRelaxable);
    E.setTarget(Stub);
  }

private:
  Section &getGOTSection() {
    if (!GOTSection)
      GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
    return *GOTSection;
  }

  Section &getStubsSection() {
    if (!StubsSection) {
      auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
          sys::Memory::MF_READ | sys::Memory::MF_EXEC);
      StubsSection = &G.createSection("$__STUBS", StubsProt);
    }
    return *StubsSection;
  }

  Section *GOTSection = nullptr;
  Section *StubsSection = nullptr;
};

} // namespace

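// Pre-fixup optimization pass: where the final target turns out to be within
// 32-bit PC-relative range, rewrite relaxable GOT loads as LEAs of the target
// and retarget relaxable stub branches directly at the target, bypassing the
// GOT entry / jump stub.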
static Error optimizeMachO_x86_64_GOTAndStubs(LinkGraph &G) {
  LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");

  for (auto *B : G.blocks())
    for (auto &E : B->edges())
      if (E.getKind() == x86_64::PCRel32GOTLoadRelaxable) {
        assert(E.getOffset() >= 3 && "GOT edge occurs too early in block");

        // Optimize GOT references.
        auto &GOTBlock = E.getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT entry block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT entry should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
        JITTargetAddress TargetAddr = GOTTarget.getAddress();

        // Check that this is a recognized MOV instruction.
        // FIXME: Can we assume this?
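        // 0x48 0x8b is REX.W + MOV r64, r/m64, i.e. a "movq ...(%rip), %reg"
        // whose 32-bit displacement occupies the bytes at the edge offset.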
        constexpr uint8_t MOVQRIPRel[] = {0x48, 0x8b};
        if (strncmp(B->getContent().data() + E.getOffset() - 3,
                    reinterpret_cast<const char *>(MOVQRIPRel), 2) != 0)
          continue;

        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (Displacement >= std::numeric_limits<int32_t>::min() &&
            Displacement <= std::numeric_limits<int32_t>::max()) {
          E.setTarget(GOTTarget);
          E.setKind(x86_64::Delta32);
          E.setAddend(E.getAddend() - 4);
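          // Rewrite the opcode byte from 0x8b (MOV) to 0x8d (LEA), keeping the
          // REX.W prefix, so the GOT load becomes a direct LEA of the target.
          // The addend was reduced by 4 above because Delta32 is measured from
          // the fixup address rather than from the next instruction.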
          char *BlockData = B->getMutableContent(G).data();
          BlockData[E.getOffset() - 2] = (char)0x8d;
          LLVM_DEBUG({
            dbgs() << "  Replaced GOT load with LEA:\n    ";
            printEdge(dbgs(), *B, E, x86_64::getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      } else if (E.getKind() == x86_64::BranchPCRel32ToPtrJumpStubRelaxable) {
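        // Try to bypass the stub: if the target recorded in the stub's GOT
        // entry is within 32-bit branch range, call it directly.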
        auto &StubBlock = E.getTarget().getBlock();
        assert(StubBlock.getSize() == sizeof(x86_64::PointerJumpStubContent) &&
               "Stub block should be stub sized");
        assert(StubBlock.edges_size() == 1 &&
               "Stub block should only have one outgoing edge");

        auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT block should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
        JITTargetAddress TargetAddr = GOTTarget.getAddress();

        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (Displacement >= std::numeric_limits<int32_t>::min() &&
            Displacement <= std::numeric_limits<int32_t>::max()) {
          E.setKind(x86_64::BranchPCRel32);
          E.setTarget(GOTTarget);
          LLVM_DEBUG({
            dbgs() << "  Replaced stub branch with direct branch:\n    ";
            printEdge(dbgs(), *B, E, x86_64::getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      }

  return Error::success();
}

namespace llvm {
namespace jitlink {

class MachOJITLinker_x86_64 : public JITLinker<MachOJITLinker_x86_64> {
  friend class JITLinker<MachOJITLinker_x86_64>;

public:
  MachOJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
                        std::unique_ptr<LinkGraph> G,
                        PassConfiguration PassConfig)
      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}

private:
  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
    return x86_64::applyFixup(G, B, E);
  }
};

Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromMachOObject_x86_64(MemoryBufferRef ObjectBuffer) {
  auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
  if (!MachOObj)
    return MachOObj.takeError();
  return MachOLinkGraphBuilder_x86_64(**MachOObj).buildGraph();
}

void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
                       std::unique_ptr<JITLinkContext> Ctx) {

  PassConfiguration Config;

  if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
    // Add eh-frame passes.
    Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_x86_64());
    Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_x86_64());

    // Add a mark-live pass.
    if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
      Config.PrePrunePasses.push_back(std::move(MarkLive));
    else
      Config.PrePrunePasses.push_back(markAllSymbolsLive);

    // Add an in-place GOT/Stubs pass.
    Config.PostPrunePasses.push_back(
        PerGraphGOTAndPLTStubsBuilder_MachO_x86_64::asPass);

    // Add GOT/Stubs optimizer pass.
    Config.PreFixupPasses.push_back(optimizeMachO_x86_64_GOTAndStubs);
  }

  if (auto Err = Ctx->modifyPassConfig(*G, Config))
    return Ctx->notifyFailed(std::move(Err));

  // Construct a JITLinker and run the link function.
  MachOJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
}

LinkGraphPassFunction createEHFrameSplitterPass_MachO_x86_64() {
  return EHFrameSplitter("__TEXT,__eh_frame");
}

LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_x86_64() {
  return EHFrameEdgeFixer("__TEXT,__eh_frame", x86_64::PointerSize,
                          x86_64::Delta64, x86_64::Delta32, x86_64::NegDelta32);
}

} // end namespace jitlink
} // end namespace llvm
639