//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;

namespace {

// FIXME: Remove this copy of CWrapperFunctionResult as soon as JITLink can
// depend on shared utils from Orc.

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
union CWrapperFunctionResultDataUnion {
  char *ValuePtr;
  char Value[sizeof(ValuePtr)];
};

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
typedef struct {
  CWrapperFunctionResultDataUnion Data;
  size_t Size;
} CWrapperFunctionResult;

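// Convert a CWrapperFunctionResult into an llvm::Error, freeing any
// out-of-line result buffer in the process.
//
// Layout handled below (mirroring compiler-rt/lib/orc/c-api.h): results that
// fit in the data union are stored in-line in Value, larger ones out-of-line
// via ValuePtr. A non-zero first content byte marks an error whose message
// follows the flag byte and a uint64_t field; a zero Size with a non-null
// ValuePtr carries an out-of-band error as a heap-allocated C string.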
Error toError(CWrapperFunctionResult R) {
  bool HasError = false;
  std::string ErrMsg;
  if (R.Size) {
    bool Large = R.Size > sizeof(CWrapperFunctionResultDataUnion);
    char *Content = Large ? R.Data.ValuePtr : R.Data.Value;
    if (Content[0]) {
      HasError = true;
      constexpr unsigned StrStart = 1 + sizeof(uint64_t);
      ErrMsg.resize(R.Size - StrStart);
      memcpy(&ErrMsg[0], Content + StrStart, R.Size - StrStart);
    }
    if (Large)
      free(R.Data.ValuePtr);
  } else if (R.Data.ValuePtr) {
    HasError = true;
    ErrMsg = R.Data.ValuePtr;
    free(R.Data.ValuePtr);
  }

  if (HasError)
    return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
  return Error::success();
}
} // namespace

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;

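// Invoke a single allocation action: treat FnAddr as a wrapper function
// taking (const void *ArgData, size_t ArgSize), pass it the action's context
// buffer, and translate the returned CWrapperFunctionResult into an Error.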
static Error runAllocAction(JITLinkMemoryManager::AllocActionCall &C) {
  using WrapperFnTy = CWrapperFunctionResult (*)(const void *, size_t);
  auto *Fn = jitTargetAddressToPointer<WrapperFnTy>(C.FnAddr);

  return toError(Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
                    static_cast<size_t>(C.CtxSize)));
}

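// Build the segment map for a graph: blocks from every non-empty section are
// bucketed by (memory protection, dealloc policy) AllocGroup, with content
// and zero-fill blocks kept in separate lists so that zero-fill memory can be
// laid out after the content within each segment.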
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (empty(Sec.blocks()))
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Order the blocks within each segment and compute segment content sizes,
  // zero-fill sizes, and alignments.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}

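// Compute the total page-aligned sizes needed to lay all segments out
// contiguously, split into standard and finalize totals. Fails if any
// segment requires an alignment greater than the page size.
//
// For example, with a 0x1000-byte page, a segment holding 0x1234 bytes of
// content and 0x20 bytes of zero-fill contributes
// alignTo(0x1254, 0x1000) == 0x2000 to its total.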
Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;

  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}

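// Apply the layout chosen by the memory manager: for each segment, walk the
// content blocks, assigning each a target address from Seg.Addr and copying
// its bytes into the segment's working memory (repointing the block at that
// working memory), then assign addresses to the zero-fill blocks that follow.
// The caller must have set Seg.Addr and Seg.WorkingMem before calling this.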
Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at
      // working memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}

JITLinkMemoryManager::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}

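// Create a SimpleSegmentAlloc by building a throwaway LinkGraph containing
// one section (and at most one mutable-content block) per requested
// AllocGroup, then handing that graph to the memory manager's allocate
// method. The resulting blocks give clients direct access to the working
// memory and target address of each segment.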
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");
  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  AllocGroupSmallMap<Block *> ContentBlocks;

  JITTargetAddress NextAddr = 0x100000;
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

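    // Index into AGSectionNames: the low three bits come from the MemProt
    // value (R=1, W=2, X=4, matching the table above) and bit 3 is set for
    // the finalize dealloc policy.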
    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    if (Seg.ContentSize != 0) {
      NextAddr = alignTo(NextAddr, Seg.ContentAlign);
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since order-of-argument-eval isn't specified.
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}

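// Blocking convenience wrapper around the callback-based Create above.
//
// Illustrative usage sketch only. The SegmentMap indexing below mirrors the
// AllocGroupSmallMap use elsewhere in this file, but the exact Segment
// initializer and MemProt expressions are assumptions, not code from this
// file:
//
//   SimpleSegmentAlloc::SegmentMap SM;
//   SM[MemProt::Read | MemProt::Exec] = {CodeSize, Align(16)};
//   auto Alloc = SimpleSegmentAlloc::Create(MemMgr, /*JD=*/nullptr,
//                                           std::move(SM));
//   if (!Alloc)
//     return Alloc.takeError();
//   auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
//   // ... write code into SegInfo.WorkingMem, then finalize the allocation.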
Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() {}

SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}

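// In-flight allocation for InProcessMemoryManager. finalize() applies the
// requested memory protections, runs any finalize actions (recording the
// corresponding dealloc actions), releases the finalize-lifetime slab, and
// hands back a FinalizedAlloc; abandon() simply releases both slabs.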
class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    // FIXME: Roll back previous successful actions on failure.
    std::vector<AllocActionCall> DeallocActions;
    DeallocActions.reserve(G.allocActions().size());
    for (auto &ActPair : G.allocActions()) {
      if (ActPair.Finalize.FnAddr)
        if (auto Err = runAllocAction(ActPair.Finalize)) {
          OnFinalized(std::move(Err));
          return;
        }
      if (ActPair.Dealloc.FnAddr)
        DeallocActions.push_back(ActPair.Dealloc);
    }
    G.allocActions().clear();

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    OnAbandoned(std::move(Err));
  }

private:
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph &G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};

Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}

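// Allocate memory for a graph by reserving a single read/write slab large
// enough for all segments, partitioning it into standard-lifetime and
// finalize-lifetime regions, then pointing each BasicLayout segment at its
// page-aligned slice so that BasicLayout::apply() can copy content into
// place.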
void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  // Scan the request and calculate the group and total sizes.
  // Check that no segment's alignment exceeds the page size.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  // Check that the total size requested (including zero fill) is not larger
  // than a size_t.
  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
    OnAllocated(make_error<JITLinkError>(
        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
        " for graph " + G.getName() + " exceeds address space"));
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

    // Zero-fill the whole slab up-front.
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(),
                       static_cast<size_t>(SegsSizes->StandardSegs)};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
  }

  auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
  auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
439              << " to stardard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Assign target addresses and working memory to each segment.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
                        ? NextStandardSegAddr
                        : NextFinalizeSegAddr;

    Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}

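// Deallocate previously finalized allocations: under the FinalizedAllocs
// lock, reclaim each FinalizedAllocInfo record, then (outside the lock) run
// the recorded dealloc actions in reverse order and release the standard
// segment slabs, joining any errors into the value reported to OnDeallocated.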
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<AllocActionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA =
          jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    // Run any deallocate calls.
    while (!DeallocActions.empty()) {
      if (auto Err = runAllocAction(DeallocActions.back()))
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    // Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}

JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<AllocActionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(pointerToJITTargetAddress(FA));
}

} // end namespace jitlink
} // end namespace llvm