//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

namespace llvm {

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper.allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions

  // Some platforms with separate data cache and instruction cache require
  // explicit cache flush, otherwise JIT code manipulations (like resolved
  // relocations) will get to the data cache but not to the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.allocatedSize();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.allocatedSize() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() &&
         Trimmed.allocatedSize() <= M.allocatedSize());

  return Trimmed;
}

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  MemGroup.FreeMem.erase(remove_if(MemGroup.FreeMem,
                                   [](FreeMemBlock &FreeMB) {
                                     return FreeMB.Free.allocatedSize() == 0;
                                   }),
                         MemGroup.FreeMem.end());

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(),
                                            Block.allocatedSize());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper.releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() {}

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};

DefaultMMapper DefaultMMapperInstance;
} // namespace

SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
    : MMapper(MM ? *MM : DefaultMMapperInstance) {}

} // namespace llvm
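
// Usage sketch (illustrative comment only, not part of the implementation):
// clients normally do not call allocateCodeSection/allocateDataSection or
// finalizeMemory by hand; the JIT linker drives them. Assuming an existing
// JITSymbolResolver `Resolver` and an object::ObjectFile `Obj` (both names
// are hypothetical placeholders supplied by the caller), a minimal RuntimeDyld
// setup might look roughly like this:
//
//   SectionMemoryManager MemMgr;
//   RuntimeDyld Dyld(MemMgr, Resolver);
//   Dyld.loadObject(Obj);        // calls allocateCodeSection/allocateDataSection
//   Dyld.resolveRelocations();   // patch code/data while memory is still RW
//   MemMgr.finalizeMemory();     // code -> R+X, read-only data -> R, then
//                                // invalidateInstructionCache()
//
// The ordering constraint is that relocations must be resolved before
// finalizeMemory() revokes write permission on the code and read-only data
// groups.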