//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#ifdef __Fuchsia__
#include <zircon/syscalls.h>
#endif

#if defined(__mips__)
# if defined(__OpenBSD__)
#  include <mips64/sysarch.h>
# elif !defined(__FreeBSD__)
#  include <sys/cachectl.h>
# endif
#endif

#if defined(__APPLE__)
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void *);
#endif

namespace {

int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if (defined(__FreeBSD__) || defined(__POWERPC__) || defined(__ppc__) || \
     defined(_POWER) || defined(_ARCH_PPC))
    // On PowerPC, having an executable page that has no read permission can
    // have unintended consequences. InvalidateInstructionCache uses the dcbf
    // and icbi instructions, both of which are treated by the processor as
    // loads. If the page has no read permissions, executing these
    // instructions results in a segmentation fault.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // anonymous namespace

namespace llvm {
namespace sys {

MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
                                         const MemoryBlock *const NearBlock,
                                         unsigned PFlags, std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  // On platforms that have it, we can use MAP_ANON to get a memory-mapped
  // page without file backing, but we need a fallback of opening /dev/zero
  // for strictly POSIX platforms instead.
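  // fd stays -1 when MAP_ANON provides the anonymous mapping directly;
  // otherwise it holds the /dev/zero descriptor, which every exit path
  // below must close.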
  int fd;
#if defined(MAP_ANON)
  fd = -1;
#else
  fd = open("/dev/zero", O_RDWR);
  if (fd == -1) {
    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }
#endif

  int MMFlags = MAP_PRIVATE;
#if defined(MAP_ANON)
  MMFlags |= MAP_ANON;
#endif
  int Protect = getPosixProtectionFlags(PFlags);

#if defined(__NetBSD__) && defined(PROT_MPROTECT)
  Protect |= PROT_MPROTECT(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                    NearBlock->allocatedSize()
                              : 0;
  static const size_t PageSize = Process::getPageSizeEstimate();
  const size_t NumPages = (NumBytes + PageSize - 1) / PageSize;

  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  // FIXME: Handle huge page requests (MF_HUGE_HINT).
  void *Addr = ::mmap(reinterpret_cast<void *>(Start), PageSize * NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) { // Try again without a near hint.
#if !defined(MAP_ANON)
      close(fd);
#endif
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);
    }

    EC = std::error_code(errno, std::generic_category());
#if !defined(MAP_ANON)
    close(fd);
#endif
    return MemoryBlock();
  }

#if !defined(MAP_ANON)
  close(fd);
#endif

  MemoryBlock Result;
  Result.Address = Addr;
  Result.AllocatedSize = PageSize * NumPages;
  Result.Flags = PFlags;

  // Rely on protectMappedMemory to invalidate the instruction cache.
  if (PFlags & MF_EXEC) {
    EC = Memory::protectMappedMemory(Result, PFlags);
    if (EC != std::error_code()) {
      // Unmap the block so a failed protection change does not leak it.
      (void)::munmap(Addr, PageSize * NumPages);
      return MemoryBlock();
    }
  }

  return Result;
}

std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.AllocatedSize == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.AllocatedSize))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.AllocatedSize = 0;

  return std::error_code();
}

std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
                                            unsigned Flags) {
  static const Align PageSize = Align(Process::getPageSizeEstimate());
  if (M.Address == nullptr || M.AllocatedSize == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);
  uintptr_t Start =
      alignAddr((const uint8_t *)M.Address - PageSize.value() + 1, PageSize);
  uintptr_t End =
      alignAddr((const uint8_t *)M.Address + M.AllocatedSize, PageSize);

  bool InvalidateCache = (Flags & MF_EXEC);

#if defined(__arm__) || defined(__aarch64__)
  // Certain ARM implementations treat the icache-clear instruction as a
  // memory read, and the CPU faults when clearing the cache on a page with
  // no PROT_READ permission. Temporarily add PROT_READ so the instruction
  // caches can be flushed.
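  // Flush while the page is still readable, then clear InvalidateCache so
  // the caches are not flushed a second time after the final mprotect()
  // below.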
  if (InvalidateCache && !(Protect & PROT_READ)) {
    int Result = ::mprotect((void *)Start, End - Start, Protect | PROT_READ);
    if (Result != 0)
      return std::error_code(errno, std::generic_category());

    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
    InvalidateCache = false;
  }
#endif

  int Result = ::mprotect((void *)Start, End - Start, Protect);

  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (InvalidateCache)
    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

  return std::error_code();
}

/// InvalidateInstructionCache - Before the JIT can run a block of code that
/// has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

# if (defined(__POWERPC__) || defined(__ppc__) || \
      defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
      defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif

#elif defined(__Fuchsia__)

  zx_status_t Status = zx_cache_flush(Addr, Len, ZX_CACHE_FLUSH_INSN);
  assert(Status == ZX_OK && "cannot invalidate instruction cache");
  (void)Status; // Silence the unused-variable warning in release builds.

#else

# if (defined(__POWERPC__) || defined(__ppc__) || \
      defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t)Addr) & Mask;
  const intptr_t EndLine = ((intptr_t)Addr + Len + LineSize - 1) & Mask;

  // Flush the data cache lines to memory, then invalidate the corresponding
  // instruction cache lines.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
# elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
       defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
# endif

#endif // defined(__APPLE__)

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm
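
// A minimal usage sketch (illustrative, not part of this file), assuming a
// hypothetical emitCode() helper: a JIT-style client allocates read/write
// memory, emits code into it, then flips the block to read/execute, relying
// on protectMappedMemory() to invalidate the instruction cache:
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       4096, /*NearBlock=*/nullptr,
//       sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error("allocation failed");
//   emitCode(MB.base(), MB.allocatedSize()); // hypothetical code emitter
//   EC = sys::Memory::protectMappedMemory(
//       MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
//   if (EC)
//     report_fatal_error("protection change failed");
//   // ... call into MB.base(), then release the block ...
//   EC = sys::Memory::releaseMappedMemory(MB);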