//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_FUCHSIA

#include "common.h"
#include "mutex.h"
#include "string_utils.h"

#include <lib/sync/mutex.h> // for sync_mutex_t
#include <stdlib.h>         // for getenv()
#include <string.h>         // for strlen() and memset()
#include <zircon/compiler.h>
#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

namespace scudo {

uptr getPageSize() { return _zx_system_get_page_size(); }

void NORETURN die() { __builtin_trap(); }

// The MapPlatformData passed to map() is zero-initialized by callers; make
// sure this is consistent with ZX_HANDLE_INVALID, ie. zeroed handles are
// invalid handles.
static_assert(ZX_HANDLE_INVALID == 0, "");

static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
                                uptr Size) {
  char Error[128];
  formatString(Error, sizeof(Error),
               "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
               Size >> 10, zx_status_get_string(Status));
  outputRaw(Error);
  die();
}

static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
  // Only scenario so far: a new reservation, with caller-provided Data.
  DCHECK(Data);
  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);

  const zx_status_t Status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      Size, &Data->Vmar, &Data->VmarBase);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_allocate", Size);
    return nullptr;
  }
  return reinterpret_cast<void *>(Data->VmarBase);
}

void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
          MapPlatformData *Data) {
  DCHECK_EQ(Size % getPageSizeCached(), 0);
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);

  // For MAP_NOACCESS, just allocate a Vmar and return.
  if (Flags & MAP_NOACCESS)
    return allocateVmar(Size, Data, AllowNoMem);

  const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
                               ? Data->Vmar
                               : _zx_vmar_root_self();

  zx_status_t Status;
  zx_handle_t Vmo;
  uint64_t VmoSize = 0;
  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
    // If a Vmo was specified, it's a resize operation.
    CHECK(Addr);
    DCHECK(Flags & MAP_RESIZABLE);
    Vmo = Data->Vmo;
    VmoSize = Data->VmoSize;
    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
    if (Status != ZX_OK) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnError(Status, "zx_vmo_set_size", VmoSize + Size);
      return nullptr;
    }
  } else {
    // Otherwise, create a Vmo and set its name.
    Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
    if (UNLIKELY(Status != ZX_OK)) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnError(Status, "zx_vmo_create", Size);
      return nullptr;
    }
    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
  }

  uintptr_t P;
  zx_vm_option_t MapFlags =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
  if (Addr)
    DCHECK(Data);
  const uint64_t Offset =
      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
  if (Offset)
    MapFlags |= ZX_VM_SPECIFIC;
  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_map", Size);
    return nullptr;
  }

  if (Flags & MAP_PRECOMMIT) {
    Status = _zx_vmar_op_range(Vmar, ZX_VMAR_OP_COMMIT, P, Size,
                               /*buffer=*/nullptr, /*buffer_size=*/0);
  }

  // No need to track the Vmo if we don't intend to resize it. Close it.
  if (Flags & MAP_RESIZABLE) {
    DCHECK(Data);
    if (Data->Vmo == ZX_HANDLE_INVALID)
      Data->Vmo = Vmo;
    else
      DCHECK_EQ(Data->Vmo, Vmo);
  } else {
    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
  }
  // The commit status is only checked here so that the Vmo handle is tracked
  // (or closed) above regardless of whether the commit succeeded.
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnError(Status, "zx_vmar_op_range", Size);
    return nullptr;
  }

  if (Data)
    Data->VmoSize += Size;

  return reinterpret_cast<void *>(P);
}
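
// A hedged usage sketch, not part of the implementation: how the primitives
// above combine into the reserve-then-commit pattern the allocator relies on.
// The 1MB size and "scudo:example" name are made up for illustration.
//
//   MapPlatformData Data = {}; // Zeroed, so handles start out invalid.
//   // Reserve 1MB of address space in a dedicated Vmar, with no permissions.
//   void *Base = map(nullptr, 1 << 20, "scudo:example", MAP_NOACCESS, &Data);
//   // Commit one page read-write inside the reservation. This creates a
//   // resizable Vmo, tracks it in Data, and maps it at the requested offset.
//   void *P = map(reinterpret_cast<char *>(Base) + getPageSizeCached(),
//                 getPageSizeCached(), "scudo:example", MAP_RESIZABLE, &Data);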

void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
  if (Flags & UNMAP_ALL) {
    DCHECK_NE(Data, nullptr);
    const zx_handle_t Vmar = Data->Vmar;
    DCHECK_NE(Vmar, _zx_vmar_root_self());
    // Destroying the Vmar effectively unmaps the whole mapping.
    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
  } else {
    const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
                                 ? Data->Vmar
                                 : _zx_vmar_root_self();
    const zx_status_t Status =
        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
    if (UNLIKELY(Status != ZX_OK))
      dieOnError(Status, "zx_vmar_unmap", Size);
  }
  if (Data) {
    if (Data->Vmo != ZX_HANDLE_INVALID)
      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
    memset(Data, 0, sizeof(*Data));
  }
}

void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
                         MapPlatformData *Data) {
  const zx_vm_option_t Prot =
      (Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  const zx_status_t Status = _zx_vmar_protect(Data->Vmar, Prot, Addr, Size);
  if (Status != ZX_OK)
    dieOnError(Status, "zx_vmar_protect", Size);
}

void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
                      MapPlatformData *Data) {
  // TODO: DCHECK the BaseAddress is consistent with the data in
  // MapPlatformData.
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
  const zx_status_t Status =
      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, nullptr, 0);
  CHECK_EQ(Status, ZX_OK);
}

const char *getEnv(const char *Name) { return getenv(Name); }
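
// Continuing the hedged sketch from above: returning memory to the OS and
// tearing the reservation down. Offset is relative to the tracked Vmo, and
// the page committed earlier sits at Vmo offset 0.
//
//   // Decommit the committed page; it faults back in as zero-filled memory.
//   releasePagesToOS(reinterpret_cast<uptr>(P), /*Offset=*/0,
//                    getPageSizeCached(), &Data);
//   // UNMAP_ALL destroys the Vmar (unmapping everything within it), closes
//   // the Vmar and Vmo handles, and zeroes Data for potential reuse.
//   unmap(Base, 1 << 20, UNMAP_ALL, &Data);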

// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
// because the Fuchsia implementation of sync_mutex_t has Clang thread safety
// annotations. Were we to apply proper capability annotations to the
// top-level HybridMutex class itself, they would not be needed. As it stands,
// the thread analysis thinks that we are locking the mutex and accidentally
// leaving it locked on the way out.
bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  // Size and alignment must be compatible between both types.
  return sync_mutex_trylock(&M) == ZX_OK;
}

void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_lock(&M);
}

void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_unlock(&M);
}

void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}

u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
u64 getMonotonicTimeFast() { return _zx_clock_get_monotonic(); }

u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }

u32 getThreadID() { return 0; }

bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
  static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
    return false;
  _zx_cprng_draw(Buffer, Length);
  return true;
}

void outputRaw(const char *Buffer) {
  __sanitizer_log_write(Buffer, strlen(Buffer));
}

void setAbortMessage(UNUSED const char *Message) {}

} // namespace scudo

#endif // SCUDO_FUCHSIA