//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAT_MAP_H
#define SANITIZER_FLAT_MAP_H

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_local_address_space_view.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {}
};

// Maps integers in range [0, kSize) to values.
template <typename T, u64 kSize,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class FlatMap {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() { internal_memset(map_, 0, sizeof(map_)); }

  constexpr uptr size() const { return kSize; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize);
    return true;
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

 private:
  T map_[kSize];
};

// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
// It is implemented as a two-dimensional array: an array of kSize1 pointers
// to kSize2-element arrays. The secondary arrays are mmapped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe without extra
// locking.
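//
// Usage sketch (illustrative only; the element type and sizes below are
// hypothetical, not taken from an in-tree user):
//
//   TwoLevelMap<u32, 128, 1 << 12> map;
//   map.Init();                  // no second-level arrays mapped yet
//   map[12345] = 7;              // mmaps one second-level array on demand
//   CHECK(map.contains(12345));  // true once the covering array is mapped
//   CHECK_EQ(map[12345], 7);     // each value is set once, then read-only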
template <typename T, u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelMap {
  static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");

 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() {
    mu_.Init();
    internal_memset(map1_, 0, sizeof(map1_));
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), MmapSize());
      UnmapOrDie(p, MmapSize());
    }
    Init();
  }

  uptr MemoryUsage() const {
    uptr res = 0;
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      res += MmapSize();
    }
    return res;
  }

  constexpr uptr size() const { return kSize1 * kSize2; }
  constexpr uptr size1() const { return kSize1; }
  constexpr uptr size2() const { return kSize2; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    return Get(idx / kSize2);
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::Load(&map2[idx % kSize2]);
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
  }

 private:
  constexpr uptr MmapSize() const {
    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
  }

  T *Get(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    return reinterpret_cast<T *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  T *GetOrCreate(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    // This code needs to use memory_order_acquire/consume, but we use
    // memory_order_relaxed for performance reasons (matters for arm64). We
    // expect memory_order_relaxed to be effectively equivalent to
    // memory_order_consume in this case for all relevant architectures: all
    // dependent data is reachable only by dereferencing the resulting pointer.
    // If the relaxed load fails to see the stored pointer, the code falls
    // back to Create() and reloads the value under the mutex, which acts as
    // a memory barrier.
    T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
    if (LIKELY(res))
      return res;
    return Create(idx);
  }

  NOINLINE T *Create(uptr idx) const {
    SpinMutexLock l(&mu_);
    T *res = Get(idx);
    if (!res) {
      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
      MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), MmapSize());
      atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                   memory_order_release);
    }
    return res;
  }

  mutable StaticSpinMutex mu_;
  mutable atomic_uintptr_t map1_[kSize1];
};

template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;

template <u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView,
          class MapUnmapCallback = NoOpMapUnmapCallback>
using TwoLevelByteMap =
    TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy, MapUnmapCallback>;
}  // namespace __sanitizer

#endif  // SANITIZER_FLAT_MAP_H
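
// Usage sketch for the byte-map aliases (illustrative only; the sizes,
// `shadow_state`, `idx`, and `kAllocated` are hypothetical). TwoLevelByteMap
// trades FlatByteMap's single up-front array for kSize1 pointers plus
// on-demand, page-aligned second-level mappings:
//
//   static TwoLevelByteMap<1 << 10, 1 << 12> shadow_state;
//   shadow_state.Init();
//   shadow_state[idx] = kAllocated;  // first touch mmaps one page-rounded
//                                    // kSize2-byte chunk
//
// With u8 elements, MemoryUsage() reports one such chunk per touched
// first-level slot.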