1 //===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Part of the Sanitizer Allocator. 10 // 11 //===----------------------------------------------------------------------===// 12 #ifndef SANITIZER_ALLOCATOR_H 13 #error This file must be included inside sanitizer_allocator.h 14 #endif 15 16 template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache; 17 18 // SizeClassAllocator64 -- allocator for 64-bit address space. 19 // The template parameter Params is a class containing the actual parameters. 20 // 21 // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg. 22 // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap. 23 // Otherwise SpaceBeg=kSpaceBeg (fixed address). 24 // kSpaceSize is a power of two. 25 // At the beginning the entire space is mprotect-ed, then small parts of it 26 // are mapped on demand. 27 // 28 // Region: a part of Space dedicated to a single size class. 29 // There are kNumClasses Regions of equal size. 30 // 31 // UserChunk: a piece of memory returned to user. 32 // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk. 33 34 // FreeArray is an array free-d chunks (stored as 4-byte offsets) 35 // 36 // A Region looks like this: 37 // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray 38 39 struct SizeClassAllocator64FlagMasks { // Bit masks. 40 enum { 41 kRandomShuffleChunks = 1, 42 }; 43 }; 44 45 template <typename Allocator> 46 class MemoryMapper { 47 public: 48 typedef typename Allocator::CompactPtrT CompactPtrT; 49 MemoryMapper(const Allocator & allocator)50 explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {} 51 GetAndResetStats(uptr & ranges,uptr & bytes)52 bool GetAndResetStats(uptr &ranges, uptr &bytes) { 53 ranges = released_ranges_count_; 54 released_ranges_count_ = 0; 55 bytes = released_bytes_; 56 released_bytes_ = 0; 57 return ranges != 0; 58 } 59 MapPackedCounterArrayBuffer(uptr count)60 u64 *MapPackedCounterArrayBuffer(uptr count) { 61 buffer_.clear(); 62 buffer_.resize(count); 63 return buffer_.data(); 64 } 65 66 // Releases [from, to) range of pages back to OS. ReleasePageRangeToOS(uptr class_id,CompactPtrT from,CompactPtrT to)67 void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) { 68 const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id); 69 const uptr from_page = allocator_.CompactPtrToPointer(region_base, from); 70 const uptr to_page = allocator_.CompactPtrToPointer(region_base, to); 71 ReleaseMemoryPagesToOS(from_page, to_page); 72 released_ranges_count_++; 73 released_bytes_ += to_page - from_page; 74 } 75 76 private: 77 const Allocator &allocator_; 78 uptr released_ranges_count_ = 0; 79 uptr released_bytes_ = 0; 80 InternalMmapVector<u64> buffer_; 81 }; 82 83 template <class Params> 84 class SizeClassAllocator64 { 85 public: 86 using AddressSpaceView = typename Params::AddressSpaceView; 87 static const uptr kSpaceBeg = Params::kSpaceBeg; 88 static const uptr kSpaceSize = Params::kSpaceSize; 89 static const uptr kMetadataSize = Params::kMetadataSize; 90 typedef typename Params::SizeClassMap SizeClassMap; 91 typedef typename Params::MapUnmapCallback MapUnmapCallback; 92 93 static const bool kRandomShuffleChunks = 94 Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks; 95 96 typedef SizeClassAllocator64<Params> ThisT; 97 typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache; 98 typedef MemoryMapper<ThisT> MemoryMapperT; 99 100 // When we know the size class (the region base) we can represent a pointer 101 // as a 4-byte integer (offset from the region start shifted right by 4). 102 typedef u32 CompactPtrT; 103 static const uptr kCompactPtrScale = 4; PointerToCompactPtr(uptr base,uptr ptr)104 CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const { 105 return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale); 106 } CompactPtrToPointer(uptr base,CompactPtrT ptr32)107 uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const { 108 return base + (static_cast<uptr>(ptr32) << kCompactPtrScale); 109 } 110 111 // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W 112 // at heap_start and places the heap there. This mode requires kSpaceBeg == 113 // ~(uptr)0. 114 void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) { 115 uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); 116 PremappedHeap = heap_start != 0; 117 if (PremappedHeap) { 118 CHECK(!kUsingConstantSpaceBeg); 119 NonConstSpaceBeg = heap_start; 120 uptr RegionInfoSize = AdditionalSize(); 121 RegionInfoSpace = 122 address_range.Init(RegionInfoSize, PrimaryAllocatorName); 123 CHECK_NE(RegionInfoSpace, ~(uptr)0); 124 CHECK_EQ(RegionInfoSpace, 125 address_range.MapOrDie(RegionInfoSpace, RegionInfoSize, 126 "SizeClassAllocator: region info")); 127 MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize); 128 } else { 129 if (kUsingConstantSpaceBeg) { 130 CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize)); 131 CHECK_EQ(kSpaceBeg, 132 address_range.Init(TotalSpaceSize, PrimaryAllocatorName, 133 kSpaceBeg)); 134 } else { 135 // Combined allocator expects that an 2^N allocation is always aligned 136 // to 2^N. For this to work, the start of the space needs to be aligned 137 // as high as the largest size class (which also needs to be a power of 138 // 2). 139 NonConstSpaceBeg = address_range.InitAligned( 140 TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName); 141 CHECK_NE(NonConstSpaceBeg, ~(uptr)0); 142 } 143 RegionInfoSpace = SpaceEnd(); 144 MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(), 145 "SizeClassAllocator: region info"); 146 } 147 SetReleaseToOSIntervalMs(release_to_os_interval_ms); 148 // Check that the RegionInfo array is aligned on the CacheLine size. 149 DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0); 150 } 151 ReleaseToOSIntervalMs()152 s32 ReleaseToOSIntervalMs() const { 153 return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed); 154 } 155 SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms)156 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) { 157 atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms, 158 memory_order_relaxed); 159 } 160 ForceReleaseToOS()161 void ForceReleaseToOS() { 162 MemoryMapperT memory_mapper(*this); 163 for (uptr class_id = 1; class_id < kNumClasses; class_id++) { 164 Lock l(&GetRegionInfo(class_id)->mutex); 165 MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/); 166 } 167 } 168 CanAllocate(uptr size,uptr alignment)169 static bool CanAllocate(uptr size, uptr alignment) { 170 return size <= SizeClassMap::kMaxSize && 171 alignment <= SizeClassMap::kMaxSize; 172 } 173 ReturnToAllocator(MemoryMapperT * memory_mapper,AllocatorStats * stat,uptr class_id,const CompactPtrT * chunks,uptr n_chunks)174 NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper, 175 AllocatorStats *stat, uptr class_id, 176 const CompactPtrT *chunks, uptr n_chunks) { 177 RegionInfo *region = GetRegionInfo(class_id); 178 uptr region_beg = GetRegionBeginBySizeClass(class_id); 179 CompactPtrT *free_array = GetFreeArray(region_beg); 180 181 Lock l(®ion->mutex); 182 uptr old_num_chunks = region->num_freed_chunks; 183 uptr new_num_freed_chunks = old_num_chunks + n_chunks; 184 // Failure to allocate free array space while releasing memory is non 185 // recoverable. 186 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, 187 new_num_freed_chunks))) { 188 Report("FATAL: Internal error: %s's allocator exhausted the free list " 189 "space for size class %zd (%zd bytes).\n", SanitizerToolName, 190 class_id, ClassIdToSize(class_id)); 191 Die(); 192 } 193 for (uptr i = 0; i < n_chunks; i++) 194 free_array[old_num_chunks + i] = chunks[i]; 195 region->num_freed_chunks = new_num_freed_chunks; 196 region->stats.n_freed += n_chunks; 197 198 MaybeReleaseToOS(memory_mapper, class_id, false /*force*/); 199 } 200 GetFromAllocator(AllocatorStats * stat,uptr class_id,CompactPtrT * chunks,uptr n_chunks)201 NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id, 202 CompactPtrT *chunks, uptr n_chunks) { 203 RegionInfo *region = GetRegionInfo(class_id); 204 uptr region_beg = GetRegionBeginBySizeClass(class_id); 205 CompactPtrT *free_array = GetFreeArray(region_beg); 206 207 Lock l(®ion->mutex); 208 #if SANITIZER_WINDOWS 209 /* On Windows unmapping of memory during __sanitizer_purge_allocator is 210 explicit and immediate, so unmapped regions must be explicitly mapped back 211 in when they are accessed again. */ 212 if (region->rtoi.last_released_bytes > 0) { 213 MmapFixedOrDie(region_beg, region->mapped_user, 214 "SizeClassAllocator: region data"); 215 region->rtoi.n_freed_at_last_release = 0; 216 region->rtoi.last_released_bytes = 0; 217 } 218 #endif 219 if (UNLIKELY(region->num_freed_chunks < n_chunks)) { 220 if (UNLIKELY(!PopulateFreeArray(stat, class_id, region, 221 n_chunks - region->num_freed_chunks))) 222 return false; 223 CHECK_GE(region->num_freed_chunks, n_chunks); 224 } 225 region->num_freed_chunks -= n_chunks; 226 uptr base_idx = region->num_freed_chunks; 227 for (uptr i = 0; i < n_chunks; i++) 228 chunks[i] = free_array[base_idx + i]; 229 region->stats.n_allocated += n_chunks; 230 return true; 231 } 232 PointerIsMine(const void * p)233 bool PointerIsMine(const void *p) const { 234 uptr P = reinterpret_cast<uptr>(p); 235 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) 236 return P / kSpaceSize == kSpaceBeg / kSpaceSize; 237 return P >= SpaceBeg() && P < SpaceEnd(); 238 } 239 GetRegionBegin(const void * p)240 uptr GetRegionBegin(const void *p) { 241 if (kUsingConstantSpaceBeg) 242 return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1); 243 uptr space_beg = SpaceBeg(); 244 return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) + 245 space_beg; 246 } 247 GetRegionBeginBySizeClass(uptr class_id)248 uptr GetRegionBeginBySizeClass(uptr class_id) const { 249 return SpaceBeg() + kRegionSize * class_id; 250 } 251 GetSizeClass(const void * p)252 uptr GetSizeClass(const void *p) { 253 if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0) 254 return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded; 255 return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) % 256 kNumClassesRounded; 257 } 258 GetBlockBegin(const void * p)259 void *GetBlockBegin(const void *p) { 260 uptr class_id = GetSizeClass(p); 261 if (class_id >= kNumClasses) return nullptr; 262 uptr size = ClassIdToSize(class_id); 263 if (!size) return nullptr; 264 uptr chunk_idx = GetChunkIdx((uptr)p, size); 265 uptr reg_beg = GetRegionBegin(p); 266 uptr beg = chunk_idx * size; 267 uptr next_beg = beg + size; 268 const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id)); 269 if (region->mapped_user >= next_beg) 270 return reinterpret_cast<void*>(reg_beg + beg); 271 return nullptr; 272 } 273 GetActuallyAllocatedSize(void * p)274 uptr GetActuallyAllocatedSize(void *p) { 275 CHECK(PointerIsMine(p)); 276 return ClassIdToSize(GetSizeClass(p)); 277 } 278 ClassID(uptr size)279 static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); } 280 GetMetaData(const void * p)281 void *GetMetaData(const void *p) { 282 CHECK(kMetadataSize); 283 uptr class_id = GetSizeClass(p); 284 uptr size = ClassIdToSize(class_id); 285 if (!size) 286 return nullptr; 287 uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size); 288 uptr region_beg = GetRegionBeginBySizeClass(class_id); 289 return reinterpret_cast<void *>(GetMetadataEnd(region_beg) - 290 (1 + chunk_idx) * kMetadataSize); 291 } 292 TotalMemoryUsed()293 uptr TotalMemoryUsed() { 294 uptr res = 0; 295 for (uptr i = 0; i < kNumClasses; i++) 296 res += GetRegionInfo(i)->allocated_user; 297 return res; 298 } 299 300 // Test-only. TestOnlyUnmap()301 void TestOnlyUnmap() { 302 UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size()); 303 } 304 FillMemoryProfile(uptr start,uptr rss,bool file,uptr * stats)305 static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) { 306 for (uptr class_id = 0; class_id < kNumClasses; class_id++) 307 if (stats[class_id] == start) 308 stats[class_id] = rss; 309 } 310 PrintStats(uptr class_id,uptr rss)311 void PrintStats(uptr class_id, uptr rss) { 312 RegionInfo *region = GetRegionInfo(class_id); 313 if (region->mapped_user == 0) return; 314 uptr in_use = region->stats.n_allocated - region->stats.n_freed; 315 uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id); 316 Printf( 317 "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd " 318 "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd " 319 "last released: %6lldK region: %p\n", 320 region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id), 321 region->mapped_user >> 10, region->stats.n_allocated, 322 region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks, 323 rss >> 10, region->rtoi.num_releases, 324 region->rtoi.last_released_bytes >> 10, 325 (void *)(SpaceBeg() + kRegionSize * class_id)); 326 } 327 PrintStats()328 void PrintStats() { 329 uptr rss_stats[kNumClasses]; 330 for (uptr class_id = 0; class_id < kNumClasses; class_id++) 331 rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id; 332 GetMemoryProfile(FillMemoryProfile, rss_stats); 333 334 uptr total_mapped = 0; 335 uptr total_rss = 0; 336 uptr n_allocated = 0; 337 uptr n_freed = 0; 338 for (uptr class_id = 1; class_id < kNumClasses; class_id++) { 339 RegionInfo *region = GetRegionInfo(class_id); 340 if (region->mapped_user != 0) { 341 total_mapped += region->mapped_user; 342 total_rss += rss_stats[class_id]; 343 } 344 n_allocated += region->stats.n_allocated; 345 n_freed += region->stats.n_freed; 346 } 347 348 Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in " 349 "%zd allocations; remains %zd\n", total_mapped >> 20, 350 total_rss >> 20, n_allocated, n_allocated - n_freed); 351 for (uptr class_id = 1; class_id < kNumClasses; class_id++) 352 PrintStats(class_id, rss_stats[class_id]); 353 } 354 355 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone 356 // introspection API. ForceLock()357 void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { 358 for (uptr i = 0; i < kNumClasses; i++) { 359 GetRegionInfo(i)->mutex.Lock(); 360 } 361 } 362 ForceUnlock()363 void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { 364 for (int i = (int)kNumClasses - 1; i >= 0; i--) { 365 GetRegionInfo(i)->mutex.Unlock(); 366 } 367 } 368 369 // Iterate over all existing chunks. 370 // The allocator must be locked when calling this function. ForEachChunk(ForEachChunkCallback callback,void * arg)371 void ForEachChunk(ForEachChunkCallback callback, void *arg) { 372 for (uptr class_id = 1; class_id < kNumClasses; class_id++) { 373 RegionInfo *region = GetRegionInfo(class_id); 374 uptr chunk_size = ClassIdToSize(class_id); 375 uptr region_beg = SpaceBeg() + class_id * kRegionSize; 376 uptr region_allocated_user_size = 377 AddressSpaceView::Load(region)->allocated_user; 378 for (uptr chunk = region_beg; 379 chunk < region_beg + region_allocated_user_size; 380 chunk += chunk_size) { 381 // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); 382 callback(chunk, arg); 383 } 384 } 385 } 386 ClassIdToSize(uptr class_id)387 static uptr ClassIdToSize(uptr class_id) { 388 return SizeClassMap::Size(class_id); 389 } 390 AdditionalSize()391 static uptr AdditionalSize() { 392 return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded, 393 GetPageSizeCached()); 394 } 395 396 typedef SizeClassMap SizeClassMapT; 397 static const uptr kNumClasses = SizeClassMap::kNumClasses; 398 static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; 399 400 // A packed array of counters. Each counter occupies 2^n bits, enough to store 401 // counter's max_value. Ctor will try to allocate the required buffer via 402 // mapper->MapPackedCounterArrayBuffer and the caller is expected to check 403 // whether the initialization was successful by checking IsAllocated() result. 404 // For the performance sake, none of the accessors check the validity of the 405 // arguments, it is assumed that index is always in [0, n) range and the value 406 // is not incremented past max_value. 407 class PackedCounterArray { 408 public: 409 template <typename MemoryMapper> PackedCounterArray(u64 num_counters,u64 max_value,MemoryMapper * mapper)410 PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper) 411 : n(num_counters) { 412 CHECK_GT(num_counters, 0); 413 CHECK_GT(max_value, 0); 414 constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL; 415 // Rounding counter storage size up to the power of two allows for using 416 // bit shifts calculating particular counter's index and offset. 417 uptr counter_size_bits = 418 RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1); 419 CHECK_LE(counter_size_bits, kMaxCounterBits); 420 counter_size_bits_log = Log2(counter_size_bits); 421 counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits); 422 423 uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log; 424 CHECK_GT(packing_ratio, 0); 425 packing_ratio_log = Log2(packing_ratio); 426 bit_offset_mask = packing_ratio - 1; 427 428 buffer = mapper->MapPackedCounterArrayBuffer( 429 RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log); 430 } 431 IsAllocated()432 bool IsAllocated() const { 433 return !!buffer; 434 } 435 GetCount()436 u64 GetCount() const { 437 return n; 438 } 439 Get(uptr i)440 uptr Get(uptr i) const { 441 DCHECK_LT(i, n); 442 uptr index = i >> packing_ratio_log; 443 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log; 444 return (buffer[index] >> bit_offset) & counter_mask; 445 } 446 Inc(uptr i)447 void Inc(uptr i) const { 448 DCHECK_LT(Get(i), counter_mask); 449 uptr index = i >> packing_ratio_log; 450 uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log; 451 buffer[index] += 1ULL << bit_offset; 452 } 453 IncRange(uptr from,uptr to)454 void IncRange(uptr from, uptr to) const { 455 DCHECK_LE(from, to); 456 for (uptr i = from; i <= to; i++) 457 Inc(i); 458 } 459 460 private: 461 const u64 n; 462 u64 counter_size_bits_log; 463 u64 counter_mask; 464 u64 packing_ratio_log; 465 u64 bit_offset_mask; 466 u64* buffer; 467 }; 468 469 template <class MemoryMapperT> 470 class FreePagesRangeTracker { 471 public: FreePagesRangeTracker(MemoryMapperT * mapper,uptr class_id)472 FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id) 473 : memory_mapper(mapper), 474 class_id(class_id), 475 page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {} 476 NextPage(bool freed)477 void NextPage(bool freed) { 478 if (freed) { 479 if (!in_the_range) { 480 current_range_start_page = current_page; 481 in_the_range = true; 482 } 483 } else { 484 CloseOpenedRange(); 485 } 486 current_page++; 487 } 488 Done()489 void Done() { 490 CloseOpenedRange(); 491 } 492 493 private: CloseOpenedRange()494 void CloseOpenedRange() { 495 if (in_the_range) { 496 memory_mapper->ReleasePageRangeToOS( 497 class_id, current_range_start_page << page_size_scaled_log, 498 current_page << page_size_scaled_log); 499 in_the_range = false; 500 } 501 } 502 503 MemoryMapperT *const memory_mapper = nullptr; 504 const uptr class_id = 0; 505 const uptr page_size_scaled_log = 0; 506 bool in_the_range = false; 507 uptr current_page = 0; 508 uptr current_range_start_page = 0; 509 }; 510 511 // Iterates over the free_array to identify memory pages containing freed 512 // chunks only and returns these pages back to OS. 513 // allocated_pages_count is the total number of pages allocated for the 514 // current bucket. 515 template <typename MemoryMapper> ReleaseFreeMemoryToOS(CompactPtrT * free_array,uptr free_array_count,uptr chunk_size,uptr allocated_pages_count,MemoryMapper * memory_mapper,uptr class_id)516 static void ReleaseFreeMemoryToOS(CompactPtrT *free_array, 517 uptr free_array_count, uptr chunk_size, 518 uptr allocated_pages_count, 519 MemoryMapper *memory_mapper, 520 uptr class_id) { 521 const uptr page_size = GetPageSizeCached(); 522 523 // Figure out the number of chunks per page and whether we can take a fast 524 // path (the number of chunks per page is the same for all pages). 525 uptr full_pages_chunk_count_max; 526 bool same_chunk_count_per_page; 527 if (chunk_size <= page_size && page_size % chunk_size == 0) { 528 // Same number of chunks per page, no cross overs. 529 full_pages_chunk_count_max = page_size / chunk_size; 530 same_chunk_count_per_page = true; 531 } else if (chunk_size <= page_size && page_size % chunk_size != 0 && 532 chunk_size % (page_size % chunk_size) == 0) { 533 // Some chunks are crossing page boundaries, which means that the page 534 // contains one or two partial chunks, but all pages contain the same 535 // number of chunks. 536 full_pages_chunk_count_max = page_size / chunk_size + 1; 537 same_chunk_count_per_page = true; 538 } else if (chunk_size <= page_size) { 539 // Some chunks are crossing page boundaries, which means that the page 540 // contains one or two partial chunks. 541 full_pages_chunk_count_max = page_size / chunk_size + 2; 542 same_chunk_count_per_page = false; 543 } else if (chunk_size > page_size && chunk_size % page_size == 0) { 544 // One chunk covers multiple pages, no cross overs. 545 full_pages_chunk_count_max = 1; 546 same_chunk_count_per_page = true; 547 } else if (chunk_size > page_size) { 548 // One chunk covers multiple pages, Some chunks are crossing page 549 // boundaries. Some pages contain one chunk, some contain two. 550 full_pages_chunk_count_max = 2; 551 same_chunk_count_per_page = false; 552 } else { 553 UNREACHABLE("All chunk_size/page_size ratios must be handled."); 554 } 555 556 PackedCounterArray counters(allocated_pages_count, 557 full_pages_chunk_count_max, memory_mapper); 558 if (!counters.IsAllocated()) 559 return; 560 561 const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale; 562 const uptr page_size_scaled = page_size >> kCompactPtrScale; 563 const uptr page_size_scaled_log = Log2(page_size_scaled); 564 565 // Iterate over free chunks and count how many free chunks affect each 566 // allocated page. 567 if (chunk_size <= page_size && page_size % chunk_size == 0) { 568 // Each chunk affects one page only. 569 for (uptr i = 0; i < free_array_count; i++) 570 counters.Inc(free_array[i] >> page_size_scaled_log); 571 } else { 572 // In all other cases chunks might affect more than one page. 573 for (uptr i = 0; i < free_array_count; i++) { 574 counters.IncRange( 575 free_array[i] >> page_size_scaled_log, 576 (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log); 577 } 578 } 579 580 // Iterate over pages detecting ranges of pages with chunk counters equal 581 // to the expected number of chunks for the particular page. 582 FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id); 583 if (same_chunk_count_per_page) { 584 // Fast path, every page has the same number of chunks affecting it. 585 for (uptr i = 0; i < counters.GetCount(); i++) 586 range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max); 587 } else { 588 // Show path, go through the pages keeping count how many chunks affect 589 // each page. 590 const uptr pn = 591 chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1; 592 const uptr pnc = pn * chunk_size_scaled; 593 // The idea is to increment the current page pointer by the first chunk 594 // size, middle portion size (the portion of the page covered by chunks 595 // except the first and the last one) and then the last chunk size, adding 596 // up the number of chunks on the current page and checking on every step 597 // whether the page boundary was crossed. 598 uptr prev_page_boundary = 0; 599 uptr current_boundary = 0; 600 for (uptr i = 0; i < counters.GetCount(); i++) { 601 uptr page_boundary = prev_page_boundary + page_size_scaled; 602 uptr chunks_per_page = pn; 603 if (current_boundary < page_boundary) { 604 if (current_boundary > prev_page_boundary) 605 chunks_per_page++; 606 current_boundary += pnc; 607 if (current_boundary < page_boundary) { 608 chunks_per_page++; 609 current_boundary += chunk_size_scaled; 610 } 611 } 612 prev_page_boundary = page_boundary; 613 614 range_tracker.NextPage(counters.Get(i) == chunks_per_page); 615 } 616 } 617 range_tracker.Done(); 618 } 619 620 private: 621 friend class MemoryMapper<ThisT>; 622 623 ReservedAddressRange address_range; 624 625 static const uptr kRegionSize = kSpaceSize / kNumClassesRounded; 626 // FreeArray is the array of free-d chunks (stored as 4-byte offsets). 627 // In the worst case it may require kRegionSize/SizeClassMap::kMinSize 628 // elements, but in reality this will not happen. For simplicity we 629 // dedicate 1/8 of the region's virtual space to FreeArray. 630 static const uptr kFreeArraySize = kRegionSize / 8; 631 632 static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0; 633 uptr NonConstSpaceBeg; SpaceBeg()634 uptr SpaceBeg() const { 635 return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg; 636 } SpaceEnd()637 uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; } 638 // kRegionSize should be able to satisfy the largest size class. 639 static_assert(kRegionSize >= SizeClassMap::kMaxSize, 640 "Region size exceed largest size"); 641 // kRegionSize must be <= 2^36, see CompactPtrT. 642 COMPILER_CHECK((kRegionSize) <= 643 (1ULL << (sizeof(CompactPtrT) * 8 + kCompactPtrScale))); 644 // Call mmap for user memory with at least this size. 645 static const uptr kUserMapSize = 1 << 18; 646 // Call mmap for metadata memory with at least this size. 647 static const uptr kMetaMapSize = 1 << 16; 648 // Call mmap for free array memory with at least this size. 649 static const uptr kFreeArrayMapSize = 1 << 18; 650 651 atomic_sint32_t release_to_os_interval_ms_; 652 653 uptr RegionInfoSpace; 654 655 // True if the user has already mapped the entire heap R/W. 656 bool PremappedHeap; 657 658 struct Stats { 659 uptr n_allocated; 660 uptr n_freed; 661 }; 662 663 struct ReleaseToOsInfo { 664 uptr n_freed_at_last_release; 665 uptr num_releases; 666 u64 last_release_at_ns; 667 u64 last_released_bytes; 668 }; 669 670 struct alignas(SANITIZER_CACHE_LINE_SIZE) RegionInfo { 671 Mutex mutex; 672 uptr num_freed_chunks; // Number of elements in the freearray. 673 uptr mapped_free_array; // Bytes mapped for freearray. 674 uptr allocated_user; // Bytes allocated for user memory. 675 uptr allocated_meta; // Bytes allocated for metadata. 676 uptr mapped_user; // Bytes mapped for user memory. 677 uptr mapped_meta; // Bytes mapped for metadata. 678 u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks. 679 bool exhausted; // Whether region is out of space for new chunks. 680 Stats stats; 681 ReleaseToOsInfo rtoi; 682 }; 683 COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0); 684 GetRegionInfo(uptr class_id)685 RegionInfo *GetRegionInfo(uptr class_id) const { 686 DCHECK_LT(class_id, kNumClasses); 687 RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace); 688 return ®ions[class_id]; 689 } 690 GetMetadataEnd(uptr region_beg)691 uptr GetMetadataEnd(uptr region_beg) const { 692 return region_beg + kRegionSize - kFreeArraySize; 693 } 694 GetChunkIdx(uptr chunk,uptr size)695 uptr GetChunkIdx(uptr chunk, uptr size) const { 696 if (!kUsingConstantSpaceBeg) 697 chunk -= SpaceBeg(); 698 699 uptr offset = chunk % kRegionSize; 700 // Here we divide by a non-constant. This is costly. 701 // size always fits into 32-bits. If the offset fits too, use 32-bit div. 702 if (offset >> (SANITIZER_WORDSIZE / 2)) 703 return offset / size; 704 return (u32)offset / (u32)size; 705 } 706 GetFreeArray(uptr region_beg)707 CompactPtrT *GetFreeArray(uptr region_beg) const { 708 return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg)); 709 } 710 MapWithCallback(uptr beg,uptr size,const char * name)711 bool MapWithCallback(uptr beg, uptr size, const char *name) { 712 if (PremappedHeap) 713 return beg >= NonConstSpaceBeg && 714 beg + size <= NonConstSpaceBeg + kSpaceSize; 715 uptr mapped = address_range.Map(beg, size, name); 716 if (UNLIKELY(!mapped)) 717 return false; 718 CHECK_EQ(beg, mapped); 719 MapUnmapCallback().OnMap(beg, size); 720 return true; 721 } 722 MapWithCallbackOrDie(uptr beg,uptr size,const char * name)723 void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) { 724 if (PremappedHeap) { 725 CHECK_GE(beg, NonConstSpaceBeg); 726 CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize); 727 return; 728 } 729 CHECK_EQ(beg, address_range.MapOrDie(beg, size, name)); 730 MapUnmapCallback().OnMap(beg, size); 731 } 732 UnmapWithCallbackOrDie(uptr beg,uptr size)733 void UnmapWithCallbackOrDie(uptr beg, uptr size) { 734 if (PremappedHeap) 735 return; 736 MapUnmapCallback().OnUnmap(beg, size); 737 address_range.Unmap(beg, size); 738 } 739 EnsureFreeArraySpace(RegionInfo * region,uptr region_beg,uptr num_freed_chunks)740 bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg, 741 uptr num_freed_chunks) { 742 uptr needed_space = num_freed_chunks * sizeof(CompactPtrT); 743 if (region->mapped_free_array < needed_space) { 744 uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize); 745 CHECK_LE(new_mapped_free_array, kFreeArraySize); 746 uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) + 747 region->mapped_free_array; 748 uptr new_map_size = new_mapped_free_array - region->mapped_free_array; 749 if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size, 750 "SizeClassAllocator: freearray"))) 751 return false; 752 region->mapped_free_array = new_mapped_free_array; 753 } 754 return true; 755 } 756 757 // Check whether this size class is exhausted. IsRegionExhausted(RegionInfo * region,uptr class_id,uptr additional_map_size)758 bool IsRegionExhausted(RegionInfo *region, uptr class_id, 759 uptr additional_map_size) { 760 if (LIKELY(region->mapped_user + region->mapped_meta + 761 additional_map_size <= kRegionSize - kFreeArraySize)) 762 return false; 763 if (!region->exhausted) { 764 region->exhausted = true; 765 Printf("%s: Out of memory. ", SanitizerToolName); 766 Printf("The process has exhausted %zuMB for size class %zu.\n", 767 kRegionSize >> 20, ClassIdToSize(class_id)); 768 } 769 return true; 770 } 771 PopulateFreeArray(AllocatorStats * stat,uptr class_id,RegionInfo * region,uptr requested_count)772 NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id, 773 RegionInfo *region, uptr requested_count) { 774 // region->mutex is held. 775 const uptr region_beg = GetRegionBeginBySizeClass(class_id); 776 const uptr size = ClassIdToSize(class_id); 777 778 const uptr total_user_bytes = 779 region->allocated_user + requested_count * size; 780 // Map more space for chunks, if necessary. 781 if (LIKELY(total_user_bytes > region->mapped_user)) { 782 if (UNLIKELY(region->mapped_user == 0)) { 783 if (!kUsingConstantSpaceBeg && kRandomShuffleChunks) 784 // The random state is initialized from ASLR. 785 region->rand_state = static_cast<u32>(region_beg >> 12); 786 // Postpone the first release to OS attempt for ReleaseToOSIntervalMs, 787 // preventing just allocated memory from being released sooner than 788 // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls 789 // for short lived processes. 790 // Do it only when the feature is turned on, to avoid a potentially 791 // extraneous syscall. 792 if (ReleaseToOSIntervalMs() >= 0) 793 region->rtoi.last_release_at_ns = MonotonicNanoTime(); 794 } 795 // Do the mmap for the user memory. 796 const uptr user_map_size = 797 RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize); 798 if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size))) 799 return false; 800 if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user, 801 user_map_size, 802 "SizeClassAllocator: region data"))) 803 return false; 804 stat->Add(AllocatorStatMapped, user_map_size); 805 region->mapped_user += user_map_size; 806 } 807 const uptr new_chunks_count = 808 (region->mapped_user - region->allocated_user) / size; 809 810 if (kMetadataSize) { 811 // Calculate the required space for metadata. 812 const uptr total_meta_bytes = 813 region->allocated_meta + new_chunks_count * kMetadataSize; 814 const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ? 815 RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0; 816 // Map more space for metadata, if necessary. 817 if (meta_map_size) { 818 if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size))) 819 return false; 820 if (UNLIKELY(!MapWithCallback( 821 GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size, 822 meta_map_size, "SizeClassAllocator: region metadata"))) 823 return false; 824 region->mapped_meta += meta_map_size; 825 } 826 } 827 828 // If necessary, allocate more space for the free array and populate it with 829 // newly allocated chunks. 830 const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count; 831 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks))) 832 return false; 833 CompactPtrT *free_array = GetFreeArray(region_beg); 834 for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count; 835 i++, chunk += size) 836 free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk); 837 if (kRandomShuffleChunks) 838 RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count, 839 ®ion->rand_state); 840 841 // All necessary memory is mapped and now it is safe to advance all 842 // 'allocated_*' counters. 843 region->num_freed_chunks += new_chunks_count; 844 region->allocated_user += new_chunks_count * size; 845 CHECK_LE(region->allocated_user, region->mapped_user); 846 region->allocated_meta += new_chunks_count * kMetadataSize; 847 CHECK_LE(region->allocated_meta, region->mapped_meta); 848 region->exhausted = false; 849 850 // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent 851 // MaybeReleaseToOS from releasing just allocated pages or protect these 852 // not yet used chunks some other way. 853 854 return true; 855 } 856 857 // Attempts to release RAM occupied by freed chunks back to OS. The region is 858 // expected to be locked. 859 // 860 // TODO(morehouse): Support a callback on memory release so HWASan can release 861 // aliases as well. MaybeReleaseToOS(MemoryMapperT * memory_mapper,uptr class_id,bool force)862 void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id, 863 bool force) { 864 RegionInfo *region = GetRegionInfo(class_id); 865 const uptr chunk_size = ClassIdToSize(class_id); 866 const uptr page_size = GetPageSizeCached(); 867 868 uptr n = region->num_freed_chunks; 869 if (n * chunk_size < page_size) 870 return; // No chance to release anything. 871 if ((region->stats.n_freed - 872 region->rtoi.n_freed_at_last_release) * chunk_size < page_size) { 873 return; // Nothing new to release. 874 } 875 876 if (!force) { 877 s32 interval_ms = ReleaseToOSIntervalMs(); 878 if (interval_ms < 0) 879 return; 880 881 if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > 882 MonotonicNanoTime()) { 883 return; // Memory was returned recently. 884 } 885 } 886 887 ReleaseFreeMemoryToOS( 888 GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size, 889 RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper, 890 class_id); 891 892 uptr ranges, bytes; 893 if (memory_mapper->GetAndResetStats(ranges, bytes)) { 894 region->rtoi.n_freed_at_last_release = region->stats.n_freed; 895 region->rtoi.num_releases += ranges; 896 region->rtoi.last_released_bytes = bytes; 897 } 898 region->rtoi.last_release_at_ns = MonotonicNanoTime(); 899 } 900 }; 901