//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class they are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow transferring arrays of pointers from the global size class
// freelist to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
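//
// For instance, with a hypothetical RegionSizeLog of 18 (256 KiB regions,
// purely for illustration), the region index of a pointer P is P >> 18, and
// the bytemap entry at that index holds ClassId + 1 for the size class the
// region serves (0 meaning the region is unused).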

template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::PrimaryCompactPtrT CompactPtrT;
  typedef typename Config::SizeClassMap SizeClassMap;
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef typename CacheT::TransferBatch TransferBatch;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatch)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void init(s32 ReleaseToOsInterval) {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTime();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    while (NumberOfStashedRegions > 0)
      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
            RegionSize);
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
      *Sci = {};
    }
    // MaxRegionIndex is inclusive, so the loop bound must be as well.
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }

  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
  }

  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatch *B = Sci->FreeList.front();
    if (B) {
      Sci->FreeList.pop_front();
    } else {
      B = populateFreeList(C, ClassId, Sci);
      if (UNLIKELY(!B))
        return nullptr;
    }
    DCHECK_GT(B->getCount(), 0);
    Sci->Stats.PoppedBlocks += B->getCount();
    return B;
  }

  void pushBatch(uptr ClassId, TransferBatch *B) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(B->getCount(), 0);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    Sci->FreeList.push_front(B);
    Sci->Stats.PushedBlocks += B->getCount();
    if (ClassId != SizeClassMap::BatchClassId)
      releaseToOSMaybe(Sci, ClassId);
  }
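
  // disable() and enable() bracket operations that must not race with the
  // allocator (typically a fork()): every mutex is acquired here and released
  // again in enable(), in the reverse order.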
  void disable() {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    PossibleRegions.disable();
  }

  void enable() {
    PossibleRegions.enable();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }
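
  // Callback below is invoked on the address of every block of every
  // non-batch class; illustrative only:
  //   Primary.iterateOverBlocks([](uptr Block) { /* inspect Block */ });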
  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->Stats.PoppedBlocks;
      PushedBlocks += Sci->Stats.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++)
      getStats(Str, I, 0);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
          Config::PrimaryMinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr releaseToOS() {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct SizeClassStats {
    uptr PoppedBlocks;
    uptr PushedBlocks;
  };

  struct ReleaseToOsInfo {
    uptr PushedBlocksAtLastRelease;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    SinglyLinkedList<TransferBatch> FreeList;
    uptr CurrentRegion;
    uptr CurrentRegionAllocated;
    SizeClassStats Stats;
    u32 RandState;
    uptr AllocatedUser;
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex;
    uptr MaxRegionIndex;
    ReleaseToOsInfo ReleaseInfo;
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }
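
  // map() gives no alignment guarantee, so we over-allocate 2 * RegionSize
  // and trim to a RegionSize-aligned region. If the base happens to already
  // be aligned, the surplus RegionSize is stashed for a later call to
  // allocateRegion() rather than unmapped.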
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUpTo(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);
    return Region;
  }

  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }
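
  // Carves up to MaxNumBatches batches worth of blocks out of the class's
  // current region (mapping a new one if needed), shuffles them (except for
  // the batch class itself), links them into the freelist, and returns one
  // batch to the caller.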
  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
                                           SizeClassInfo *Sci) {
    uptr Region;
    uptr Offset;
    // If the size class currently has a region associated with it, use it.
    // The newly created blocks will be located after the currently allocated
    // memory for that region (up to RegionSize). Otherwise, create a new
    // region, where the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return nullptr;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u32 MaxCount = TransferBatch::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can at least hold one block
    // (via static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatch::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
    // No need to shuffle the batches size class.
    if (ClassId != SizeClassMap::BatchClassId)
      shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
    for (u32 I = 0; I < NumberOfBlocks;) {
      TransferBatch *B =
          C->createBatch(ClassId, reinterpret_cast<void *>(ShuffleArray[I]));
      if (UNLIKELY(!B))
        return nullptr;
      const u32 N = Min(MaxCount, NumberOfBlocks - I);
      B->setFromArray(&ShuffleArray[I], N);
      Sci->FreeList.push_back(B);
      I += N;
    }
    TransferBatch *B = Sci->FreeList.front();
    Sci->FreeList.pop_front();
    DCHECK(B);
    DCHECK_GT(B->getCount(), 0);

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the currently associated region to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return B;
  }

  void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (Sci->AllocatedUser == 0)
      return;
    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
                AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
  }
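
  // A worked example of the small-block heuristic below (illustrative
  // numbers): with 4096-byte pages and 32-byte blocks, 32 < 4096 / 16, so a
  // release is only attempted once the freelist holds at least
  // 100 - 1 - 32 / 16 = 97% of the bytes allocated for the class.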
  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 bool Force = false) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr PageSize = getPageSizeCached();

    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
    if (BytesInFreeList < PageSize)
      return 0; // No chance to release anything.
    const uptr BytesPushed =
        (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
        BlockSize;
    if (BytesPushed < PageSize)
      return 0; // Nothing new to release.

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (BlockSize < PageSize / 16U) {
      if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
        return 0;
      // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
      if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
          (100U - 1U - BlockSize / 16U))
        return 0;
    }

    if (!Force) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return 0;
      if (Sci->ReleaseInfo.LastReleaseAtNs +
              static_cast<u64>(IntervalMs) * 1000000 >
          getMonotonicTime()) {
        return 0; // Memory was returned recently.
      }
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    ReleaseRecorder Recorder(Base);
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    releaseFreeMemoryToOS(Sci->FreeList, RegionSize, NumberOfRegions, BlockSize,
                          &Recorder, DecompactPtr, SkipRegion);
    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();

    return TotalReleasedBytes;
  }

  SizeClassInfo SizeClassInfoArray[NumClasses] = {};

  // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
  ByteMap PossibleRegions = {};
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions = 0;
  uptr RegionsStash[MaxStashedRegions] = {};
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_