//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/crash_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"

#include <assert.h>
#include <stddef.h>

using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton instance of this class. Set during initialisation,
// this allows the signal handler to find the allocator in order to deduce the
// root cause of failures. Must not be referenced by users outside this
// translation unit, in order to avoid the init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

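// Rounds Size up to the next multiple of Boundary; Boundary must be a power
// of two (see the page-size assertion in init()).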
size_t roundUpTo(size_t Size, size_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

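// Returns the start address of the page containing Ptr; PageSize must be a
// power of two.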
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
  return Ptr & ~(PageSize - 1);
}

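// Note: also returns true for X == 0; callers that care assert a non-zero
// alignment separately (see alignUp()/alignDown() below).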
bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return early from init() here if GWP-ASan is not available.
  // This will stop heap-allocation of class members, as well as mmap() of the
  // guarded slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
  check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
  check(Opts.MaxSimultaneousAllocations >= 0,
        "GWP-ASan Error: MaxSimultaneousAllocations is < 0.");

  check(SingletonPtr == nullptr,
        "There's already a live GuardedPoolAllocator!");
  SingletonPtr = this;
  Backtrace = Opts.Backtrace;

  State.VersionMagic = {{AllocatorVersionMagic::kAllocatorVersionMagic[0],
                         AllocatorVersionMagic::kAllocatorVersionMagic[1],
                         AllocatorVersionMagic::kAllocatorVersionMagic[2],
                         AllocatorVersionMagic::kAllocatorVersionMagic[3]},
                        AllocatorVersionMagic::kAllocatorVersion,
                        0};

  State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  const size_t PageSize = getPlatformPageSize();
  // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
  assert((PageSize & (PageSize - 1)) == 0);
  State.PageSize = PageSize;

  // Number of pages required =
  //  + MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
  //  + MaxSimultaneousAllocations (one guard on the left side of each slot)
  //  + 1 (an extra guard page at the end of the pool, on the right side)
  //  + 1 (an extra page that's used for reporting internally-detected crashes,
  //       like double free and invalid free, to the signal handler; see
  //       raiseInternallyDetectedError() for more info)
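  // For illustration only (not real defaults): with 4 KiB pages, 16 slots, and
  // single-page slots (maximumAllocationSize() == PageSize), this reserves
  // 4096 * (2 + 16) + 16 * 4096 = 139264 bytes, i.e. 34 pages: 16 slot pages,
  // 16 left guard pages, one right guard page, and one internal-error page.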
  size_t PoolBytesRequired =
      PageSize * (2 + State.MaxSimultaneousAllocations) +
      State.MaxSimultaneousAllocations * State.maximumAllocationSize();
  assert(PoolBytesRequired % PageSize == 0);
  void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);

  size_t BytesRequired =
      roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      map(BytesRequired, kGwpAsanMetadataName));

  // Allocate memory and set up the free-slots queue.
  BytesRequired = roundUpTo(
      State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
  FreeSlots =
      reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  initPRNG();
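  // NextSampleCounter is drawn uniformly from [1, AdjustedSampleRatePlusOne -
  // 1], so on average roughly one in every SampleRate allocations is sampled.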
  getThreadLocals()->NextSampleCounter =
      ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
      ThreadLocalPackedVariables::NextSampleCounterMask;

  State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  State.GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  if (Opts.InstallForkHandlers)
    installAtFork();
}

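// disable()/enable() bracket sections where the pool must not change under
// the caller (e.g. while crash reports are dumped, or around fork(); see
// installAtFork()); they simply acquire and release both allocator locks.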
void GuardedPoolAllocator::disable() {
  PoolMutex.lock();
  BacktraceMutex.lock();
}

void GuardedPoolAllocator::enable() {
  PoolMutex.unlock();
  BacktraceMutex.unlock();
}

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < State.MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.RequestedSize, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (State.GuardedPagePool) {
    unreserveGuardedPool();
    State.GuardedPagePool = 0;
    State.GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmap(Metadata,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
                    State.PageSize));
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmap(FreeSlots,
          roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
                    State.PageSize));
    FreeSlots = nullptr;
  }
  *getThreadLocals() = ThreadLocalPackedVariables();
  SingletonPtr = nullptr;
}

// Note: the minimum backing allocation size in GWP-ASan is always one page,
// and each slot could potentially be multiple pages (but always in
// page-increments). Thus, for anything that requires less-than-page-size
// alignment, we don't need to allocate extra padding to ensure the alignment
// can be met.
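// For alignments larger than a page, a slot start is only guaranteed to be
// page-aligned, so up to (Alignment - PageSize) extra bytes may be needed to
// reach an Alignment boundary; hence the padding below.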
size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
                                                    size_t Alignment,
                                                    size_t PageSize) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  assert(Size != 0 && "Size should be non-zero");

  if (Alignment <= PageSize)
    return Size;

  return Size + Alignment - PageSize;
}

uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr += Alignment - (Ptr & (Alignment - 1));
  return Ptr;
}

uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
  assert(Alignment != 0 && "Alignment should be non-zero");
  if ((Ptr & (Alignment - 1)) == 0)
    return Ptr;

  Ptr -= Ptr & (Alignment - 1);
  return Ptr;
}

void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (State.GuardedPagePoolEnd == 0) {
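    // Reset the sample counter to its maximum so that this (disabled) slow
    // path is taken as rarely as possible by subsequent allocations.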
    getThreadLocals()->NextSampleCounter =
        (AdjustedSampleRatePlusOne - 1) &
        ThreadLocalPackedVariables::NextSampleCounterMask;
    return nullptr;
  }

  if (Size == 0)
    Size = 1;
  if (Alignment == 0)
    Alignment = alignof(max_align_t);

  if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
      Size > State.maximumAllocationSize())
    return nullptr;

  size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
  if (BackingSize > State.maximumAllocationSize())
    return nullptr;

  // Protect against recursion.
  if (getThreadLocals()->RecursiveGuard)
    return nullptr;
  ScopedRecursiveGuard SRG;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t SlotStart = State.slotToAddr(Index);
  AllocationMetadata *Meta = addrToMetadata(SlotStart);
  uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
  uintptr_t UserPtr;
  // Randomly choose whether to left-align or right-align the allocation, and
  // then apply the necessary adjustments to get an aligned pointer.
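  // (Right-aligned allocations sit against the right guard page and tend to
  // catch buffer overflows; left-aligned ones tend to catch underflows.)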
  if (getRandomUnsigned32() % 2 == 0)
    UserPtr = alignUp(SlotStart, Alignment);
  else
    UserPtr = alignDown(SlotEnd - Size, Alignment);

  assert(UserPtr >= SlotStart);
  assert(UserPtr + Size <= SlotEnd);

  // If a slot is multiple pages in size and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages
  // unmapped.
  const size_t PageSize = State.PageSize;
  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
      roundUpTo(Size, PageSize));
  Meta->RecordAllocation(UserPtr, Size);
  {
    ScopedLock UL(BacktraceMutex);
    Meta->AllocationTrace.RecordBacktrace(Backtrace);
  }

  return reinterpret_cast<void *>(UserPtr);
}

void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
                                                        Error E) {
  // Disable the allocator before setting the internal failure state. In
  // non-recoverable mode, the allocator will be permanently disabled, and so
  // things will be accessed without locks.
  disable();

  // Races between internally- and externally-raised faults can happen. Right
  // now, in this thread we've locked the allocator in order to raise an
  // internally-detected fault, and another thread could SIGSEGV to raise an
  // externally-detected fault. What will happen is that the other thread will
  // wait in the signal handler, as we hold the allocator's locks from the
  // disable() above. We'll trigger the signal handler by touching the
  // internal-signal-raising address below, and the signal handler from our
  // thread will get to run first, as we will continue to hold the allocator
  // locks until the enable() at the end of this function. Be careful, though:
  // if this thread receives another SIGSEGV after the disable() above, but
  // before touching the internal-signal-raising address below, then this
  // thread will get an "externally-raised" SIGSEGV while *also* holding the
  // allocator locks, which means this thread's signal handler will deadlock.
  // This could be resolved with a re-entrant lock, but asking platforms to
  // implement this seems unnecessary given the only ways to get a SIGSEGV in
  // this critical section are either a memory safety bug in the couple of
  // lines of code below (be careful!), or someone outside using
  // `kill(this_thread, SIGSEGV)`, which really shouldn't happen.

  State.FailureType = E;
  State.FailureAddress = Address;

  // Raise a SEGV by touching a specific address that identifies to the crash
  // handler that this is an internally-raised fault. Changing this address?
  // Don't forget to update __gwp_asan_get_internal_crash_address.
  volatile char *p =
      reinterpret_cast<char *>(State.internallyDetectedErrorFaultAddress());
  *p = 0;

  // This should never be reached in non-recoverable mode. Ensure that the
  // signal handler called handleRecoverablePostCrashReport(), which was
  // responsible for re-setting these fields.
  assert(State.FailureType == Error::UNKNOWN);
  assert(State.FailureAddress == 0u);

  // In recoverable mode, the signal handler (after dumping the crash) marked
  // the page containing the InternalFaultSegvAddress as read/writeable, to
  // allow the second touch to succeed after returning from the signal handler.
  // Now, we need to mark the page as inaccessible again, so future internal
  // faults can be raised.
  deallocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(
          State.internallyDetectedErrorFaultAddress(), State.PageSize)),
      State.PageSize);

  // Now that we're done patching ourselves back up, re-enable the allocator.
  enable();
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  size_t Slot = State.getNearestSlot(UPtr);
  uintptr_t SlotStart = State.slotToAddr(Slot);
  AllocationMetadata *Meta = addrToMetadata(UPtr);

  // If this allocation is responsible for a crash, never recycle it. Turn the
  // deallocate() call into a no-op.
  if (Meta->HasCrashed)
    return;

  if (Meta->Addr != UPtr) {
    raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
    return;
  }
  if (Meta->IsDeallocated) {
    raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
    return;
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation();

    // Ensure that the unwinder is not called if the recursive flag is set,
    // otherwise non-reentrant unwinders may deadlock.
    if (!getThreadLocals()->RecursiveGuard) {
      ScopedRecursiveGuard SRG;
      ScopedLock UL(BacktraceMutex);
      Meta->DeallocationTrace.RecordBacktrace(Backtrace);
    }
  }

  deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
                          State.maximumAllocationSize());

  // And finally, lock again to release the slot back into the pool.
  ScopedLock L(PoolMutex);
  freeSlot(Slot);
}

// Thread-compatible, protected by PoolMutex.
static bool PreviousRecursiveGuard;

void GuardedPoolAllocator::preCrashReport(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
      &State, reinterpret_cast<uintptr_t>(Ptr));
  if (!InternalCrashAddr)
    disable();

  // If something in the signal handler calls malloc() while dumping the
  // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
  // service that allocation. `PreviousRecursiveGuard` is protected by the
  // allocator locks taken in disable(), either explicitly above for
  // externally-raised errors, or implicitly in raiseInternallyDetectedError()
  // for internally-detected errors.
  PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
  getThreadLocals()->RecursiveGuard = true;
}

void GuardedPoolAllocator::postCrashReportRecoverableOnly(void *SignalPtr) {
  uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
  uintptr_t InternalCrashAddr =
      __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
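  // `?:` is the GNU "Elvis" extension: use InternalCrashAddr if it's non-zero,
  // otherwise fall back to SignalUPtr.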
  uintptr_t ErrorUptr = InternalCrashAddr ?: SignalUPtr;

  AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
  Metadata->HasCrashed = true;

  allocateInGuardedPool(
      reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
      State.PageSize);

  // Clear the internal state in order to not confuse the crash handler if a
  // use-after-free or buffer-overflow comes from a different allocation in the
  // future.
  if (InternalCrashAddr) {
    State.FailureType = Error::UNKNOWN;
    State.FailureAddress = 0;
  }

  size_t Slot = State.getNearestSlot(ErrorUptr);
  // If the slot is currently in the free list, remove it permanently so the
  // crashed allocation is never recycled (swap-with-last removal).
  for (size_t i = 0; i < FreeSlotsLength; ++i) {
    if (FreeSlots[i] == Slot) {
      FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
      FreeSlotsLength -= 1;
      break;
    }
  }

  getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
  if (!InternalCrashAddr)
    enable();
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->RequestedSize;
}

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[State.getNearestSlot(Ptr)];
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < State.MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

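  // Pick a random free slot and remove it with a swap-with-last, which keeps
  // the free list densely packed in O(1).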
  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < State.MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

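// This is the classic xorshift32 PRNG (Marsaglia's shift triple 13/17/5).
// Statistical quality is unimportant here; it only drives sampling decisions
// and slot/alignment randomization.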
uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
  uint32_t RandomState = getThreadLocals()->RandomState;
  RandomState ^= RandomState << 13;
  RandomState ^= RandomState >> 17;
  RandomState ^= RandomState << 5;
  getThreadLocals()->RandomState = RandomState;
  return RandomState;
}
} // namespace gwp_asan