//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mman.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_report.h"
#include "tsan_rtl.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {};
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
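    // Trim [p, p + size) to its kPageSize-aligned interior so that the meta
    // shadow range released below covers only whole OS pages.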
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

alignas(64) static char allocator_placeholder[sizeof(Allocator)];
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator combined for
  // the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes; moreover, they are locked only occasionally
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex as a substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

alignas(64) static char global_proc_placeholder[sizeof(GlobalProc)];
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

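// Lock and immediately unlock the fake internal allocator mutex so that the
// deadlock detector observes an internal allocator access (see the comment on
// GlobalProc::internal_alloc_mtx above).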
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void AllocatorLockBeforeFork() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
#if !SANITIZER_APPLE
  // OS X allocates from hooks, see 6a3958247a.
  allocator()->ForceLock();
  StackDepotLockBeforeFork();
#endif
}

void AllocatorUnlockAfterFork(bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
#if !SANITIZER_APPLE
  StackDepotUnlockAfterFork(child);
  allocator()->ForceUnlock();
#endif
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

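// If the current allocation/deallocation is happening inside a signal
// handler, report it as a signal-unsafe call.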
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}


void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
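  // ctx can be null or not yet initialized for allocations made very early
  // during process startup; skip shadow/meta bookkeeping in that case.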
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have trace initialized, we can't imitate writes.
  // In that case just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

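// Map a pointer into a live heap block to the beginning of that block;
// returns nullptr if p does not point into a valid heap allocation.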
static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;

  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.

  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static objects may have malloc'd before tsan completes
  // initialization, and may believe returned ptrs to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

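// Alloc/FreeImpl serve the runtime's own allocations: they use the internal
// allocator rather than the user-visible heap.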
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

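// Free/unmapped byte statistics are not tracked by TSan; a constant
// placeholder value is returned.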
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

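// Flush this thread's allocator caches so that an idle thread does not hold
// on to freed memory.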
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"