//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};
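// Illustrative walk-through of the trimming in OnUnmap above. The numbers are
// made up and assume a 4K system page and the typical kMetaShadowCell == 8,
// kMetaShadowSize == 4 (so kMetaRatio == 2 and kPageSize == 8K):
//   p = 0x701000, size = 0x6000
//   RoundUp(p, 8K) == 0x702000        -> trim 0x1000 at the front: p = 0x702000, size = 0x5000
//   RoundDown(p + size, 8K) == 0x706000 -> trim 0x1000 at the back: size = 0x4000
// Meta shadow for [0x702000, 0x706000) is then released: size / kMetaRatio ==
// 0x2000 bytes starting at MemToMeta(0x702000). Only whole meta pages are
// released; the partial pages at both ends are left untouched.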

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex represents the internal allocator's mutexes, combined,
  // for the purposes of deadlock detection. The internal allocator uses
  // multiple mutexes; they are locked only occasionally, and they are spin
  // mutexes which don't support deadlock detection. So we use this fake
  // mutex as a substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

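// Performs an empty lock/unlock of the fake internal-allocator mutex above.
// This records the acquisition order of internal_alloc_mtx relative to the
// mutexes currently held by the caller, so the deadlock detector can flag
// lock-order inversions involving the internal allocator without actually
// serializing anything.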
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}
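// Note: when the global Processor is borrowed, gp->mtx is acquired in the
// constructor and released only in the destructor, so proc-less threads
// (e.g. late frees from __nptl_deallocate_tsd after the thread has finished)
// are serialized against each other for the entire scope.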

void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}
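// These four helpers let the rest of the runtime freeze allocator and
// global-processor state across operations that must not observe it
// mid-change. The expected usage (the callers live outside this file) is a
// bracketed lock/unlock pair, e.g. around fork(), where the child would
// otherwise inherit a mutex locked by another thread.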

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}
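// The limits configured above come from common sanitizer flags. An
// illustrative invocation (the flag names are real, the values are made up):
//   TSAN_OPTIONS="allocator_may_return_null=1 max_allocation_size_mb=4096 \
//                 allocator_release_to_os_interval_ms=5000" ./app
// caps a single allocation at 4 GB (failing with a null return rather than a
// fatal report) and has the allocator return unused memory to the OS roughly
// every 5 seconds.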

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
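// Sketch of the situation SignalUnsafeCall reports (hypothetical user code):
//   void handler(int) { free(malloc(1)); }  // allocating in a signal handler
//   signal(SIGPROF, handler);
// While the handler runs, thr->in_signal_handler is non-zero, so malloc/free
// produce a ReportTypeSignalUnsafe report unless that report type is
// suppressed or disabled by flags.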


void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}
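// CheckForCallocOverflow above guards the n * size multiplication. A sketch of
// the failure mode it prevents (hypothetical values, 64-bit target):
//   calloc(1ull << 33, 1ull << 31)  // the product is 2^64 and wraps to 0
// Without the check the allocator would be asked for a 0-byte block; with it,
// tsan either returns null (if allocator_may_return_null=1) or dies with a
// calloc-overflow report.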

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have trace initialized, we can't imitate writes.
  // In such a case just reset the shadow range; it is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}
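// posix_memalign requires the alignment to be a power of two and a multiple
// of sizeof(void *); CheckPosixMemalignAlignment enforces that. Illustrative
// use (hypothetical values):
//   void *q;
//   int rc = posix_memalign(&q, 64, 1024);  // rc == 0, q is 64-byte aligned
//   rc = posix_memalign(&q, 3, 1024);       // invalid alignment: fatal report,
//                                           // or EINVAL if allocator_may_return_null=1
// Note that q is left untouched on the error paths above.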

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
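// pvalloc rounds the request up to a whole number of pages. Assuming a 4K
// page: pvalloc(0) and pvalloc(1) both allocate 4K, pvalloc(4097) allocates
// 8K, and a size for which the round-up would overflow is rejected by
// CheckForPvallocOverflow above.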

static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;

  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.

  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}
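// Note: b->siz is the size that was originally requested (as recorded via
// OnUserAlloc), not the possibly larger size class the allocator rounded it
// up to, so a malloc_usable_size() query routed through this function reports
// the requested size, with zero-sized allocations reported as 1 byte.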

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static objects may have malloc'd memory before tsan completed
  // initialization and may believe the returned pointers to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}
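// The hooks forwarded above can be registered from user code; a sketch
// (assuming the declarations from <sanitizer/allocator_interface.h>):
//   static void my_malloc_hook(const volatile void *p, size_t sz) { /* ... */ }
//   static void my_free_hook(const volatile void *p) { /* ... */ }
//   __sanitizer_install_malloc_and_free_hooks(my_malloc_hook, my_free_hook);
// Hooks are intentionally skipped before ctx is initialized and while the
// current thread is ignoring interceptors (see the checks above).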

void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"