//===-- tsan_mman.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
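
// A strong definition of these symbols in the program overrides the weak
// defaults above. Illustrative sketch of such an override (an assumed
// example, not part of this file):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // e.g. record (ptr, size) in a custom allocation profiler
//   }
//   extern "C" void __sanitizer_free_hook(void *ptr) {
//     // e.g. drop ptr from the profiler's records
//   }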

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
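    // Trim [p, p + size) to whole kPageSize-aligned chunks so that the
    // meta range released below is page-aligned as well. A worked example
    // with assumed values (the real ones come from the platform headers):
    // with 4 KiB OS pages and kMetaRatio == 2, kPageSize is 0x2000; for a
    // block at ...0100 of size 0x5000 the range is trimmed to
    // [...2000, ...4000), and the released meta range spans
    // 0x2000 / kMetaRatio == 0x1000 bytes, i.e. exactly one OS page.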
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex stands in for all of the internal allocator's mutexes
  // for the purposes of deadlock detection. The internal allocator
  // uses multiple mutexes; moreover, they are locked only occasionally,
  // and they are spin mutexes which don't support deadlock detection.
  // So we use this fake mutex as a substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

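// Lock/unlock of the fake internal_alloc_mtx with an empty critical
// section: this merely records an acquisition so that the deadlock
// detector sees every internal-allocator access as taking one common
// mutex (see the comment on GlobalProc::internal_alloc_mtx above).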
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

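// AllocatorLock/AllocatorUnlock take the fake internal_alloc_mtx and then
// the internal allocator's own locks, and release them in the reverse
// order. Callers that need the allocator in a consistent, quiescent state
// (e.g. around fork()) bracket the critical region with these.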
void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

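// Hard cap on a single user allocation: 1ull << 40 bytes (1 TiB). The
// max_allocation_size_mb flag can only lower the effective limit; both
// bounds are checked in user_alloc_internal() below.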
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

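// Reports an allocation/deallocation performed from inside a signal
// handler (ReportTypeSignalUnsafe), unless reporting is disabled or the
// report matches a fired suppression.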
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}


void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and the trace is not initialized, we can't imitate writes.
  // In that case just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

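// Invoked on every user allocation/deallocation. Forwards to the weak
// __sanitizer_*_hook functions above (or the front-end's overrides) and
// then to the hooks dispatched through RunMallocHooks/RunFreeHooks.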
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

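// Runtime-internal allocation, distinct from the user_alloc_* path above:
// memory comes from the internal allocator, and each call is modeled as
// an access to the fake internal_alloc_mtx for deadlock detection.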
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

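// Free/unmapped byte counts are not tracked by TSan; the next two entry
// points return a placeholder value of 1.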
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"