//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
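// A front-end can observe allocations by providing strong definitions of
// these hooks. A minimal sketch (hypothetical user code; the public
// declarations in <sanitizer/allocator_interface.h> use size_t rather
// than uptr):
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
//     fprintf(stderr, "allocated %zu bytes at %p\n", size, ptr);
//   }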

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
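    // Worked example (assuming the common configuration where
    // kMetaShadowCell == 8 and kMetaShadowSize == 4, i.e. kMetaRatio == 2,
    // and 4K system pages): kPageSize == 8K, so the trimming below shrinks
    // [p, p + size) to its interior 8K-aligned subrange, whose meta shadow
    // is a whole number of 4K pages that can safely be released.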
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

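// The allocator lives in raw static storage and is initialized explicitly in
// InitializeAllocator(); this avoids running a C++ global constructor inside
// the runtime.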
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
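  // The lock taken here is released in ~ScopedGlobalProcessor, so all users
  // of the global Processor are serialized for the duration of the scope.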
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
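// A typical trigger for the report above (hypothetical user code): calling
// an allocation function from a signal handler. The interceptor then runs
// with thr->in_signal_handler set and, under report_signal_unsafe=1, a
// SignalUnsafe report is produced:
//
//   void handler(int) { void *p = malloc(1); free(p); }  // signal-unsafe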

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
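  // CheckForCallocOverflow has established that n * size does not wrap, so
  // the multiplications below are safe.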
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}
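// Resulting realloc semantics, for reference (p previously returned by
// user_alloc):
//   user_realloc(thr, pc, nullptr, n);  // behaves like malloc(n)
//   user_realloc(thr, pc, p, 0);        // frees p and returns nullptr
//   user_realloc(thr, pc, p, n);        // new block; min(old, n) bytes copied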

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}
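// Note that, per POSIX, user_posix_memalign reports failure through its
// return value (errno_EINVAL / errno_ENOMEM) rather than by setting errno,
// and *memptr is only written on success.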

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
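
// Example query from instrumented code (hypothetical; the declarations come
// from <sanitizer/allocator_interface.h>):
//
//   void *p = malloc(17);
//   assert(__sanitizer_get_ownership(p));
//   assert(__sanitizer_get_allocated_size(p) == 17);  // requested size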
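
// Public interface: may be called when a thread goes idle to return the
// thread's cached vector clocks and per-Processor allocator caches to the
// shared pools, reducing memory held by idle threads.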
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"