1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
14
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_glibc_version.h"
18 #include "sanitizer_common/sanitizer_libc.h"
19 #include "sanitizer_common/sanitizer_linux.h"
20 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
21 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
22 #include "sanitizer_common/sanitizer_placement_new.h"
23 #include "sanitizer_common/sanitizer_posix.h"
24 #include "sanitizer_common/sanitizer_stacktrace.h"
25 #include "sanitizer_common/sanitizer_tls_get_addr.h"
26 #include "interception/interception.h"
27 #include "tsan_interceptors.h"
28 #include "tsan_interface.h"
29 #include "tsan_platform.h"
30 #include "tsan_suppressions.h"
31 #include "tsan_rtl.h"
32 #include "tsan_mman.h"
33 #include "tsan_fd.h"
34
35 #include <stdarg.h>
36
37 using namespace __tsan;
38
39 DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
40 DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
41
42 #if SANITIZER_FREEBSD || SANITIZER_APPLE
43 #define stdout __stdoutp
44 #define stderr __stderrp
45 #endif
46
47 #if SANITIZER_NETBSD
48 #define dirfd(dirp) (*(int *)(dirp))
49 #define fileno_unlocked(fp) \
50 (((__sanitizer_FILE *)fp)->_file == -1 \
51 ? -1 \
52 : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
53
54 #define stdout ((__sanitizer_FILE*)&__sF[1])
55 #define stderr ((__sanitizer_FILE*)&__sF[2])
56
57 #define nanosleep __nanosleep50
58 #define vfork __vfork14
59 #endif
60
61 #ifdef __mips__
62 const int kSigCount = 129;
63 #else
64 const int kSigCount = 65;
65 #endif
66
67 #ifdef __mips__
68 struct ucontext_t {
69 u64 opaque[768 / sizeof(u64) + 1];
70 };
71 #else
72 struct ucontext_t {
73   // The size is determined by looking at sizeof(ucontext_t) on Linux.
74 u64 opaque[936 / sizeof(u64) + 1];
75 };
76 #endif
77
78 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
79 defined(__s390x__)
80 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
81 #elif defined(__aarch64__) || SANITIZER_PPC64V2
82 #define PTHREAD_ABI_BASE "GLIBC_2.17"
83 #elif SANITIZER_LOONGARCH64
84 #define PTHREAD_ABI_BASE "GLIBC_2.36"
85 #elif SANITIZER_RISCV64
86 # define PTHREAD_ABI_BASE "GLIBC_2.27"
87 #endif
88
89 extern "C" int pthread_attr_init(void *attr);
90 extern "C" int pthread_attr_destroy(void *attr);
91 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
92 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
93 extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
94 void (*child)(void));
95 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
96 extern "C" int pthread_setspecific(unsigned key, const void *v);
97 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
98 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
99 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
100 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
101 extern "C" int pthread_equal(void *t1, void *t2);
102 extern "C" void *pthread_self();
103 extern "C" void _exit(int status);
104 #if !SANITIZER_NETBSD
105 extern "C" int fileno_unlocked(void *stream);
106 extern "C" int dirfd(void *dirp);
107 #endif
108 #if SANITIZER_NETBSD
109 extern __sanitizer_FILE __sF[];
110 #else
111 extern __sanitizer_FILE *stdout, *stderr;
112 #endif
113 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
114 const int PTHREAD_MUTEX_RECURSIVE = 1;
115 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
116 #else
117 const int PTHREAD_MUTEX_RECURSIVE = 2;
118 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
119 #endif
120 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
121 const int EPOLL_CTL_ADD = 1;
122 #endif
123 const int SIGILL = 4;
124 const int SIGTRAP = 5;
125 const int SIGABRT = 6;
126 const int SIGFPE = 8;
127 const int SIGSEGV = 11;
128 const int SIGPIPE = 13;
129 const int SIGTERM = 15;
130 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
131 const int SIGBUS = 10;
132 const int SIGSYS = 12;
133 #else
134 const int SIGBUS = 7;
135 const int SIGSYS = 31;
136 #endif
137 #if SANITIZER_HAS_SIGINFO
138 const int SI_TIMER = -2;
139 #endif
140 void *const MAP_FAILED = (void*)-1;
141 #if SANITIZER_NETBSD
142 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
143 #elif !SANITIZER_APPLE
144 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
145 #endif
146 const int MAP_FIXED = 0x10;
147 typedef long long_t;
148 typedef __sanitizer::u16 mode_t;
149
150 // From /usr/include/unistd.h
151 # define F_ULOCK 0 /* Unlock a previously locked region. */
152 # define F_LOCK 1 /* Lock a region for exclusive use. */
153 # define F_TLOCK 2 /* Test and lock a region for exclusive use. */
154 # define F_TEST 3 /* Test a region for other processes locks. */
155
156 #if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
157 const int SA_SIGINFO = 0x40;
158 const int SIG_SETMASK = 3;
159 #elif defined(__mips__)
160 const int SA_SIGINFO = 8;
161 const int SIG_SETMASK = 3;
162 #else
163 const int SA_SIGINFO = 4;
164 const int SIG_SETMASK = 2;
165 #endif
166
167 namespace __tsan {
168 struct SignalDesc {
169 bool armed;
170 __sanitizer_siginfo siginfo;
171 ucontext_t ctx;
172 };
173
174 struct ThreadSignalContext {
175 int int_signal_send;
176 SignalDesc pending_signals[kSigCount];
177 // emptyset and oldset are too big for the stack.
178 __sanitizer_sigset_t emptyset;
179 __sanitizer_sigset_t oldset;
180 };
181
182 void EnterBlockingFunc(ThreadState *thr) {
183 for (;;) {
184 // The order is important to not delay a signal infinitely if it's
185 // delivered right before we set in_blocking_func. Note: we can't call
186 // ProcessPendingSignals when in_blocking_func is set, or we can handle
187 // a signal synchronously when we are already handling a signal.
188 atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
189 if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
190 break;
191 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
192 ProcessPendingSignals(thr);
193 }
194 }
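// Note: this pairs with ~BlockingCall() below, which clears in_blocking_func
// again. While the flag is set, arriving signals are handled right away in the
// signal handler instead of being queued as pending (see BlockingCall).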
195
196 // The sole reason tsan wraps atexit callbacks is to establish synchronization
197 // between callback setup and callback execution.
198 struct AtExitCtx {
199 void (*f)();
200 void *arg;
201 uptr pc;
202 };
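// The synchronization works as follows: setup_at_exit_wrapper() below does a
// Release() on the heap-allocated AtExitCtx when the callback is registered,
// and the *_callback_installed_at() wrappers Acquire() on the same address
// right before invoking the user callback.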
203
204 // InterceptorContext holds all global data required for interceptors.
205 // It's explicitly constructed in InitializeInterceptors with placement new
206 // and is never destroyed. This allows usage of members with non-trivial
207 // constructors and destructors.
208 struct InterceptorContext {
209 // The object is 64-byte aligned, because we want hot data to be located
210 // in a single cache line if possible (it's accessed in every interceptor).
211 alignas(64) LibIgnore libignore;
212 __sanitizer_sigaction sigactions[kSigCount];
213 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
214 unsigned finalize_key;
215 #endif
216
217 Mutex atexit_mu;
218 Vector<struct AtExitCtx *> AtExitStack;
219
220   InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
221 };
222
223 alignas(64) static char interceptor_placeholder[sizeof(InterceptorContext)];
224 InterceptorContext *interceptor_ctx() {
225 return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
226 }
227
228 LibIgnore *libignore() {
229 return &interceptor_ctx()->libignore;
230 }
231
232 void InitializeLibIgnore() {
233 const SuppressionContext &supp = *Suppressions();
234 const uptr n = supp.SuppressionCount();
235 for (uptr i = 0; i < n; i++) {
236 const Suppression *s = supp.SuppressionAt(i);
237 if (0 == internal_strcmp(s->type, kSuppressionLib))
238 libignore()->AddIgnoredLibrary(s->templ);
239 }
240 if (flags()->ignore_noninstrumented_modules)
241 libignore()->IgnoreNoninstrumentedModules(true);
242 libignore()->OnLibraryLoaded(0);
243 }
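// For reference, this consumes "called_from_lib" suppressions; a suppressions
// file line such as (library name is just an example):
//   called_from_lib:libexample.so
// adds the named library to libignore, so accesses made from it are ignored.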
244
245 // The following two hooks can be used for cooperative scheduling when
246 // locking.
247 #ifdef TSAN_EXTERNAL_HOOKS
248 void OnPotentiallyBlockingRegionBegin();
249 void OnPotentiallyBlockingRegionEnd();
250 #else
251 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
252 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
253 #endif
254
255 } // namespace __tsan
256
257 static ThreadSignalContext *SigCtx(ThreadState *thr) {
258 // This function may be called reentrantly if it is interrupted by a signal
259 // handler. Use CAS to handle the race.
260 uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
261 if (ctx == 0 && !thr->is_dead) {
262 uptr pctx =
263 (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
264 MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
265 if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
266 memory_order_relaxed)) {
267 ctx = pctx;
268 } else {
269 UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
270 }
271 }
272 return (ThreadSignalContext *)ctx;
273 }
274
275 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
276 uptr pc)
277 : thr_(thr) {
278 LazyInitialize(thr);
279 if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
280 // pthread_join is marked as blocking, but it's also known to call other
281 // intercepted functions (mmap, free). If we don't reset in_blocking_func
282 // we can get deadlocks and memory corruptions if we deliver a synchronous
283 // signal inside of an mmap/free interceptor.
284 // So reset it and restore it back in the destructor.
285 // See https://github.com/google/sanitizers/issues/1540
286 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
287 in_blocking_func_ = true;
288 }
289 if (!thr_->is_inited) return;
290 if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
291 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
292 ignoring_ =
293 !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
294 libignore()->IsIgnored(pc, &in_ignored_lib_));
295 EnableIgnores();
296 }
297
298 ScopedInterceptor::~ScopedInterceptor() {
299 if (!thr_->is_inited) return;
300 DisableIgnores();
301 if (UNLIKELY(in_blocking_func_))
302 EnterBlockingFunc(thr_);
303 if (!thr_->ignore_interceptors) {
304 ProcessPendingSignals(thr_);
305 FuncExit(thr_);
306 CheckedMutex::CheckNoLocks();
307 }
308 }
309
310 NOINLINE
311 void ScopedInterceptor::EnableIgnoresImpl() {
312 ThreadIgnoreBegin(thr_, 0);
313 if (flags()->ignore_noninstrumented_modules)
314 thr_->suppress_reports++;
315 if (in_ignored_lib_) {
316 DCHECK(!thr_->in_ignored_lib);
317 thr_->in_ignored_lib = true;
318 }
319 }
320
321 NOINLINE
322 void ScopedInterceptor::DisableIgnoresImpl() {
323 ThreadIgnoreEnd(thr_);
324 if (flags()->ignore_noninstrumented_modules)
325 thr_->suppress_reports--;
326 if (in_ignored_lib_) {
327 DCHECK(thr_->in_ignored_lib);
328 thr_->in_ignored_lib = false;
329 }
330 }
331
332 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
333 #if SANITIZER_FREEBSD || SANITIZER_NETBSD
334 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
335 #else
336 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
337 #endif
338 #if SANITIZER_FREEBSD
339 # define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
340 INTERCEPT_FUNCTION(_pthread_##func)
341 #else
342 # define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
343 #endif
344 #if SANITIZER_NETBSD
345 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
346 INTERCEPT_FUNCTION(__libc_##func)
347 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
348 INTERCEPT_FUNCTION(__libc_thr_##func)
349 #else
350 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
351 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
352 #endif
353
354 #define READ_STRING_OF_LEN(thr, pc, s, len, n) \
355 MemoryAccessRange((thr), (pc), (uptr)(s), \
356 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
357
358 #define READ_STRING(thr, pc, s, n) \
359 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
360
361 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
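// BLOCK_REAL relies on the comma operator: the BlockingCall temporary lives
// until the end of the full expression, so in a call like
//   int res = BLOCK_REAL(usleep)(usec);
// signals are delivered asynchronously for the whole duration of the real call.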
362
363 struct BlockingCall {
364   explicit BlockingCall(ThreadState *thr)
365 : thr(thr) {
366 EnterBlockingFunc(thr);
367 // When we are in a "blocking call", we process signals asynchronously
368 // (right when they arrive). In this context we do not expect to be
369 // executing any user/runtime code. The known interceptor sequence when
370 // this is not true is: pthread_join -> munmap(stack). It's fine
371 // to ignore munmap in this case -- we handle stack shadow separately.
372 thr->ignore_interceptors++;
373 }
374
375   ~BlockingCall() {
376 thr->ignore_interceptors--;
377 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
378 }
379
380 ThreadState *thr;
381 };
382
383 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
384 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
385 unsigned res = BLOCK_REAL(sleep)(sec);
386 AfterSleep(thr, pc);
387 return res;
388 }
389
390 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
391 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
392 int res = BLOCK_REAL(usleep)(usec);
393 AfterSleep(thr, pc);
394 return res;
395 }
396
397 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
398 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
399 int res = BLOCK_REAL(nanosleep)(req, rem);
400 AfterSleep(thr, pc);
401 return res;
402 }
403
404 TSAN_INTERCEPTOR(int, pause, int fake) {
405 SCOPED_TSAN_INTERCEPTOR(pause, fake);
406 return BLOCK_REAL(pause)(fake);
407 }
408
409 // Note: we specifically call the function in such a strange way
410 // with "installed_at" because in reports it will appear between
411 // callback frames and the frame that installed the callback.
412 static void at_exit_callback_installed_at() {
413 AtExitCtx *ctx;
414 {
415 // Ensure thread-safety.
416 Lock l(&interceptor_ctx()->atexit_mu);
417
418 // Pop AtExitCtx from the top of the stack of callback functions
419 uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
420 ctx = interceptor_ctx()->AtExitStack[element];
421 interceptor_ctx()->AtExitStack.PopBack();
422 }
423
424 ThreadState *thr = cur_thread();
425 Acquire(thr, ctx->pc, (uptr)ctx);
426 FuncEntry(thr, ctx->pc);
427 ((void(*)())ctx->f)();
428 FuncExit(thr);
429 Free(ctx);
430 }
431
432 static void cxa_at_exit_callback_installed_at(void *arg) {
433 ThreadState *thr = cur_thread();
434 AtExitCtx *ctx = (AtExitCtx*)arg;
435 Acquire(thr, ctx->pc, (uptr)arg);
436 FuncEntry(thr, ctx->pc);
437 ((void(*)(void *arg))ctx->f)(ctx->arg);
438 FuncExit(thr);
439 Free(ctx);
440 }
441
442 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
443 void *arg, void *dso);
444
445 #if !SANITIZER_ANDROID
446 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
447 if (in_symbolizer())
448 return 0;
449 // We want to set up the atexit callback even if we are in an ignored lib
450 // or after fork.
451 SCOPED_INTERCEPTOR_RAW(atexit, f);
452 return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
453 }
454 #endif
455
456 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
457 if (in_symbolizer())
458 return 0;
459 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
460 return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
461 }
462
463 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
464 void *arg, void *dso) {
465 auto *ctx = New<AtExitCtx>();
466 ctx->f = f;
467 ctx->arg = arg;
468 ctx->pc = pc;
469 Release(thr, pc, (uptr)ctx);
470 // Memory allocation in __cxa_atexit will race with free during exit,
471 // because we do not see synchronization around atexit callback list.
472 ThreadIgnoreBegin(thr, pc);
473 int res;
474 if (!dso) {
475 // NetBSD does not preserve the 2nd argument if dso is equal to 0
476 // Store ctx in a local stack-like structure
477
478 // Ensure thread-safety.
479 Lock l(&interceptor_ctx()->atexit_mu);
480 // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
481 // due to atexit_mu held on exit from the calloc interceptor.
482 ScopedIgnoreInterceptors ignore;
483
484 res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
485 0, 0);
486 // Push AtExitCtx on the top of the stack of callback functions
487 if (!res) {
488 interceptor_ctx()->AtExitStack.PushBack(ctx);
489 }
490 } else {
491 res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
492 }
493 ThreadIgnoreEnd(thr);
494 return res;
495 }
496
497 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
498 static void on_exit_callback_installed_at(int status, void *arg) {
499 ThreadState *thr = cur_thread();
500 AtExitCtx *ctx = (AtExitCtx*)arg;
501 Acquire(thr, ctx->pc, (uptr)arg);
502 FuncEntry(thr, ctx->pc);
503 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
504 FuncExit(thr);
505 Free(ctx);
506 }
507
508 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
509 if (in_symbolizer())
510 return 0;
511 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
512 auto *ctx = New<AtExitCtx>();
513 ctx->f = (void(*)())f;
514 ctx->arg = arg;
515 ctx->pc = GET_CALLER_PC();
516 Release(thr, pc, (uptr)ctx);
517 // Memory allocation in __cxa_atexit will race with free during exit,
518 // because we do not see synchronization around atexit callback list.
519 ThreadIgnoreBegin(thr, pc);
520 int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
521 ThreadIgnoreEnd(thr);
522 return res;
523 }
524 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
525 #else
526 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
527 #endif
528
529 // Cleanup old bufs.
530 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
531 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
532 JmpBuf *buf = &thr->jmp_bufs[i];
533 if (buf->sp <= sp) {
534 uptr sz = thr->jmp_bufs.Size();
535 internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
536 thr->jmp_bufs.PopBack();
537 i--;
538 }
539 }
540 }
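// A buf whose sp is at or below the sp of a newer setjmp belongs to a frame
// that has already returned (the stack grows down), so longjmp-ing to it would
// be invalid anyway and it is safe to drop it here.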
541
542 static void SetJmp(ThreadState *thr, uptr sp) {
543 if (!thr->is_inited) // called from libc guts during bootstrap
544 return;
545 // Cleanup old bufs.
546 JmpBufGarbageCollect(thr, sp);
547 // Remember the buf.
548 JmpBuf *buf = thr->jmp_bufs.PushBack();
549 buf->sp = sp;
550 buf->shadow_stack_pos = thr->shadow_stack_pos;
551 ThreadSignalContext *sctx = SigCtx(thr);
552 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
553 buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
554 buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
555 memory_order_relaxed);
556 }
557
558 static void LongJmp(ThreadState *thr, uptr *env) {
559 uptr sp = ExtractLongJmpSp(env);
560 // Find the saved buf with matching sp.
561 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
562 JmpBuf *buf = &thr->jmp_bufs[i];
563 if (buf->sp == sp) {
564 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
565 // Unwind the stack.
566 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
567 FuncExit(thr);
568 ThreadSignalContext *sctx = SigCtx(thr);
569 if (sctx)
570 sctx->int_signal_send = buf->int_signal_send;
571 atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
572 memory_order_relaxed);
573 atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
574 memory_order_relaxed);
575 JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
576 return;
577 }
578 }
579 Printf("ThreadSanitizer: can't find longjmp buf\n");
580 CHECK(0);
581 }
582
583 // FIXME: put everything below into a common extern "C" block?
584 extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
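// The setjmp/sigsetjmp entry points themselves are intercepted in assembly
// (the per-architecture tsan_rtl_*.S files), which call __tsan_setjmp() with
// the stack pointer before tail-calling the real function; only the REAL()
// pointers are set up below.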
585
586 #if SANITIZER_APPLE
587 TSAN_INTERCEPTOR(int, setjmp, void *env);
588 TSAN_INTERCEPTOR(int, _setjmp, void *env);
589 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
590 #else // SANITIZER_APPLE
591
592 #if SANITIZER_NETBSD
593 #define setjmp_symname __setjmp14
594 #define sigsetjmp_symname __sigsetjmp14
595 #else
596 #define setjmp_symname setjmp
597 #define sigsetjmp_symname sigsetjmp
598 #endif
599
600 DEFINE_REAL(int, setjmp_symname, void *env)
601 DEFINE_REAL(int, _setjmp, void *env)
602 DEFINE_REAL(int, sigsetjmp_symname, void *env)
603 #if !SANITIZER_NETBSD
604 DEFINE_REAL(int, __sigsetjmp, void *env)
605 #endif
606
607 // The real interceptor for setjmp is special, and implemented in pure asm. We
608 // just need to initialize the REAL functions so that they can be used in asm.
609 static void InitializeSetjmpInterceptors() {
610 // We cannot use TSAN_INTERCEPT to get the setjmp address, because it does &setjmp and
611 // setjmp is not present in some versions of libc.
612 using __interception::InterceptFunction;
613 InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), (uptr*)&REAL(setjmp_symname), 0, 0);
614 InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
615 InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), (uptr*)&REAL(sigsetjmp_symname), 0,
616 0);
617 #if !SANITIZER_NETBSD
618 InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
619 #endif
620 }
621 #endif // SANITIZER_APPLE
622
623 #if SANITIZER_NETBSD
624 #define longjmp_symname __longjmp14
625 #define siglongjmp_symname __siglongjmp14
626 #else
627 #define longjmp_symname longjmp
628 #define siglongjmp_symname siglongjmp
629 #endif
630
631 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
632 // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
633 // bad things will happen. We will jump over ScopedInterceptor dtor and can
634 // leave thr->in_ignored_lib set.
635 {
636 SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
637 }
638 LongJmp(cur_thread(), env);
639 REAL(longjmp_symname)(env, val);
640 }
641
642 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
643 {
644 SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
645 }
646 LongJmp(cur_thread(), env);
647 REAL(siglongjmp_symname)(env, val);
648 }
649
650 #if SANITIZER_NETBSD
651 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
652 {
653 SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
654 }
655 LongJmp(cur_thread(), env);
656 REAL(_longjmp)(env, val);
657 }
658 #endif
659
660 #if !SANITIZER_APPLE
661 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
662 if (in_symbolizer())
663 return InternalAlloc(size);
664 void *p = 0;
665 {
666 SCOPED_INTERCEPTOR_RAW(malloc, size);
667 p = user_alloc(thr, pc, size);
668 }
669 invoke_malloc_hook(p, size);
670 return p;
671 }
672
673 // In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
674 // __libc_memalign so that (1) we can detect races and (2) free will not be called
675 // on libc internally allocated blocks.
676 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
677 SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
678 return user_memalign(thr, pc, align, sz);
679 }
680
681 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
682 if (in_symbolizer())
683 return InternalCalloc(size, n);
684 void *p = 0;
685 {
686 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
687 p = user_calloc(thr, pc, size, n);
688 }
689 invoke_malloc_hook(p, n * size);
690 return p;
691 }
692
693 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
694 if (in_symbolizer())
695 return InternalRealloc(p, size);
696 if (p)
697 invoke_free_hook(p);
698 {
699 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
700 p = user_realloc(thr, pc, p, size);
701 }
702 invoke_malloc_hook(p, size);
703 return p;
704 }
705
706 TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
707 if (in_symbolizer())
708 return InternalReallocArray(p, size, n);
709 if (p)
710 invoke_free_hook(p);
711 {
712 SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
713 p = user_reallocarray(thr, pc, p, size, n);
714 }
715 invoke_malloc_hook(p, size);
716 return p;
717 }
718
719 TSAN_INTERCEPTOR(void, free, void *p) {
720 if (p == 0)
721 return;
722 if (in_symbolizer())
723 return InternalFree(p);
724 invoke_free_hook(p);
725 SCOPED_INTERCEPTOR_RAW(free, p);
726 user_free(thr, pc, p);
727 }
728
729 TSAN_INTERCEPTOR(void, cfree, void *p) {
730 if (p == 0)
731 return;
732 if (in_symbolizer())
733 return InternalFree(p);
734 invoke_free_hook(p);
735 SCOPED_INTERCEPTOR_RAW(cfree, p);
736 user_free(thr, pc, p);
737 }
738
739 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
740 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
741 return user_alloc_usable_size(p);
742 }
743 #endif
744
745 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
746 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
747 uptr srclen = internal_strlen(src);
748 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
749 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
750 return REAL(strcpy)(dst, src);
751 }
752
753 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
754 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
755 uptr srclen = internal_strnlen(src, n);
756 MemoryAccessRange(thr, pc, (uptr)dst, n, true);
757 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
758 return REAL(strncpy)(dst, src, n);
759 }
760
761 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
762 SCOPED_TSAN_INTERCEPTOR(strdup, str);
763 // strdup will call malloc, so no instrumentation is required here.
764 return REAL(strdup)(str);
765 }
766
767 // Zero out addr if it points into shadow memory and was provided as a hint
768 // only, i.e., MAP_FIXED is not set.
769 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
770 if (*addr) {
771 if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
772 if (flags & MAP_FIXED) {
773 errno = errno_EINVAL;
774 return false;
775 } else {
776 *addr = 0;
777 }
778 }
779 }
780 return true;
781 }
782
783 template <class Mmap>
784 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
785 void *addr, SIZE_T sz, int prot, int flags,
786 int fd, OFF64_T off) {
787 if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
788 void *res = real_mmap(addr, sz, prot, flags, fd, off);
789 if (res != MAP_FAILED) {
790 if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
791 Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
792 addr, (void*)sz, res);
793 Die();
794 }
795 if (fd > 0) FdAccess(thr, pc, fd);
796 MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
797 }
798 return res;
799 }
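// A platform mmap interceptor is expected to forward here along these lines
// (sketch; the concrete interceptors are defined elsewhere):
//   TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
//                    int fd, OFF_T off) {
//     SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
//     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd,
//                             off);
//   }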
800
801 template <class Munmap>
802 static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
803 void *addr, SIZE_T sz) {
804 UnmapShadow(thr, (uptr)addr, sz);
805 int res = real_munmap(addr, sz);
806 return res;
807 }
808
809 #if SANITIZER_LINUX
810 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
811 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
812 return user_memalign(thr, pc, align, sz);
813 }
814 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
815 #else
816 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
817 #endif
818
819 #if !SANITIZER_APPLE
820 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
821 if (in_symbolizer())
822 return InternalAlloc(sz, nullptr, align);
823 SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
824 return user_aligned_alloc(thr, pc, align, sz);
825 }
826
827 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
828 if (in_symbolizer())
829 return InternalAlloc(sz, nullptr, GetPageSizeCached());
830 SCOPED_INTERCEPTOR_RAW(valloc, sz);
831 return user_valloc(thr, pc, sz);
832 }
833 #endif
834
835 #if SANITIZER_LINUX
836 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
837 if (in_symbolizer()) {
838 uptr PageSize = GetPageSizeCached();
839 sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
840 return InternalAlloc(sz, nullptr, PageSize);
841 }
842 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
843 return user_pvalloc(thr, pc, sz);
844 }
845 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
846 #else
847 #define TSAN_MAYBE_INTERCEPT_PVALLOC
848 #endif
849
850 #if !SANITIZER_APPLE
851 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
852 if (in_symbolizer()) {
853 void *p = InternalAlloc(sz, nullptr, align);
854 if (!p)
855 return errno_ENOMEM;
856 *memptr = p;
857 return 0;
858 }
859 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
860 return user_posix_memalign(thr, pc, memptr, align, sz);
861 }
862 #endif
863
864 // Both __cxa_guard_acquire and pthread_once 0-initialize
865 // the object. pthread_once does not have any
866 // other ABI requirements. __cxa_guard_acquire assumes
867 // that any non-0 value in the first byte means that
868 // initialization is completed. Contents of the remaining
869 // bytes are up to us.
870 constexpr u32 kGuardInit = 0;
871 constexpr u32 kGuardDone = 1;
872 constexpr u32 kGuardRunning = 1 << 16;
873 constexpr u32 kGuardWaiter = 1 << 17;
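// Resulting state machine for the guard word:
//   kGuardInit -> kGuardRunning   (guard_acquire: this thread runs the init)
//   kGuardRunning -> kGuardDone   (guard_release from __cxa_guard_release)
//   kGuardRunning -> kGuardInit   (guard_release from __cxa_guard_abort)
// kGuardWaiter is OR-ed into the value while other threads block on the futex.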
874
875 static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
876 bool blocking_hooks = true) {
877 if (blocking_hooks)
878 OnPotentiallyBlockingRegionBegin();
879 auto on_exit = at_scope_exit([blocking_hooks] {
880 if (blocking_hooks)
881 OnPotentiallyBlockingRegionEnd();
882 });
883
884 for (;;) {
885 u32 cmp = atomic_load(g, memory_order_acquire);
886 if (cmp == kGuardInit) {
887 if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
888 memory_order_relaxed))
889 return 1;
890 } else if (cmp == kGuardDone) {
891 if (!thr->in_ignored_lib)
892 Acquire(thr, pc, (uptr)g);
893 return 0;
894 } else {
895 if ((cmp & kGuardWaiter) ||
896 atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
897 memory_order_relaxed))
898 FutexWait(g, cmp | kGuardWaiter);
899 }
900 }
901 }
902
903 static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
904 u32 v) {
905 if (!thr->in_ignored_lib)
906 Release(thr, pc, (uptr)g);
907 u32 old = atomic_exchange(g, v, memory_order_release);
908 if (old & kGuardWaiter)
909 FutexWake(g, 1 << 30);
910 }
911
912 // __cxa_guard_acquire and friends need to be intercepted in a special way -
913 // regular interceptors will break statically-linked libstdc++. Linux
914 // interceptors are specially defined as weak functions (so that they don't
915 // cause link errors when the user defines them as well). So they silently
916 // auto-disable themselves when such a symbol is already present in the binary. If
917 // we link libstdc++ statically, it will bring its own __cxa_guard_acquire, which
918 // will silently replace our interceptor. That's why on Linux we simply export
919 // these interceptors with INTERFACE_ATTRIBUTE.
920 // On OS X, we don't support statically linking, so we just use a regular
921 // interceptor.
922 #if SANITIZER_APPLE
923 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
924 #else
925 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
926 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
927 #endif
928
929 // Used in thread-safe function static initialization.
930 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
931 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
932 return guard_acquire(thr, pc, g);
933 }
934
935 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
936 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
937 guard_release(thr, pc, g, kGuardDone);
938 }
939
940 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
941 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
942 guard_release(thr, pc, g, kGuardInit);
943 }
944
945 namespace __tsan {
946 void DestroyThreadState() {
947 ThreadState *thr = cur_thread();
948 Processor *proc = thr->proc();
949 ThreadFinish(thr);
950 ProcUnwire(proc, thr);
951 ProcDestroy(proc);
952 DTLS_Destroy();
953 cur_thread_finalize();
954 }
955
956 void PlatformCleanUpThreadState(ThreadState *thr) {
957 ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
958 &thr->signal_ctx, memory_order_relaxed);
959 if (sctx) {
960 atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
961 UnmapOrDie(sctx, sizeof(*sctx));
962 }
963 }
964 } // namespace __tsan
965
966 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
967 static void thread_finalize(void *v) {
968 uptr iter = (uptr)v;
969 if (iter > 1) {
970 if (pthread_setspecific(interceptor_ctx()->finalize_key,
971 (void*)(iter - 1))) {
972 Printf("ThreadSanitizer: failed to set thread key\n");
973 Die();
974 }
975 return;
976 }
977 DestroyThreadState();
978 }
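// thread_finalize() is registered as a TSD destructor with an iteration
// counter as the value; it re-arms itself until the last destructor iteration
// so that DestroyThreadState() runs after other keys' destructors.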
979 #endif
980
981
982 struct ThreadParam {
983 void* (*callback)(void *arg);
984 void *param;
985 Tid tid;
986 Semaphore created;
987 Semaphore started;
988 };
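// 'created' is posted by the parent after ThreadCreate() has registered the
// thread; 'started' is posted by the child after ThreadStart(). See the
// synchronization comment in the pthread_create interceptor below.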
989
990 extern "C" void *__tsan_thread_start_func(void *arg) {
991 ThreadParam *p = (ThreadParam*)arg;
992 void* (*callback)(void *arg) = p->callback;
993 void *param = p->param;
994 {
995 ThreadState *thr = cur_thread_init();
996 // Thread-local state is not initialized yet.
997 ScopedIgnoreInterceptors ignore;
998 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
999 ThreadIgnoreBegin(thr, 0);
1000 if (pthread_setspecific(interceptor_ctx()->finalize_key,
1001 (void *)GetPthreadDestructorIterations())) {
1002 Printf("ThreadSanitizer: failed to set thread key\n");
1003 Die();
1004 }
1005 ThreadIgnoreEnd(thr);
1006 #endif
1007 p->created.Wait();
1008 Processor *proc = ProcCreate();
1009 ProcWire(proc, thr);
1010 ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1011 p->started.Post();
1012 }
1013 void *res = callback(param);
1014 // Prevent the callback from being tail called,
1015 // it mixes up stack traces.
1016 volatile int foo = 42;
1017 foo++;
1018 return res;
1019 }
1020
1021 TSAN_INTERCEPTOR(int, pthread_create,
1022 void *th, void *attr, void *(*callback)(void*), void * param) {
1023 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1024
1025 MaybeSpawnBackgroundThread();
1026
1027 if (ctx->after_multithreaded_fork) {
1028 if (flags()->die_after_fork) {
1029 Report("ThreadSanitizer: starting new threads after multi-threaded "
1030 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1031 Die();
1032 } else {
1033 VPrintf(1,
1034 "ThreadSanitizer: starting new threads after multi-threaded "
1035 "fork is not supported (pid %lu). Continuing because of "
1036 "die_after_fork=0, but you are on your own\n",
1037 internal_getpid());
1038 }
1039 }
1040 __sanitizer_pthread_attr_t myattr;
1041 if (attr == 0) {
1042 pthread_attr_init(&myattr);
1043 attr = &myattr;
1044 }
1045 int detached = 0;
1046 REAL(pthread_attr_getdetachstate)(attr, &detached);
1047 AdjustStackSize(attr);
1048
1049 ThreadParam p;
1050 p.callback = callback;
1051 p.param = param;
1052 p.tid = kMainTid;
1053 int res = -1;
1054 {
1055 // Otherwise we see false positives in pthread stack manipulation.
1056 ScopedIgnoreInterceptors ignore;
1057 ThreadIgnoreBegin(thr, pc);
1058 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1059 ThreadIgnoreEnd(thr);
1060 }
1061 if (res == 0) {
1062 p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1063 CHECK_NE(p.tid, kMainTid);
1064 // Synchronization on p.tid serves two purposes:
1065 // 1. ThreadCreate must finish before the new thread starts.
1066 // Otherwise the new thread can call pthread_detach, but the pthread_t
1067 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
1068 // 2. ThreadStart must finish before this thread continues.
1069 // Otherwise, this thread can call pthread_detach and reset thr->sync
1070 // before the new thread got a chance to acquire from it in ThreadStart.
1071 p.created.Post();
1072 p.started.Wait();
1073 }
1074 if (attr == &myattr)
1075 pthread_attr_destroy(&myattr);
1076 return res;
1077 }
1078
1079 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1080 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1081 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1082 ThreadIgnoreBegin(thr, pc);
1083 int res = BLOCK_REAL(pthread_join)(th, ret);
1084 ThreadIgnoreEnd(thr);
1085 if (res == 0) {
1086 ThreadJoin(thr, pc, tid);
1087 }
1088 return res;
1089 }
1090
1091 // DEFINE_INTERNAL_PTHREAD_FUNCTIONS
1092 namespace __sanitizer {
1093 int internal_pthread_create(void *th, void *attr, void *(*callback)(void *),
1094 void *param) {
1095 ScopedIgnoreInterceptors ignore;
1096 return REAL(pthread_create)(th, attr, callback, param);
1097 }
1098 int internal_pthread_join(void *th, void **ret) {
1099 ScopedIgnoreInterceptors ignore;
1100 return REAL(pthread_join(th, ret));
1101 }
1102 } // namespace __sanitizer
1103
1104 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1105 SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1106 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1107 int res = REAL(pthread_detach)(th);
1108 if (res == 0) {
1109 ThreadDetach(thr, pc, tid);
1110 }
1111 return res;
1112 }
1113
1114 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1115 {
1116 SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1117 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
1118 CHECK_EQ(thr, &cur_thread_placeholder);
1119 #endif
1120 }
1121 REAL(pthread_exit)(retval);
1122 }
1123
1124 #if SANITIZER_LINUX
1125 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1126 SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1127 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1128 ThreadIgnoreBegin(thr, pc);
1129 int res = REAL(pthread_tryjoin_np)(th, ret);
1130 ThreadIgnoreEnd(thr);
1131 if (res == 0)
1132 ThreadJoin(thr, pc, tid);
1133 else
1134 ThreadNotJoined(thr, pc, tid, (uptr)th);
1135 return res;
1136 }
1137
1138 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1139 const struct timespec *abstime) {
1140 SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1141 Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1142 ThreadIgnoreBegin(thr, pc);
1143 int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1144 ThreadIgnoreEnd(thr);
1145 if (res == 0)
1146 ThreadJoin(thr, pc, tid);
1147 else
1148 ThreadNotJoined(thr, pc, tid, (uptr)th);
1149 return res;
1150 }
1151 #endif
1152
1153 // Problem:
1154 // NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
1155 // pthread_cond_t has different size in the different versions.
1156 // If we call new REAL functions for an old pthread_cond_t, they will corrupt memory
1157 // after pthread_cond_t (old cond is smaller).
1158 // If we call old REAL functions for new pthread_cond_t, we will lose some
1159 // functionality (e.g. old functions do not support waiting against
1160 // CLOCK_REALTIME).
1161 // Proper handling would require having 2 versions of the interceptors as well.
1162 // But this is messy, in particular requires linker scripts when sanitizer
1163 // runtime is linked into a shared library.
1164 // Instead we assume we don't have dynamic libraries built against old
1165 // pthread (2.2.5 dates back to 2002), and provide the legacy_pthread_cond flag
1166 // that allows working with old libraries (but this mode does not support
1167 // some features, e.g. pthread_condattr_getpshared).
1168 static void *init_cond(void *c, bool force = false) {
1169 // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1170 // So we allocate additional memory on the side large enough to hold
1171 // any pthread_cond_t object. Always call new REAL functions, but pass
1172 // the aux object to them.
1173 // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1174 // the first word of pthread_cond_t to zero.
1175 // It's all relevant only for linux.
1176 if (!common_flags()->legacy_pthread_cond)
1177 return c;
1178 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1179 uptr cond = atomic_load(p, memory_order_acquire);
1180 if (!force && cond != 0)
1181 return (void*)cond;
1182 void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1183 internal_memset(newcond, 0, pthread_cond_t_sz);
1184 if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1185 memory_order_acq_rel))
1186 return newcond;
1187 WRAP(free)(newcond);
1188 return (void*)cond;
1189 }
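// The side allocation made here is released in the pthread_cond_destroy
// interceptor below, which also zeroes the first word of the user object so
// that no dangling pointer is left behind.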
1190
1191 namespace {
1192
1193 template <class Fn>
1194 struct CondMutexUnlockCtx {
1195 ScopedInterceptor *si;
1196 ThreadState *thr;
1197 uptr pc;
1198 void *m;
1199 void *c;
1200 const Fn &fn;
1201
1202   int Cancel() const { return fn(); }
1203 void Unlock() const;
1204 };
1205
1206 template <class Fn>
1207 void CondMutexUnlockCtx<Fn>::Unlock() const {
1208 // pthread_cond_wait interceptor has enabled async signal delivery
1209 // (see BlockingCall below). Disable async signals since we are running
1210 // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1211 // since the thread is cancelled, so we have to manually execute them
1212 // (the thread still can run some user code due to pthread_cleanup_push).
1213 CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1214 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
1215 MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1216 // Undo BlockingCall ctor effects.
1217 thr->ignore_interceptors--;
1218 si->~ScopedInterceptor();
1219 }
1220 } // namespace
1221
1222 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1223 void *cond = init_cond(c, true);
1224 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1225 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1226 return REAL(pthread_cond_init)(cond, a);
1227 }
1228
1229 template <class Fn>
1230 int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1231 void *c, void *m) {
1232 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1233 MutexUnlock(thr, pc, (uptr)m);
1234 int res = 0;
1235 // This ensures that we handle mutex lock even in case of pthread_cancel.
1236 // See test/tsan/cond_cancel.cpp.
1237 {
1238 // Enable signal delivery while the thread is blocked.
1239 BlockingCall bc(thr);
1240 CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1241 res = call_pthread_cancel_with_cleanup(
1242 [](void *arg) -> int {
1243 return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1244 },
1245 [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1246 &arg);
1247 }
1248 if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1249 MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1250 return res;
1251 }
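// The real wait runs under call_pthread_cancel_with_cleanup() so that, if the
// thread is cancelled while blocked, CondMutexUnlockCtx::Unlock() above
// re-acquires the mutex state and manually undoes the BlockingCall and
// ScopedInterceptor effects whose destructors are skipped by cancellation.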
1252
1253 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1254 void *cond = init_cond(c);
1255 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1256 return cond_wait(
1257 thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1258 m);
1259 }
1260
1261 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1262 void *cond = init_cond(c);
1263 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1264 return cond_wait(
1265 thr, pc, &si,
1266 [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1267 m);
1268 }
1269
1270 #if SANITIZER_LINUX
1271 INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1272 __sanitizer_clockid_t clock, void *abstime) {
1273 void *cond = init_cond(c);
1274 SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1275 return cond_wait(
1276 thr, pc, &si,
1277 [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1278 cond, m);
1279 }
1280 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1281 #else
1282 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1283 #endif
1284
1285 #if SANITIZER_APPLE
1286 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1287 void *reltime) {
1288 void *cond = init_cond(c);
1289 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1290 return cond_wait(
1291 thr, pc, &si,
1292 [=]() {
1293 return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1294 },
1295 cond, m);
1296 }
1297 #endif
1298
1299 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1300 void *cond = init_cond(c);
1301 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1302 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1303 return REAL(pthread_cond_signal)(cond);
1304 }
1305
1306 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1307 void *cond = init_cond(c);
1308 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1309 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1310 return REAL(pthread_cond_broadcast)(cond);
1311 }
1312
1313 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1314 void *cond = init_cond(c);
1315 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1316 MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1317 int res = REAL(pthread_cond_destroy)(cond);
1318 if (common_flags()->legacy_pthread_cond) {
1319 // Free our aux cond and zero the pointer to not leave dangling pointers.
1320 WRAP(free)(cond);
1321 atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1322 }
1323 return res;
1324 }
1325
1326 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1327 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1328 int res = REAL(pthread_mutex_init)(m, a);
1329 if (res == 0) {
1330 u32 flagz = 0;
1331 if (a) {
1332 int type = 0;
1333 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1334 if (type == PTHREAD_MUTEX_RECURSIVE ||
1335 type == PTHREAD_MUTEX_RECURSIVE_NP)
1336 flagz |= MutexFlagWriteReentrant;
1337 }
1338 MutexCreate(thr, pc, (uptr)m, flagz);
1339 }
1340 return res;
1341 }
1342
1343 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1344 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1345 int res = REAL(pthread_mutex_destroy)(m);
1346 if (res == 0 || res == errno_EBUSY) {
1347 MutexDestroy(thr, pc, (uptr)m);
1348 }
1349 return res;
1350 }
1351
1352 TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1353 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1354 MutexPreLock(thr, pc, (uptr)m);
1355 int res = BLOCK_REAL(pthread_mutex_lock)(m);
1356 if (res == errno_EOWNERDEAD)
1357 MutexRepair(thr, pc, (uptr)m);
1358 if (res == 0 || res == errno_EOWNERDEAD)
1359 MutexPostLock(thr, pc, (uptr)m);
1360 if (res == errno_EINVAL)
1361 MutexInvalidAccess(thr, pc, (uptr)m);
1362 return res;
1363 }
1364
1365 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1366 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1367 int res = REAL(pthread_mutex_trylock)(m);
1368 if (res == errno_EOWNERDEAD)
1369 MutexRepair(thr, pc, (uptr)m);
1370 if (res == 0 || res == errno_EOWNERDEAD)
1371 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1372 return res;
1373 }
1374
1375 #if !SANITIZER_APPLE
1376 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1377 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1378 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1379 if (res == 0) {
1380 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1381 }
1382 return res;
1383 }
1384 #endif
1385
1386 TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1387 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1388 MutexUnlock(thr, pc, (uptr)m);
1389 int res = REAL(pthread_mutex_unlock)(m);
1390 if (res == errno_EINVAL)
1391 MutexInvalidAccess(thr, pc, (uptr)m);
1392 return res;
1393 }
1394
1395 #if SANITIZER_LINUX
1396 TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
1397 __sanitizer_clockid_t clock, void *abstime) {
1398 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
1399 MutexPreLock(thr, pc, (uptr)m);
1400 int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
1401 if (res == errno_EOWNERDEAD)
1402 MutexRepair(thr, pc, (uptr)m);
1403 if (res == 0 || res == errno_EOWNERDEAD)
1404 MutexPostLock(thr, pc, (uptr)m);
1405 if (res == errno_EINVAL)
1406 MutexInvalidAccess(thr, pc, (uptr)m);
1407 return res;
1408 }
1409 #endif
1410
1411 #if SANITIZER_GLIBC
1412 # if !__GLIBC_PREREQ(2, 34)
1413 // glibc 2.34 applies a non-default version for the two functions. They are no
1414 // longer expected to be intercepted by programs.
1415 TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1416 SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1417 MutexPreLock(thr, pc, (uptr)m);
1418 int res = BLOCK_REAL(__pthread_mutex_lock)(m);
1419 if (res == errno_EOWNERDEAD)
1420 MutexRepair(thr, pc, (uptr)m);
1421 if (res == 0 || res == errno_EOWNERDEAD)
1422 MutexPostLock(thr, pc, (uptr)m);
1423 if (res == errno_EINVAL)
1424 MutexInvalidAccess(thr, pc, (uptr)m);
1425 return res;
1426 }
1427
1428 TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1429 SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1430 MutexUnlock(thr, pc, (uptr)m);
1431 int res = REAL(__pthread_mutex_unlock)(m);
1432 if (res == errno_EINVAL)
1433 MutexInvalidAccess(thr, pc, (uptr)m);
1434 return res;
1435 }
1436 # endif
1437 #endif
1438
1439 #if !SANITIZER_APPLE
1440 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1441 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1442 int res = REAL(pthread_spin_init)(m, pshared);
1443 if (res == 0) {
1444 MutexCreate(thr, pc, (uptr)m);
1445 }
1446 return res;
1447 }
1448
1449 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1450 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1451 int res = REAL(pthread_spin_destroy)(m);
1452 if (res == 0) {
1453 MutexDestroy(thr, pc, (uptr)m);
1454 }
1455 return res;
1456 }
1457
1458 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1459 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1460 MutexPreLock(thr, pc, (uptr)m);
1461 int res = BLOCK_REAL(pthread_spin_lock)(m);
1462 if (res == 0) {
1463 MutexPostLock(thr, pc, (uptr)m);
1464 }
1465 return res;
1466 }
1467
1468 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1469 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1470 int res = REAL(pthread_spin_trylock)(m);
1471 if (res == 0) {
1472 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1473 }
1474 return res;
1475 }
1476
1477 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1478 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1479 MutexUnlock(thr, pc, (uptr)m);
1480 int res = REAL(pthread_spin_unlock)(m);
1481 return res;
1482 }
1483 #endif
1484
1485 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1486 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1487 int res = REAL(pthread_rwlock_init)(m, a);
1488 if (res == 0) {
1489 MutexCreate(thr, pc, (uptr)m);
1490 }
1491 return res;
1492 }
1493
1494 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1495 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1496 int res = REAL(pthread_rwlock_destroy)(m);
1497 if (res == 0) {
1498 MutexDestroy(thr, pc, (uptr)m);
1499 }
1500 return res;
1501 }
1502
1503 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1504 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1505 MutexPreReadLock(thr, pc, (uptr)m);
1506 int res = REAL(pthread_rwlock_rdlock)(m);
1507 if (res == 0) {
1508 MutexPostReadLock(thr, pc, (uptr)m);
1509 }
1510 return res;
1511 }
1512
1513 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1514 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1515 int res = REAL(pthread_rwlock_tryrdlock)(m);
1516 if (res == 0) {
1517 MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1518 }
1519 return res;
1520 }
1521
1522 #if !SANITIZER_APPLE
1523 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1524 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1525 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1526 if (res == 0) {
1527 MutexPostReadLock(thr, pc, (uptr)m);
1528 }
1529 return res;
1530 }
1531 #endif
1532
1533 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1534 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1535 MutexPreLock(thr, pc, (uptr)m);
1536 int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
1537 if (res == 0) {
1538 MutexPostLock(thr, pc, (uptr)m);
1539 }
1540 return res;
1541 }
1542
1543 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1544 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1545 int res = REAL(pthread_rwlock_trywrlock)(m);
1546 if (res == 0) {
1547 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1548 }
1549 return res;
1550 }
1551
1552 #if !SANITIZER_APPLE
1553 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1554 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1555 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1556 if (res == 0) {
1557 MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1558 }
1559 return res;
1560 }
1561 #endif
1562
1563 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1564 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1565 MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1566 int res = REAL(pthread_rwlock_unlock)(m);
1567 return res;
1568 }
1569
1570 #if !SANITIZER_APPLE
1571 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1572 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1573 MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1574 int res = REAL(pthread_barrier_init)(b, a, count);
1575 return res;
1576 }
1577
1578 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1579 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1580 MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1581 int res = REAL(pthread_barrier_destroy)(b);
1582 return res;
1583 }
1584
1585 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1586 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
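// Every thread releases on entry and acquires on exit, so memory accesses
// made before the barrier happen-before accesses made after it in all threads.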
1587 Release(thr, pc, (uptr)b);
1588 MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1589 int res = REAL(pthread_barrier_wait)(b);
1590 MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1591 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1592 Acquire(thr, pc, (uptr)b);
1593 }
1594 return res;
1595 }
1596 #endif
1597
1598 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1599 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1600 if (o == 0 || f == 0)
1601 return errno_EINVAL;
1602 atomic_uint32_t *a;
1603
1604 if (SANITIZER_APPLE)
1605 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1606 else if (SANITIZER_NETBSD)
1607 a = static_cast<atomic_uint32_t*>
1608 ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1609 else
1610 a = static_cast<atomic_uint32_t*>(o);
1611
1612 // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
1613 // results in crashes due to too little stack space.
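// guard_acquire() returns true only for the thread that must run f();
// guard_release() then marks the guard as kGuardDone so later callers
// synchronize with the completed initialization.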
1614 if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1615 (*f)();
1616 guard_release(thr, pc, a, kGuardDone);
1617 }
1618 return 0;
1619 }
1620
1621 #if SANITIZER_GLIBC
1622 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1623 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1624 if (fd > 0)
1625 FdAccess(thr, pc, fd);
1626 return REAL(__fxstat)(version, fd, buf);
1627 }
1628
1629 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1630 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1631 if (fd > 0)
1632 FdAccess(thr, pc, fd);
1633 return REAL(__fxstat64)(version, fd, buf);
1634 }
1635 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
1636 #else
1637 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1638 #endif
1639
1640 #if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
1641 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1642 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1643 if (fd > 0)
1644 FdAccess(thr, pc, fd);
1645 return REAL(fstat)(fd, buf);
1646 }
1647 # define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
1648 #else
1649 # define TSAN_MAYBE_INTERCEPT_FSTAT
1650 #endif
1651
1652 #if __GLIBC_PREREQ(2, 33)
1653 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1654 SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
1655 if (fd > 0)
1656 FdAccess(thr, pc, fd);
1657 return REAL(fstat64)(fd, buf);
1658 }
1659 # define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1660 #else
1661 # define TSAN_MAYBE_INTERCEPT_FSTAT64
1662 #endif
1663
1664 TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1665 va_list ap;
1666 va_start(ap, oflag);
1667 mode_t mode = va_arg(ap, int);
1668 va_end(ap);
1669 SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1670 READ_STRING(thr, pc, name, 0);
1671 int fd = REAL(open)(name, oflag, mode);
1672 if (fd >= 0)
1673 FdFileCreate(thr, pc, fd);
1674 return fd;
1675 }
1676
1677 #if SANITIZER_LINUX
1678 TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1679 va_list ap;
1680 va_start(ap, oflag);
1681 mode_t mode = va_arg(ap, int);
1682 va_end(ap);
1683 SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1684 READ_STRING(thr, pc, name, 0);
1685 int fd = REAL(open64)(name, oflag, mode);
1686 if (fd >= 0)
1687 FdFileCreate(thr, pc, fd);
1688 return fd;
1689 }
1690 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1691 #else
1692 #define TSAN_MAYBE_INTERCEPT_OPEN64
1693 #endif
1694
1695 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1696 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1697 READ_STRING(thr, pc, name, 0);
1698 int fd = REAL(creat)(name, mode);
1699 if (fd >= 0)
1700 FdFileCreate(thr, pc, fd);
1701 return fd;
1702 }
1703
1704 #if SANITIZER_LINUX
1705 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1706 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1707 READ_STRING(thr, pc, name, 0);
1708 int fd = REAL(creat64)(name, mode);
1709 if (fd >= 0)
1710 FdFileCreate(thr, pc, fd);
1711 return fd;
1712 }
1713 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1714 #else
1715 #define TSAN_MAYBE_INTERCEPT_CREAT64
1716 #endif
1717
1718 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1719 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1720 int newfd = REAL(dup)(oldfd);
1721 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1722 FdDup(thr, pc, oldfd, newfd, true);
1723 return newfd;
1724 }
1725
1726 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1727 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1728 int newfd2 = REAL(dup2)(oldfd, newfd);
1729 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1730 FdDup(thr, pc, oldfd, newfd2, false);
1731 return newfd2;
1732 }
1733
1734 #if !SANITIZER_APPLE
1735 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1736 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1737 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1738 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1739 FdDup(thr, pc, oldfd, newfd2, false);
1740 return newfd2;
1741 }
1742 #endif
1743
1744 #if SANITIZER_LINUX
1745 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1746 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1747 int fd = REAL(eventfd)(initval, flags);
1748 if (fd >= 0)
1749 FdEventCreate(thr, pc, fd);
1750 return fd;
1751 }
1752 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1753 #else
1754 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1755 #endif
1756
1757 #if SANITIZER_LINUX
1758 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1759 SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1760 FdClose(thr, pc, fd);
1761 fd = REAL(signalfd)(fd, mask, flags);
1762 if (!MustIgnoreInterceptor(thr))
1763 FdSignalCreate(thr, pc, fd);
1764 return fd;
1765 }
1766 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1767 #else
1768 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1769 #endif
1770
1771 #if SANITIZER_LINUX
1772 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1773 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1774 int fd = REAL(inotify_init)(fake);
1775 if (fd >= 0)
1776 FdInotifyCreate(thr, pc, fd);
1777 return fd;
1778 }
1779 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1780 #else
1781 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1782 #endif
1783
1784 #if SANITIZER_LINUX
1785 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1786 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1787 int fd = REAL(inotify_init1)(flags);
1788 if (fd >= 0)
1789 FdInotifyCreate(thr, pc, fd);
1790 return fd;
1791 }
1792 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1793 #else
1794 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1795 #endif
1796
1797 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1798 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1799 int fd = REAL(socket)(domain, type, protocol);
1800 if (fd >= 0)
1801 FdSocketCreate(thr, pc, fd);
1802 return fd;
1803 }
1804
1805 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1806 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1807 int res = REAL(socketpair)(domain, type, protocol, fd);
1808 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1809 FdPipeCreate(thr, pc, fd[0], fd[1]);
1810 return res;
1811 }
1812
1813 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1814 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1815 FdSocketConnecting(thr, pc, fd);
1816 int res = REAL(connect)(fd, addr, addrlen);
1817 if (res == 0 && fd >= 0)
1818 FdSocketConnect(thr, pc, fd);
1819 return res;
1820 }
1821
1822 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1823 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1824 int res = REAL(bind)(fd, addr, addrlen);
1825 if (fd > 0 && res == 0)
1826 FdAccess(thr, pc, fd);
1827 return res;
1828 }
1829
1830 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1831 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1832 int res = REAL(listen)(fd, backlog);
1833 if (fd > 0 && res == 0)
1834 FdAccess(thr, pc, fd);
1835 return res;
1836 }
1837
1838 TSAN_INTERCEPTOR(int, close, int fd) {
1839 SCOPED_INTERCEPTOR_RAW(close, fd);
1840 if (!in_symbolizer())
1841 FdClose(thr, pc, fd);
1842 return REAL(close)(fd);
1843 }
1844
1845 #if SANITIZER_LINUX
1846 TSAN_INTERCEPTOR(int, __close, int fd) {
1847 SCOPED_INTERCEPTOR_RAW(__close, fd);
1848 FdClose(thr, pc, fd);
1849 return REAL(__close)(fd);
1850 }
1851 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1852 #else
1853 #define TSAN_MAYBE_INTERCEPT___CLOSE
1854 #endif
1855
1856 // glibc guts
1857 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1858 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1859 SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1860 int fds[64];
1861 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1862 for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1863 REAL(__res_iclose)(state, free_addr);
1864 }
1865 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1866 #else
1867 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1868 #endif
1869
1870 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1871 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1872 int res = REAL(pipe)(pipefd);
1873 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1874 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1875 return res;
1876 }
1877
1878 #if !SANITIZER_APPLE
1879 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1880 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1881 int res = REAL(pipe2)(pipefd, flags);
1882 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1883 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1884 return res;
1885 }
1886 #endif
1887
1888 TSAN_INTERCEPTOR(int, unlink, char *path) {
1889 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1890 Release(thr, pc, File2addr(path));
1891 int res = REAL(unlink)(path);
1892 return res;
1893 }
1894
1895 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1896 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1897 void *res = REAL(tmpfile)(fake);
1898 if (res) {
1899 int fd = fileno_unlocked(res);
1900 if (fd >= 0)
1901 FdFileCreate(thr, pc, fd);
1902 }
1903 return res;
1904 }
1905
1906 #if SANITIZER_LINUX
1907 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1908 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1909 void *res = REAL(tmpfile64)(fake);
1910 if (res) {
1911 int fd = fileno_unlocked(res);
1912 if (fd >= 0)
1913 FdFileCreate(thr, pc, fd);
1914 }
1915 return res;
1916 }
1917 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1918 #else
1919 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1920 #endif
1921
1922 static void FlushStreams() {
1923 // Flushing all the streams here may freeze the process if a child thread is
1924 // performing file stream operations at the same time.
1925 REAL(fflush)(stdout);
1926 REAL(fflush)(stderr);
1927 }
1928
1929 TSAN_INTERCEPTOR(void, abort, int fake) {
1930 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1931 FlushStreams();
1932 REAL(abort)(fake);
1933 }
1934
1935 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1936 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1937 Release(thr, pc, Dir2addr(path));
1938 int res = REAL(rmdir)(path);
1939 return res;
1940 }
1941
1942 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1943 SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1944 if (dirp) {
1945 int fd = dirfd(dirp);
1946 FdClose(thr, pc, fd);
1947 }
1948 return REAL(closedir)(dirp);
1949 }
1950
1951 #if SANITIZER_LINUX
1952 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1953 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1954 int fd = REAL(epoll_create)(size);
1955 if (fd >= 0)
1956 FdPollCreate(thr, pc, fd);
1957 return fd;
1958 }
1959
1960 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1961 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1962 int fd = REAL(epoll_create1)(flags);
1963 if (fd >= 0)
1964 FdPollCreate(thr, pc, fd);
1965 return fd;
1966 }
1967
1968 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1969 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1970 if (epfd >= 0)
1971 FdAccess(thr, pc, epfd);
1972 if (epfd >= 0 && fd >= 0)
1973 FdAccess(thr, pc, fd);
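// Registering an fd releases on the epoll fd; this pairs with the FdAcquire()
// in epoll_wait/epoll_pwait so the waiting thread observes writes made before
// EPOLL_CTL_ADD.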
1974 if (op == EPOLL_CTL_ADD && epfd >= 0) {
1975 FdPollAdd(thr, pc, epfd, fd);
1976 FdRelease(thr, pc, epfd);
1977 }
1978 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1979 return res;
1980 }
1981
1982 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1983 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1984 if (epfd >= 0)
1985 FdAccess(thr, pc, epfd);
1986 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1987 if (res > 0 && epfd >= 0)
1988 FdAcquire(thr, pc, epfd);
1989 return res;
1990 }
1991
1992 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1993 void *sigmask) {
1994 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1995 if (epfd >= 0)
1996 FdAccess(thr, pc, epfd);
1997 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1998 if (res > 0 && epfd >= 0)
1999 FdAcquire(thr, pc, epfd);
2000 return res;
2001 }
2002
2003 TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
2004 void *sigmask) {
2005 SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
2006 // This function is new and may not be present in libc and/or kernel.
2007 // Since we effectively add it to libc (as will be probed by the program
2008 // using dlsym or a weak function pointer) we need to handle the case
2009 // when it's not present in the actual libc.
2010 if (!REAL(epoll_pwait2)) {
2011 errno = errno_ENOSYS;
2012 return -1;
2013 }
2014 if (MustIgnoreInterceptor(thr))
2015 return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2016 if (epfd >= 0)
2017 FdAccess(thr, pc, epfd);
2018 int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2019 if (res > 0 && epfd >= 0)
2020 FdAcquire(thr, pc, epfd);
2021 return res;
2022 }
2023
2024 # define TSAN_MAYBE_INTERCEPT_EPOLL \
2025 TSAN_INTERCEPT(epoll_create); \
2026 TSAN_INTERCEPT(epoll_create1); \
2027 TSAN_INTERCEPT(epoll_ctl); \
2028 TSAN_INTERCEPT(epoll_wait); \
2029 TSAN_INTERCEPT(epoll_pwait); \
2030 TSAN_INTERCEPT(epoll_pwait2)
2031 #else
2032 #define TSAN_MAYBE_INTERCEPT_EPOLL
2033 #endif
2034
2035 // The following functions are intercepted merely to process pending signals.
2036 // If program blocks signal X, we must deliver the signal before the function
2037 // returns. Similarly, if program unblocks a signal (or returns from sigsuspend)
2038 // it's better to deliver the signal straight away.
2039 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2040 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2041 return REAL(sigsuspend)(mask);
2042 }
2043
2044 TSAN_INTERCEPTOR(int, sigblock, int mask) {
2045 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2046 return REAL(sigblock)(mask);
2047 }
2048
2049 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2050 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2051 return REAL(sigsetmask)(mask);
2052 }
2053
2054 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2055 __sanitizer_sigset_t *oldset) {
2056 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2057 return REAL(pthread_sigmask)(how, set, oldset);
2058 }
2059
2060 namespace __tsan {
2061
2062 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2063 VarSizeStackTrace stack;
2064 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
2065 // expected; OutputReport() will undo this.
2066 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2067 ThreadRegistryLock l(&ctx->thread_registry);
2068 ScopedReport rep(ReportTypeErrnoInSignal);
2069 rep.SetSigNum(sig);
2070 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2071 rep.AddStack(stack, true);
2072 OutputReport(thr, rep);
2073 }
2074 }
2075
2076 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2077 int sig, __sanitizer_siginfo *info,
2078 void *uctx) {
2079 CHECK(thr->slot);
2080 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2081 if (acquire)
2082 Acquire(thr, 0, (uptr)&sigactions[sig]);
2083 // Signals are generally asynchronous, so if we receive a signal while
2084 // ignores are enabled we should disable the ignores. This is critical for sync
2085 // and interceptor ignores, because otherwise we can miss synchronization and
2086 // report false races.
2087 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2088 int ignore_interceptors = thr->ignore_interceptors;
2089 int ignore_sync = thr->ignore_sync;
2090 // For symbolizer we only process SIGSEGVs synchronously
2091 // (bug in symbolizer or in tsan). But we want to reset
2092 // in_symbolizer to fail gracefully. Symbolizer and user code
2093 // use different memory allocators, so if we don't reset
2094 // in_symbolizer we can get memory allocated with one allocator being
2095 // freed with another, which can cause more crashes.
2096 int in_symbolizer = thr->in_symbolizer;
2097 if (!ctx->after_multithreaded_fork) {
2098 thr->ignore_reads_and_writes = 0;
2099 thr->fast_state.ClearIgnoreBit();
2100 thr->ignore_interceptors = 0;
2101 thr->ignore_sync = 0;
2102 thr->in_symbolizer = 0;
2103 }
2104 // Ensure that the handler does not spoil errno.
2105 const int saved_errno = errno;
2106 errno = 99;
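// 99 is an arbitrary sentinel: if errno no longer equals 99 after the handler
// returns, the handler changed errno without restoring it (reported below).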
2107 // This code races with sigaction. Be careful to not read sa_sigaction twice.
2108 // Also need to remember pc for reporting before the call,
2109 // because the handler can reset it.
2110 volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2111 ? (uptr)sigactions[sig].sigaction
2112 : (uptr)sigactions[sig].handler;
2113 if (pc != sig_dfl && pc != sig_ign) {
2114 // The callback can be either sa_handler or sa_sigaction.
2115 // They have different signatures, but we assume that passing
2116 // additional arguments to sa_handler works and is harmless.
2117 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2118 }
2119 if (!ctx->after_multithreaded_fork) {
2120 thr->ignore_reads_and_writes = ignore_reads_and_writes;
2121 if (ignore_reads_and_writes)
2122 thr->fast_state.SetIgnoreBit();
2123 thr->ignore_interceptors = ignore_interceptors;
2124 thr->ignore_sync = ignore_sync;
2125 thr->in_symbolizer = in_symbolizer;
2126 }
2127 // We do not detect errno spoiling for SIGTERM,
2128 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2129 // and tsan would report a false positive in such cases.
2130 // It's difficult to properly detect this situation (reraise),
2131 // because in async signal processing case (when handler is called directly
2132 // from rtl_generic_sighandler) we have not yet received the reraised
2133 // signal; and it looks too fragile to intercept all ways to reraise a signal.
2134 if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2135 errno != 99)
2136 ReportErrnoSpoiling(thr, pc, sig);
2137 errno = saved_errno;
2138 }
2139
2140 void ProcessPendingSignalsImpl(ThreadState *thr) {
2141 atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2142 ThreadSignalContext *sctx = SigCtx(thr);
2143 if (sctx == 0)
2144 return;
2145 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
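// Block all signals while draining the queue so handlers do not nest;
// the original mask is restored below.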
2146 internal_sigfillset(&sctx->emptyset);
2147 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2148 CHECK_EQ(res, 0);
2149 for (int sig = 0; sig < kSigCount; sig++) {
2150 SignalDesc *signal = &sctx->pending_signals[sig];
2151 if (signal->armed) {
2152 signal->armed = false;
2153 CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2154 &signal->ctx);
2155 }
2156 }
2157 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2158 CHECK_EQ(res, 0);
2159 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2160 }
2161
2162 } // namespace __tsan
2163
2164 static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2165 __sanitizer_siginfo *info) {
2166 // If we are sending signal to ourselves, we must process it now.
2167 if (sctx && sig == sctx->int_signal_send)
2168 return true;
2169 #if SANITIZER_HAS_SIGINFO
2170 // POSIX timers can be configured to send any kind of signal; however, it
2171 // doesn't make any sense to consider a timer signal as synchronous!
2172 if (info->si_code == SI_TIMER)
2173 return false;
2174 #endif
2175 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2176 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2177 }
2178
2179 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2180 ThreadState *thr = cur_thread_init();
2181 ThreadSignalContext *sctx = SigCtx(thr);
2182 if (sig < 0 || sig >= kSigCount) {
2183 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2184 return;
2185 }
2186 // Don't mess with synchronous signals.
2187 const bool sync = is_sync_signal(sctx, sig, info);
2188 if (sync ||
2189 // If we are in blocking function, we can safely process it now
2190 // (but check if we are in a recursive interceptor,
2191 // i.e. pthread_join()->munmap()).
2192 atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2193 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2194 if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2195 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2196 CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2197 atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2198 } else {
2199 // Be very conservative with when we do acquire in this case.
2200 // It's unsafe to do acquire in async handlers, because ThreadState
2201 // can be in inconsistent state.
2202 // SIGSYS looks relatively safe -- it's synchronous and can actually
2203 // need some global state.
2204 bool acq = (sig == SIGSYS);
2205 CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2206 }
2207 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2208 return;
2209 }
2210
2211 if (sctx == 0)
2212 return;
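// Asynchronous signal outside of a blocking call: queue it and let
// ProcessPendingSignalsImpl() deliver it later from an interceptor.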
2213 SignalDesc *signal = &sctx->pending_signals[sig];
2214 if (signal->armed == false) {
2215 signal->armed = true;
2216 internal_memcpy(&signal->siginfo, info, sizeof(*info));
2217 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2218 atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2219 }
2220 }
2221
2222 TSAN_INTERCEPTOR(int, raise, int sig) {
2223 SCOPED_TSAN_INTERCEPTOR(raise, sig);
2224 ThreadSignalContext *sctx = SigCtx(thr);
2225 CHECK_NE(sctx, 0);
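// Remember the signal we send to ourselves so that sighandler() treats it as
// synchronous (see is_sync_signal) and runs the user handler immediately.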
2226 int prev = sctx->int_signal_send;
2227 sctx->int_signal_send = sig;
2228 int res = REAL(raise)(sig);
2229 CHECK_EQ(sctx->int_signal_send, sig);
2230 sctx->int_signal_send = prev;
2231 return res;
2232 }
2233
2234 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2235 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2236 ThreadSignalContext *sctx = SigCtx(thr);
2237 CHECK_NE(sctx, 0);
2238 int prev = sctx->int_signal_send;
2239 if (pid == (int)internal_getpid()) {
2240 sctx->int_signal_send = sig;
2241 }
2242 int res = REAL(kill)(pid, sig);
2243 if (pid == (int)internal_getpid()) {
2244 CHECK_EQ(sctx->int_signal_send, sig);
2245 sctx->int_signal_send = prev;
2246 }
2247 return res;
2248 }
2249
2250 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2251 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2252 ThreadSignalContext *sctx = SigCtx(thr);
2253 CHECK_NE(sctx, 0);
2254 int prev = sctx->int_signal_send;
2255 bool self = pthread_equal(tid, pthread_self());
2256 if (self)
2257 sctx->int_signal_send = sig;
2258 int res = REAL(pthread_kill)(tid, sig);
2259 if (self) {
2260 CHECK_EQ(sctx->int_signal_send, sig);
2261 sctx->int_signal_send = prev;
2262 }
2263 return res;
2264 }
2265
2266 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2267 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2268 // It's intercepted merely to process pending signals.
2269 return REAL(gettimeofday)(tv, tz);
2270 }
2271
2272 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2273 void *hints, void *rv) {
2274 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2275 // We miss atomic synchronization in getaddrinfo,
2276 // and can report false race between malloc and free
2277 // inside of getaddrinfo. So ignore memory accesses.
2278 ThreadIgnoreBegin(thr, pc);
2279 int res = REAL(getaddrinfo)(node, service, hints, rv);
2280 ThreadIgnoreEnd(thr);
2281 return res;
2282 }
2283
2284 TSAN_INTERCEPTOR(int, fork, int fake) {
2285 if (in_symbolizer())
2286 return REAL(fork)(fake);
2287 SCOPED_INTERCEPTOR_RAW(fork, fake);
2288 return REAL(fork)(fake);
2289 }
2290
2291 void atfork_prepare() {
2292 if (in_symbolizer())
2293 return;
2294 ThreadState *thr = cur_thread();
2295 const uptr pc = StackTrace::GetCurrentPc();
2296 ForkBefore(thr, pc);
2297 }
2298
2299 void atfork_parent() {
2300 if (in_symbolizer())
2301 return;
2302 ThreadState *thr = cur_thread();
2303 const uptr pc = StackTrace::GetCurrentPc();
2304 ForkParentAfter(thr, pc);
2305 }
2306
2307 void atfork_child() {
2308 if (in_symbolizer())
2309 return;
2310 ThreadState *thr = cur_thread();
2311 const uptr pc = StackTrace::GetCurrentPc();
2312 ForkChildAfter(thr, pc, true);
2313 FdOnFork(thr, pc);
2314 }
2315
2316 #if !SANITIZER_IOS
2317 TSAN_INTERCEPTOR(int, vfork, int fake) {
2318 // Some programs (e.g. openjdk) call close for all file descriptors
2319 // in the child process. Under tsan it leads to false positives, because
2320 // address space is shared, so the parent process also thinks that
2321 // the descriptors are closed (while they are actually not).
2322 // This leads to false positives due to missed synchronization.
2323 // Strictly speaking this is undefined behavior, because the vfork child is not
2324 // allowed to call any functions other than exec/exit. But this is what
2325 // openjdk does, so we want to handle it.
2326 // We could disable interceptors in the child process. But it's not possible
2327 // to simply intercept and wrap vfork, because vfork child is not allowed
2328 // to return from the function that calls vfork, and that's exactly what
2329 // we would do. So this would require some assembly trickery as well.
2330 // Instead we simply turn vfork into fork.
2331 return WRAP(fork)(fake);
2332 }
2333 #endif
2334
2335 #if SANITIZER_LINUX
2336 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2337 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2338 SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2339 child_tid);
2340 struct Arg {
2341 int (*fn)(void *);
2342 void *arg;
2343 };
2344 auto wrapper = +[](void *p) -> int {
2345 auto *thr = cur_thread();
2346 uptr pc = GET_CURRENT_PC();
2347 // Start the background thread for fork, but not for clone.
2348 // For fork we did this always and it's known to work (or user code has
2349 // adopted). But if we do this for the new clone interceptor some code
2350 // (sandbox2) fails. So keep the model we used for years and don't start the
2351 // background thread after clone.
2352 ForkChildAfter(thr, pc, false);
2353 FdOnFork(thr, pc);
2354 auto *arg = static_cast<Arg *>(p);
2355 return arg->fn(arg->arg);
2356 };
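// On the parent side clone is bracketed like fork: quiesce the runtime before
// the call and resume it afterwards.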
2357 ForkBefore(thr, pc);
2358 Arg arg_wrapper = {fn, arg};
2359 int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2360 child_tid);
2361 ForkParentAfter(thr, pc);
2362 return pid;
2363 }
2364 #endif
2365
2366 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2367 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2368 void *data);
2369 struct dl_iterate_phdr_data {
2370 ThreadState *thr;
2371 uptr pc;
2372 dl_iterate_phdr_cb_t cb;
2373 void *data;
2374 };
2375
2376 static bool IsAppNotRodata(uptr addr) {
2377 return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2378 }
2379
2380 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2381 void *data) {
2382 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2383 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2384 // accessible in dl_iterate_phdr callback. But we don't see synchronization
2385 // inside of dynamic linker, so we "unpoison" it here in order to not
2386 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2387 // because some libc functions call __libc_dlopen.
2388 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2389 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2390 internal_strlen(info->dlpi_name));
2391 int res = cbdata->cb(info, size, cbdata->data);
2392 // Perform the check one more time in case info->dlpi_name was overwritten
2393 // by user callback.
2394 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2395 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2396 internal_strlen(info->dlpi_name));
2397 return res;
2398 }
2399
2400 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2401 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2402 dl_iterate_phdr_data cbdata;
2403 cbdata.thr = thr;
2404 cbdata.pc = pc;
2405 cbdata.cb = cb;
2406 cbdata.data = data;
2407 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2408 return res;
2409 }
2410 #endif
2411
2412 static int OnExit(ThreadState *thr) {
2413 int status = Finalize(thr);
2414 FlushStreams();
2415 return status;
2416 }
2417
2418 #if !SANITIZER_APPLE
2419 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2420 __sanitizer_msghdr *msg) {
2421 int fds[64];
2422 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2423 for (int i = 0; i < cnt; i++)
2424 FdEventCreate(thr, pc, fds[i]);
2425 }
2426 #endif
2427
2428 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2429 // Causes interceptor recursion (getaddrinfo() and fopen())
2430 #undef SANITIZER_INTERCEPT_GETADDRINFO
2431 // We define our own.
2432 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2433 #define NEED_TLS_GET_ADDR
2434 #endif
2435 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2436 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2437 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2438
2439 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2440 INTERCEPT_FUNCTION_VER(name, ver)
2441 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2442 (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2443
2444 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2445 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2446 TsanInterceptorContext _ctx = {thr, pc}; \
2447 ctx = (void *)&_ctx; \
2448 (void)ctx;
2449
2450 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2451 if (path) \
2452 Acquire(thr, pc, File2addr(path)); \
2453 if (file) { \
2454 int fd = fileno_unlocked(file); \
2455 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2456 }
2457
2458 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2459 if (file) { \
2460 int fd = fileno_unlocked(file); \
2461 FdClose(thr, pc, fd); \
2462 }
2463
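// Ignore memory accesses performed inside dlopen itself: the dynamic linker's
// internal synchronization is not visible to tsan and would produce false
// reports.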
2464 #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2465 ({ \
2466 CheckNoDeepBind(filename, flag); \
2467 ThreadIgnoreBegin(thr, 0); \
2468 void *res = REAL(dlopen)(filename, flag); \
2469 ThreadIgnoreEnd(thr); \
2470 res; \
2471 })
2472
2473 // Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
2474 // (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
2475 // intercepted calls, which can cause deadlocks with ReportRace(), which also
2476 // uses this code.
2477 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2478 ({ \
2479 ScopedIgnoreInterceptors ignore_interceptors; \
2480 libignore()->OnLibraryLoaded(filename); \
2481 })
2482
2483 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2484 ({ \
2485 ScopedIgnoreInterceptors ignore_interceptors; \
2486 libignore()->OnLibraryUnloaded(); \
2487 })
2488
2489 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2490 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2491
2492 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2493 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2494
2495 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2496 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2497
2498 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2499 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2500
2501 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2502 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2503
2504 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2505 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2506
2507 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2508 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2509
2510 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2511 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2512
2513 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2514 if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2515 COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
2516 else \
2517 __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2518
2519 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2520
2521 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2522 OnExit(((TsanInterceptorContext *) ctx)->thr)
2523
2524 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
2525 off) \
2526 do { \
2527 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2528 off); \
2529 } while (false)
2530
2531 #define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
2532 do { \
2533 return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2534 } while (false)
2535
2536 #if !SANITIZER_APPLE
2537 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2538 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2539 ((TsanInterceptorContext *)ctx)->pc, msg)
2540 #endif
2541
2542 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2543 if (TsanThread *t = GetCurrentThread()) { \
2544 *begin = t->tls_begin(); \
2545 *end = t->tls_end(); \
2546 } else { \
2547 *begin = *end = 0; \
2548 }
2549
2550 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2551 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2552
2553 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2554 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2555
2556 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2557
2558 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2559 __sanitizer_sigaction *old);
2560 static __sanitizer_sighandler_ptr signal_impl(int sig,
2561 __sanitizer_sighandler_ptr h);
2562
2563 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2564 { return sigaction_impl(signo, act, oldact); }
2565
2566 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2567 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2568
2569 #define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2570
2571 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2572
2573 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2574 __sanitizer_sigaction *old) {
2575 // Note: if we call REAL(sigaction) directly for any reason without proxying
2576 // the signal handler through sighandler, very bad things will happen.
2577 // The handler will run synchronously and corrupt tsan per-thread state.
2578 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2579 if (sig <= 0 || sig >= kSigCount) {
2580 errno = errno_EINVAL;
2581 return -1;
2582 }
2583 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2584 __sanitizer_sigaction old_stored;
2585 if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2586 __sanitizer_sigaction newact;
2587 if (act) {
2588 // Copy act into sigactions[sig].
2589 // Can't use struct copy, because compiler can emit call to memcpy.
2590 // Can't use internal_memcpy, because it copies byte-by-byte,
2591 // and signal handler reads the handler concurrently. It can read
2592 // some bytes from old value and some bytes from new value.
2593 // Use volatile to prevent insertion of memcpy.
2594 sigactions[sig].handler =
2595 *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2596 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2597 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2598 sizeof(sigactions[sig].sa_mask));
2599 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2600 sigactions[sig].sa_restorer = act->sa_restorer;
2601 #endif
2602 internal_memcpy(&newact, act, sizeof(newact));
2603 internal_sigfillset(&newact.sa_mask);
2604 if ((act->sa_flags & SA_SIGINFO) ||
2605 ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2606 newact.sa_flags |= SA_SIGINFO;
2607 newact.sigaction = sighandler;
2608 }
2609 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2610 act = &newact;
2611 }
2612 int res = REAL(sigaction)(sig, act, old);
2613 if (res == 0 && old && old->sigaction == sighandler)
2614 internal_memcpy(old, &old_stored, sizeof(*old));
2615 return res;
2616 }
2617
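// signal() is implemented on top of sigaction (via sigaction_symname) so the
// handler is installed through the same path as sigaction_impl() and gets
// proxied through sighandler().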
2618 static __sanitizer_sighandler_ptr signal_impl(int sig,
2619 __sanitizer_sighandler_ptr h) {
2620 __sanitizer_sigaction act;
2621 act.handler = h;
2622 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2623 act.sa_flags = 0;
2624 __sanitizer_sigaction old;
2625 int res = sigaction_symname(sig, &act, &old);
2626 if (res) return (__sanitizer_sighandler_ptr)sig_err;
2627 return old.handler;
2628 }
2629
2630 #define TSAN_SYSCALL() \
2631 ThreadState *thr = cur_thread(); \
2632 if (thr->ignore_interceptors) \
2633 return; \
2634 ScopedSyscall scoped_syscall(thr)
2635
2636 struct ScopedSyscall {
2637 ThreadState *thr;
2638
2639 explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2640
2641 ~ScopedSyscall() {
2642 ProcessPendingSignals(thr);
2643 }
2644 };
2645
2646 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2647 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2648 TSAN_SYSCALL();
2649 MemoryAccessRange(thr, pc, p, s, write);
2650 }
2651
2652 static USED void syscall_acquire(uptr pc, uptr addr) {
2653 TSAN_SYSCALL();
2654 Acquire(thr, pc, addr);
2655 DPrintf("syscall_acquire(0x%zx))\n", addr);
2656 }
2657
2658 static USED void syscall_release(uptr pc, uptr addr) {
2659 TSAN_SYSCALL();
2660 DPrintf("syscall_release(0x%zx)\n", addr);
2661 Release(thr, pc, addr);
2662 }
2663
2664 static void syscall_fd_close(uptr pc, int fd) {
2665 auto *thr = cur_thread();
2666 FdClose(thr, pc, fd);
2667 }
2668
2669 static USED void syscall_fd_acquire(uptr pc, int fd) {
2670 TSAN_SYSCALL();
2671 FdAcquire(thr, pc, fd);
2672 DPrintf("syscall_fd_acquire(%d)\n", fd);
2673 }
2674
2675 static USED void syscall_fd_release(uptr pc, int fd) {
2676 TSAN_SYSCALL();
2677 DPrintf("syscall_fd_release(%d)\n", fd);
2678 FdRelease(thr, pc, fd);
2679 }
2680
2681 static USED void sycall_blocking_start() {
2682 DPrintf("sycall_blocking_start()\n");
2683 ThreadState *thr = cur_thread();
2684 EnterBlockingFunc(thr);
2685 // When we are in a "blocking call", we process signals asynchronously
2686 // (right when they arrive). In this context we do not expect to be
2687 // executing any user/runtime code. The known interceptor sequence when
2688 // this is not true is: pthread_join -> munmap(stack). It's fine
2689 // to ignore munmap in this case -- we handle stack shadow separately.
2690 thr->ignore_interceptors++;
2691 }
2692
2693 static USED void sycall_blocking_end() {
2694 DPrintf("sycall_blocking_end()\n");
2695 ThreadState *thr = cur_thread();
2696 thr->ignore_interceptors--;
2697 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2698 }
2699
2700 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2701
2702 static void syscall_post_fork(uptr pc, int pid) {
2703 ThreadState *thr = cur_thread();
2704 if (pid == 0) {
2705 // child
2706 ForkChildAfter(thr, pc, true);
2707 FdOnFork(thr, pc);
2708 } else if (pid > 0) {
2709 // parent
2710 ForkParentAfter(thr, pc);
2711 } else {
2712 // error
2713 ForkParentAfter(thr, pc);
2714 }
2715 }
2716 #endif
2717
2718 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2719 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2720
2721 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2722 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2723
2724 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2725 do { \
2726 (void)(p); \
2727 (void)(s); \
2728 } while (false)
2729
2730 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2731 do { \
2732 (void)(p); \
2733 (void)(s); \
2734 } while (false)
2735
2736 #define COMMON_SYSCALL_ACQUIRE(addr) \
2737 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2738
2739 #define COMMON_SYSCALL_RELEASE(addr) \
2740 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2741
2742 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2743
2744 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2745
2746 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2747
2748 #define COMMON_SYSCALL_PRE_FORK() \
2749 syscall_pre_fork(GET_CALLER_PC())
2750
2751 #define COMMON_SYSCALL_POST_FORK(res) \
2752 syscall_post_fork(GET_CALLER_PC(), res)
2753
2754 #define COMMON_SYSCALL_BLOCKING_START() sycall_blocking_start()
2755 #define COMMON_SYSCALL_BLOCKING_END() sycall_blocking_end()
2756
2757 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2758 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2759
2760 #ifdef NEED_TLS_GET_ADDR
2761
2762 static void handle_tls_addr(void *arg, void *res) {
2763 ThreadState *thr = cur_thread();
2764 if (!thr)
2765 return;
2766 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2767 thr->tls_addr + thr->tls_size);
2768 if (!dtv)
2769 return;
2770 // New DTLS block has been allocated.
2771 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2772 }
2773
2774 #if !SANITIZER_S390
2775 // Define own interceptor instead of sanitizer_common's for three reasons:
2776 // 1. It must not process pending signals.
2777 // Signal handlers may contain MOVDQA instruction (see below).
2778 // 2. It must be as simple as possible to not contain MOVDQA.
2779 // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2780 // is empty for tsan (meant only for msan).
2781 // Note: __tls_get_addr can be called with mis-aligned stack due to:
2782 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2783 // So the interceptor must work with mis-aligned stack, in particular, does not
2784 // execute MOVDQA with stack addresses.
2785 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2786 void *res = REAL(__tls_get_addr)(arg);
2787 handle_tls_addr(arg, res);
2788 return res;
2789 }
2790 #else // SANITIZER_S390
2791 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2792 uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2793 char *tp = static_cast<char *>(__builtin_thread_pointer());
2794 handle_tls_addr(arg, res + tp);
2795 return res;
2796 }
2797 #endif
2798 #endif
2799
2800 #if SANITIZER_NETBSD
2801 TSAN_INTERCEPTOR(void, _lwp_exit) {
2802 SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2803 DestroyThreadState();
2804 REAL(_lwp_exit)();
2805 }
2806 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2807 #else
2808 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2809 #endif
2810
2811 #if SANITIZER_FREEBSD
2812 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2813 SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2814 DestroyThreadState();
2815 REAL(thr_exit(state));
2816 }
2817 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2818 #else
2819 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2820 #endif
2821
2822 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2823 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2824 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2825 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2826 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2827 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2828 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2829 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2830 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2831 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2832 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2833 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2834 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2835 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2836 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2837 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2838 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2839 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2840 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2841
2842 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2843 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2844 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2845 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2846 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2847 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2848 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2849 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2850 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2851 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2852 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2853 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2854 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2855 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2856 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2857 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2858 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2859 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2860 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2861 void *c)
2862
2863 namespace __tsan {
2864
2865 static void finalize(void *arg) {
2866 ThreadState *thr = cur_thread();
2867 int status = Finalize(thr);
2868 // Make sure the output is not lost.
2869 FlushStreams();
2870 if (status)
2871 Die();
2872 }
2873
2874 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2875 static void unreachable() {
2876 Report("FATAL: ThreadSanitizer: unreachable called\n");
2877 Die();
2878 }
2879 #endif
2880
2881 // Define default implementation since interception of libdispatch is optional.
2882 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2883
2884 void InitializeInterceptors() {
2885 #if !SANITIZER_APPLE
2886 // We need to set it up early, because functions like dlsym() can call it.
2887 REAL(memset) = internal_memset;
2888 REAL(memcpy) = internal_memcpy;
2889 #endif
2890
2891 __interception::DoesNotSupportStaticLinking();
2892
2893 new(interceptor_ctx()) InterceptorContext();
2894
2895 // Interpose __tls_get_addr before the common interposers. This is needed
2896 // because dlsym() may call malloc on failure which could result in other
2897 // interposed functions being called that could eventually make use of TLS.
2898 #ifdef NEED_TLS_GET_ADDR
2899 # if !SANITIZER_S390
2900 TSAN_INTERCEPT(__tls_get_addr);
2901 # else
2902 TSAN_INTERCEPT(__tls_get_addr_internal);
2903 TSAN_INTERCEPT(__tls_get_offset);
2904 # endif
2905 #endif
2906 InitializeCommonInterceptors();
2907 InitializeSignalInterceptors();
2908 InitializeLibdispatchInterceptors();
2909
2910 #if !SANITIZER_APPLE
2911 InitializeSetjmpInterceptors();
2912 #endif
2913
2914 TSAN_INTERCEPT(longjmp_symname);
2915 TSAN_INTERCEPT(siglongjmp_symname);
2916 #if SANITIZER_NETBSD
2917 TSAN_INTERCEPT(_longjmp);
2918 #endif
2919
2920 TSAN_INTERCEPT(malloc);
2921 TSAN_INTERCEPT(__libc_memalign);
2922 TSAN_INTERCEPT(calloc);
2923 TSAN_INTERCEPT(realloc);
2924 TSAN_INTERCEPT(reallocarray);
2925 TSAN_INTERCEPT(free);
2926 TSAN_INTERCEPT(cfree);
2927 TSAN_INTERCEPT(munmap);
2928 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2929 TSAN_INTERCEPT(valloc);
2930 TSAN_MAYBE_INTERCEPT_PVALLOC;
2931 TSAN_INTERCEPT(posix_memalign);

  TSAN_INTERCEPT(strcpy);
  TSAN_INTERCEPT(strncpy);
  TSAN_INTERCEPT(strdup);

  TSAN_INTERCEPT(pthread_create);
  TSAN_INTERCEPT(pthread_join);
  TSAN_INTERCEPT(pthread_detach);
  TSAN_INTERCEPT(pthread_exit);
#if SANITIZER_LINUX
  TSAN_INTERCEPT(pthread_tryjoin_np);
  TSAN_INTERCEPT(pthread_timedjoin_np);
#endif

  TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);

  TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;

  TSAN_INTERCEPT(pthread_mutex_init);
  TSAN_INTERCEPT(pthread_mutex_destroy);
  TSAN_INTERCEPT(pthread_mutex_lock);
  TSAN_INTERCEPT(pthread_mutex_trylock);
  TSAN_INTERCEPT(pthread_mutex_timedlock);
  TSAN_INTERCEPT(pthread_mutex_unlock);
#if SANITIZER_LINUX
  TSAN_INTERCEPT(pthread_mutex_clocklock);
#endif
#if SANITIZER_GLIBC
# if !__GLIBC_PREREQ(2, 34)
  TSAN_INTERCEPT(__pthread_mutex_lock);
  TSAN_INTERCEPT(__pthread_mutex_unlock);
# endif
#endif

  TSAN_INTERCEPT(pthread_spin_init);
  TSAN_INTERCEPT(pthread_spin_destroy);
  TSAN_INTERCEPT(pthread_spin_lock);
  TSAN_INTERCEPT(pthread_spin_trylock);
  TSAN_INTERCEPT(pthread_spin_unlock);

  TSAN_INTERCEPT(pthread_rwlock_init);
  TSAN_INTERCEPT(pthread_rwlock_destroy);
  TSAN_INTERCEPT(pthread_rwlock_rdlock);
  TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
  TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
  TSAN_INTERCEPT(pthread_rwlock_wrlock);
  TSAN_INTERCEPT(pthread_rwlock_trywrlock);
  TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
  TSAN_INTERCEPT(pthread_rwlock_unlock);

  TSAN_INTERCEPT(pthread_barrier_init);
  TSAN_INTERCEPT(pthread_barrier_destroy);
  TSAN_INTERCEPT(pthread_barrier_wait);

  TSAN_INTERCEPT(pthread_once);

  TSAN_MAYBE_INTERCEPT___FXSTAT;
  TSAN_MAYBE_INTERCEPT_FSTAT;
  TSAN_MAYBE_INTERCEPT_FSTAT64;
  TSAN_INTERCEPT(open);
  TSAN_MAYBE_INTERCEPT_OPEN64;
  TSAN_INTERCEPT(creat);
  TSAN_MAYBE_INTERCEPT_CREAT64;
  TSAN_INTERCEPT(dup);
  TSAN_INTERCEPT(dup2);
  TSAN_INTERCEPT(dup3);
  TSAN_MAYBE_INTERCEPT_EVENTFD;
  TSAN_MAYBE_INTERCEPT_SIGNALFD;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
  TSAN_INTERCEPT(socket);
  TSAN_INTERCEPT(socketpair);
  TSAN_INTERCEPT(connect);
  TSAN_INTERCEPT(bind);
  TSAN_INTERCEPT(listen);
  TSAN_MAYBE_INTERCEPT_EPOLL;
  TSAN_INTERCEPT(close);
  TSAN_MAYBE_INTERCEPT___CLOSE;
  TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
  TSAN_INTERCEPT(pipe);
  TSAN_INTERCEPT(pipe2);

  TSAN_INTERCEPT(unlink);
  TSAN_INTERCEPT(tmpfile);
  TSAN_MAYBE_INTERCEPT_TMPFILE64;
  TSAN_INTERCEPT(abort);
  TSAN_INTERCEPT(rmdir);
  TSAN_INTERCEPT(closedir);

  TSAN_INTERCEPT(sigsuspend);
  TSAN_INTERCEPT(sigblock);
  TSAN_INTERCEPT(sigsetmask);
  TSAN_INTERCEPT(pthread_sigmask);
  TSAN_INTERCEPT(raise);
  TSAN_INTERCEPT(kill);
  TSAN_INTERCEPT(pthread_kill);
  TSAN_INTERCEPT(sleep);
  TSAN_INTERCEPT(usleep);
  TSAN_INTERCEPT(nanosleep);
  TSAN_INTERCEPT(pause);
  TSAN_INTERCEPT(gettimeofday);
  TSAN_INTERCEPT(getaddrinfo);

  TSAN_INTERCEPT(fork);
  TSAN_INTERCEPT(vfork);
#if SANITIZER_LINUX
  TSAN_INTERCEPT(clone);
#endif
#if !SANITIZER_ANDROID
  TSAN_INTERCEPT(dl_iterate_phdr);
#endif
  TSAN_MAYBE_INTERCEPT_ON_EXIT;
  TSAN_INTERCEPT(__cxa_atexit);
  TSAN_INTERCEPT(_exit);

  TSAN_MAYBE_INTERCEPT__LWP_EXIT;
  TSAN_MAYBE_INTERCEPT_THR_EXIT;

#if !SANITIZER_APPLE && !SANITIZER_ANDROID
  // We need to set this up because interceptors check that the function is
  // resolved. But atexit is emitted directly into the module, so it cannot be
  // resolved; point it at unreachable(), which aborts if it is ever called.
  REAL(atexit) = (int(*)(void(*)()))unreachable;
#endif

  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
    Printf("ThreadSanitizer: failed to setup atexit callback\n");
    Die();
  }
  if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
    Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
    Die();
  }

#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
  if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
    Printf("ThreadSanitizer: failed to create thread key\n");
    Die();
  }
#endif

  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);

  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);

  FdInit();
}

}  // namespace __tsan

// Invisible barrier for tests.
// There were several unsuccessful iterations for this functionality:
// 1. Initially it was implemented in user code using
//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
//    macOS, and futexes are Linux-specific, so neither works everywhere.
// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
//    "as-if synchronized via sleep" messages in reports, which failed some
//    output tests.
// 3. Then we switched to atomics+sched_yield. But this produced tons of
//    tsan-visible events, which led to "failed to restore stack trace"
//    failures.
// Note that the no_sanitize_thread attribute does not turn off atomic
// interception, so attaching it to a function defined in user code does not
// help.
// That's why we now have what we have.
constexpr u32 kBarrierThreadBits = 10;
constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;

extern "C" {

SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
    atomic_uint32_t *barrier, u32 num_threads) {
  if (num_threads >= kBarrierThreads) {
    Printf("barrier_init: count is too large (%d)\n", num_threads);
    Die();
  }
  // The low kBarrierThreadBits bits hold the thread count;
  // the remaining (high) bits count the threads that have entered the barrier.
  atomic_store(barrier, num_threads, memory_order_relaxed);
}
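
// barrier_epoch() below computes the number of completed barrier rounds: the
// count of threads that have entered (kept in the high bits; each waiter adds
// kBarrierThreads) divided by the thread count (kept in the low bits by
// __tsan_testonly_barrier_init). Worked example with illustrative numbers
// (not taken from the source): with num_threads = 2 the word starts at 2;
// after the first waiter it is 2 + 1024 and the epoch is (1026 >> 10) / 2 == 0;
// after the second waiter it is 2 + 2048 and the epoch becomes
// (2050 >> 10) / 2 == 1, i.e. the epoch advances once per full round of waiters.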
static u32 barrier_epoch(u32 value) {
  return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
}

SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
    atomic_uint32_t *barrier) {
  u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
  u32 old_epoch = barrier_epoch(old);
  if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
    FutexWake(barrier, (1 << 30));
    return;
  }
  for (;;) {
    u32 cur = atomic_load(barrier, memory_order_relaxed);
    if (barrier_epoch(cur) != old_epoch)
      return;
    FutexWait(barrier, cur);
  }
}

}  // extern "C"
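
// Usage sketch (illustrative only; the variable name and the "two racing
// threads" below are assumptions for the example, not part of this file):
//   static atomic_uint32_t barrier;
//   // In the main thread, before spawning the two racing threads:
//   __tsan_testonly_barrier_init(&barrier, 2);
//   // In each racing thread, immediately before the racy access:
//   __tsan_testonly_barrier_wait(&barrier);
// This lines up the racing accesses without introducing synchronization that
// would be visible to TSan.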