//===-- tsan_interface_ann.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_interface_ann.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_platform.h"

#define CALLERPC ((uptr)__builtin_return_address(0))

using namespace __tsan;

namespace __tsan {

class ScopedAnnotation {
 public:
  ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
  }

  ~ScopedAnnotation() {
    FuncExit(thr_);
    CheckedMutex::CheckNoLocks();
  }

 private:
  ThreadState *const thr_;
};

#define SCOPED_ANNOTATION_RET(typ, ret)                       \
  if (!flags()->enable_annotations)                           \
    return ret;                                               \
  ThreadState *thr = cur_thread();                            \
  const uptr caller_pc = (uptr)__builtin_return_address(0);   \
  ScopedAnnotation sa(thr, __func__, caller_pc);              \
  const uptr pc = StackTrace::GetCurrentPc();                 \
  (void)pc;

#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )

static const int kMaxDescLen = 128;

struct ExpectRace {
  ExpectRace *next;
  ExpectRace *prev;
  atomic_uintptr_t hitcount;
  atomic_uintptr_t addcount;
  uptr addr;
  uptr size;
  char *file;
  int line;
  char desc[kMaxDescLen];
};

struct DynamicAnnContext {
  Mutex mtx;
  ExpectRace benign;

  DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
};

static DynamicAnnContext *dyn_ann_ctx;
static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);

static void AddExpectRace(ExpectRace *list,
    char *f, int l, uptr addr, uptr size, char *desc) {
  ExpectRace *race = list->next;
  for (; race != list; race = race->next) {
    if (race->addr == addr && race->size == size) {
      atomic_store_relaxed(&race->addcount,
          atomic_load_relaxed(&race->addcount) + 1);
      return;
    }
  }
  race = static_cast<ExpectRace *>(Alloc(sizeof(ExpectRace)));
  race->addr = addr;
  race->size = size;
  race->file = f;
  race->line = l;
  race->desc[0] = 0;
  atomic_store_relaxed(&race->hitcount, 0);
  atomic_store_relaxed(&race->addcount, 1);
  if (desc) {
    int i = 0;
    for (; i < kMaxDescLen - 1 && desc[i]; i++)
      race->desc[i] = desc[i];
    race->desc[i] = 0;
  }
  race->prev = list;
  race->next = list->next;
  race->next->prev = race;
  list->next = race;
}

static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    uptr maxbegin = max(race->addr, addr);
    uptr minend = min(race->addr + race->size, addr + size);
    if (maxbegin < minend)
      return race;
  }
  return 0;
}
static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
  ExpectRace *race = FindRace(list, addr, size);
  if (race == 0)
    return false;
  DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
          race->desc, race->addr, (int)race->size, race->file, race->line);
  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
  return true;
}

static void InitList(ExpectRace *list) {
  list->next = list;
  list->prev = list;
}

void InitializeDynamicAnnotations() {
  dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
  InitList(&dyn_ann_ctx->benign);
}

bool IsExpectedReport(uptr addr, uptr size) {
  ReadLock lock(&dyn_ann_ctx->mtx);
  return CheckContains(&dyn_ann_ctx->benign, addr, size);
}
}  // namespace __tsan

using namespace __tsan;

extern "C" {
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
}

void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                             uptr lock) {
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreate);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockDestroy);
  MutexDestroy(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockAcquired);
  if (is_w)
    MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
  else
    MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockReleased);
  if (is_w)
    MutexUnlock(thr, pc, m);
  else
    MutexReadUnlock(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
}

void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
}

void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                           uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
}

void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
}

void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
    char *f, int l, int enable) {
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
    char *f, int l, uptr mu) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQGet(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQPut(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
    char *f, int l, uptr pcq) {
}
void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotateExpectRace(
    char *f, int l, uptr mem, char *desc) {
}

static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->benign,
                f, l, mem, size, desc);
  DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}

void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr size, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, size, desc);
}

void INTERFACE_ATTRIBUTE AnnotateBenignRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRace);
  BenignRaceImpl(f, l, mem, 1, desc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
  ThreadIgnoreEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
  ThreadIgnoreEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
  ThreadIgnoreSyncBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
  ThreadIgnoreSyncEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateThreadName(
    char *f, int l, char *name) {
  SCOPED_ANNOTATION(AnnotateThreadName);
  ThreadSetName(thr, name);
}

// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
// WTFAnnotateHappensAfter(). Those are used by WebKit to annotate atomic
// operations, which ThreadSanitizer already handles correctly.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
}

void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
}

void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr sz, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, sz, desc);
}

int INTERFACE_ATTRIBUTE RunningOnValgrind() {
  return flags()->running_on_valgrind;
}

double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
  return 10.0;
}

const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
  if (internal_strcmp(query, "pure_happens_before") == 0)
    return "1";
  else
    return "0";
}

void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
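// Illustrative only (not part of the runtime): a minimal sketch of how client
// code might reach the Annotate* entry points above. Assumptions: uptr is
// spelled here as uintptr_t, <atomic> and <cstdint> are available, and the
// __FILE__/__LINE__ arguments mirror what the legacy dynamic_annotations
// macros pass for the f/l parameters; real programs normally go through those
// macros rather than declaring the functions by hand.
//
//   extern "C" void AnnotateHappensBefore(char *f, int l, uintptr_t addr);
//   extern "C" void AnnotateHappensAfter(char *f, int l, uintptr_t addr);
//   extern "C" void AnnotateBenignRaceSized(char *f, int l, uintptr_t mem,
//                                           uintptr_t size, char *desc);
//
//   int payload;
//   std::atomic<int> ready{0};
//
//   void Producer() {
//     payload = 42;
//     // Maps to Release(thr, pc, &ready) in AnnotateHappensBefore() above.
//     AnnotateHappensBefore((char *)__FILE__, __LINE__, (uintptr_t)&ready);
//     ready.store(1, std::memory_order_relaxed);
//   }
//
//   void Consumer() {
//     while (!ready.load(std::memory_order_relaxed)) {
//     }
//     // Maps to Acquire(thr, pc, &ready); together with the annotation in
//     // Producer() this creates the happens-before edge that suppresses a
//     // race report on `payload`.
//     AnnotateHappensAfter((char *)__FILE__, __LINE__, (uintptr_t)&ready);
//     (void)payload;
//   }
//
//   // Alternatively, a known-benign racy location can be whitelisted up
//   // front, which ends up in BenignRaceImpl()/AddExpectRace() above:
//   //   AnnotateBenignRaceSized((char *)__FILE__, __LINE__,
//   //                           (uintptr_t)&payload, sizeof(payload),
//   //                           (char *)"intentionally racy counter");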
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
INTERFACE_ATTRIBUTE
void __tsan_mutex_create(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_create);
  MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_destroy(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_destroy);
  MutexDestroy(thr, pc, (uptr)m, flagz);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
  if (!(flagz & MutexFlagTryLock)) {
    if (flagz & MutexFlagReadLock)
      MutexPreReadLock(thr, pc, (uptr)m);
    else
      MutexPreLock(thr, pc, (uptr)m);
  }
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
  SCOPED_ANNOTATION(__tsan_mutex_post_lock);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
  if (!(flagz & MutexFlagTryLockFailed)) {
    if (flagz & MutexFlagReadLock)
      MutexPostReadLock(thr, pc, (uptr)m, flagz);
    else
      MutexPostLock(thr, pc, (uptr)m, flagz, rec);
  }
}

INTERFACE_ATTRIBUTE
int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
  int ret = 0;
  if (flagz & MutexFlagReadLock) {
    CHECK(!(flagz & MutexFlagRecursiveUnlock));
    MutexReadUnlock(thr, pc, (uptr)m);
  } else {
    ret = MutexUnlock(thr, pc, (uptr)m, flagz);
  }
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
  return ret;
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_signal);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
  // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
}

static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeMutexHeldWrongContext);
  for (uptr i = 0; i < thr->mset.Size(); ++i) {
    MutexSet::Desc desc = thr->mset.Get(i);
    rep.AddMutex(desc.addr, desc.stack_id);
  }
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  OutputReport(thr, rep);
}

INTERFACE_ATTRIBUTE
void __tsan_check_no_mutexes_held() {
  SCOPED_ANNOTATION(__tsan_check_no_mutexes_held);
  if (thr->mset.Size() == 0) {
    return;
  }
  ReportMutexHeldWrongContext(thr, pc);
}
}  // extern "C"
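// Illustrative only (not part of the runtime): a minimal sketch of how a
// custom lock implementation might drive the __tsan_mutex_* entry points
// above, assuming the public declarations from <sanitizer/tsan_interface.h>
// and a hypothetical SpinLock type.
//
//   #include <sanitizer/tsan_interface.h>
//   #include <atomic>
//
//   class SpinLock {
//    public:
//     SpinLock() { __tsan_mutex_create(this, 0); }
//     ~SpinLock() { __tsan_mutex_destroy(this, 0); }
//
//     void Lock() {
//       // Announce the acquisition; between pre_lock and post_lock the
//       // runtime ignores the lock implementation's own memory accesses
//       // and synchronization (see ThreadIgnoreBegin/SyncBegin above).
//       __tsan_mutex_pre_lock(this, 0);
//       while (locked_.exchange(true, std::memory_order_acquire)) {
//       }
//       __tsan_mutex_post_lock(this, 0, /*recursion=*/0);
//     }
//
//     void Unlock() {
//       __tsan_mutex_pre_unlock(this, 0);
//       locked_.store(false, std::memory_order_release);
//       __tsan_mutex_post_unlock(this, 0);
//     }
//
//    private:
//     std::atomic<bool> locked_{false};
//   };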