//===-- tsan_interface_ann.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_interface_ann.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_platform.h"

#define CALLERPC ((uptr)__builtin_return_address(0))

using namespace __tsan;

namespace __tsan {

class ScopedAnnotation {
 public:
  ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
  }

  ~ScopedAnnotation() {
    FuncExit(thr_);
    CheckedMutex::CheckNoLocks();
  }
 private:
  ThreadState *const thr_;
};

#define SCOPED_ANNOTATION_RET(typ, ret) \
    if (!flags()->enable_annotations) \
      return ret; \
    ThreadState *thr = cur_thread(); \
    const uptr caller_pc = (uptr)__builtin_return_address(0); \
    ScopedAnnotation sa(thr, __func__, caller_pc); \
    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
/**/

#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )

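// Expected ("expect") and benign race annotations are recorded in two
// circular, doubly-linked lists with sentinel heads (dyn_ann_ctx->expect and
// dyn_ann_ctx->benign, see DynamicAnnContext below). Each entry remembers
// the annotation site (file:line plus an optional description), how many
// times it was annotated (addcount) and how many reports matched it
// (hitcount). IsExpectedReport() consults both lists by address-range
// overlap before a report is printed.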
static const int kMaxDescLen = 128;

struct ExpectRace {
  ExpectRace *next;
  ExpectRace *prev;
  atomic_uintptr_t hitcount;
  atomic_uintptr_t addcount;
  uptr addr;
  uptr size;
  char *file;
  int line;
  char desc[kMaxDescLen];
};

struct DynamicAnnContext {
  Mutex mtx;
  ExpectRace expect;
  ExpectRace benign;

  DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
};

static DynamicAnnContext *dyn_ann_ctx;
static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);

static void AddExpectRace(ExpectRace *list,
    char *f, int l, uptr addr, uptr size, char *desc) {
  ExpectRace *race = list->next;
  for (; race != list; race = race->next) {
    if (race->addr == addr && race->size == size) {
      atomic_store_relaxed(&race->addcount,
          atomic_load_relaxed(&race->addcount) + 1);
      return;
    }
  }
  race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
  race->addr = addr;
  race->size = size;
  race->file = f;
  race->line = l;
  race->desc[0] = 0;
  atomic_store_relaxed(&race->hitcount, 0);
  atomic_store_relaxed(&race->addcount, 1);
  if (desc) {
    int i = 0;
    for (; i < kMaxDescLen - 1 && desc[i]; i++)
      race->desc[i] = desc[i];
    race->desc[i] = 0;
  }
  race->prev = list;
  race->next = list->next;
  race->next->prev = race;
  list->next = race;
}

static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    uptr maxbegin = max(race->addr, addr);
    uptr minend = min(race->addr + race->size, addr + size);
    if (maxbegin < minend)
      return race;
  }
  return 0;
}

static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
  ExpectRace *race = FindRace(list, addr, size);
  if (race == 0)
    return false;
  DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
      race->desc, race->addr, (int)race->size, race->file, race->line);
  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
  return true;
}

static void InitList(ExpectRace *list) {
  list->next = list;
  list->prev = list;
}

void InitializeDynamicAnnotations() {
  dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
  InitList(&dyn_ann_ctx->expect);
  InitList(&dyn_ann_ctx->benign);
}

bool IsExpectedReport(uptr addr, uptr size) {
  ReadLock lock(&dyn_ann_ctx->mtx);
  if (CheckContains(&dyn_ann_ctx->expect, addr, size))
    return true;
  if (CheckContains(&dyn_ann_ctx->benign, addr, size))
    return true;
  return false;
}

static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
  ExpectRace *list = &dyn_ann_ctx->benign;
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    (*unique_count)++;
    const uptr cnt = atomic_load_relaxed(&(race->*counter));
    if (cnt == 0)
      continue;
    *hit_count += cnt;
    uptr i = 0;
    for (; i < matched->Size(); i++) {
      ExpectRace *race0 = &(*matched)[i];
      if (race->line == race0->line
          && internal_strcmp(race->file, race0->file) == 0
          && internal_strcmp(race->desc, race0->desc) == 0) {
        atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
        break;
      }
    }
    if (i == matched->Size())
      matched->PushBack(*race);
  }
}

void PrintMatchedBenignRaces() {
  Lock lock(&dyn_ann_ctx->mtx);
  int unique_count = 0;
  int hit_count = 0;
  int add_count = 0;
  Vector<ExpectRace> hit_matched;
  CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
      &ExpectRace::hitcount);
  Vector<ExpectRace> add_matched;
  CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
      &ExpectRace::addcount);
  if (hit_matched.Size()) {
    Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
        hit_count, (int)internal_getpid());
    for (uptr i = 0; i < hit_matched.Size(); i++) {
      Printf("%d %s:%d %s\n",
          atomic_load_relaxed(&hit_matched[i].hitcount),
          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
    }
  }
  if (add_matched.Size()) {
    Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
           " (pid=%d):\n",
        add_count, unique_count, (int)internal_getpid());
    for (uptr i = 0; i < add_matched.Size(); i++) {
      Printf("%d %s:%d %s\n",
          atomic_load_relaxed(&add_matched[i].addcount),
          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
    }
  }
}

static void ReportMissedExpectedRace(ExpectRace *race) {
  Printf("==================\n");
  Printf("WARNING: ThreadSanitizer: missed expected data race\n");
  Printf("  %s addr=%zx %s:%d\n",
      race->desc, race->addr, race->file, race->line);
  Printf("==================\n");
}
}  // namespace __tsan

using namespace __tsan;

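// Everything below is the set of C entry points called by annotated client
// code. The classic dynamic-annotation macros (ANNOTATE_HAPPENS_BEFORE and
// friends, usually supplied by a dynamic_annotations.h header on the client
// side) typically splice the call site into the call, roughly along these
// lines (an illustrative sketch of a client-side header, not something
// defined in this file):
//
//   #define ANNOTATE_HAPPENS_BEFORE(obj) \
//     AnnotateHappensBefore(__FILE__, __LINE__, (uptr)(obj))
//
// which is why every entry point takes a file/line pair even when the
// annotation itself is a no-op.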
extern "C" {
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
  SCOPED_ANNOTATION(AnnotateCondVarSignal);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
  SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
  SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                             uptr lock) {
  SCOPED_ANNOTATION(AnnotateCondVarWait);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreate);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockDestroy);
  MutexDestroy(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockAcquired);
  if (is_w)
    MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
  else
    MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockReleased);
  if (is_w)
    MutexUnlock(thr, pc, m);
  else
    MutexReadUnlock(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
  SCOPED_ANNOTATION(AnnotateTraceMemory);
}

void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateFlushState);
}

void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                           uptr size) {
  SCOPED_ANNOTATION(AnnotateNewMemory);
}

void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
  SCOPED_ANNOTATION(AnnotateNoOp);
}

void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
  Lock lock(&dyn_ann_ctx->mtx);
  while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
    ExpectRace *race = dyn_ann_ctx->expect.next;
    if (atomic_load_relaxed(&race->hitcount) == 0) {
      ctx->nmissed_expected++;
      ReportMissedExpectedRace(race);
    }
    race->prev->next = race->next;
    race->next->prev = race->prev;
    internal_free(race);
  }
}

void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
    char *f, int l, int enable) {
  SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
  // FIXME: Reconsider this functionality later. It may be irrelevant.
}

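// Several of the annotations below (mutex-used-as-condvar, the AnnotatePCQ*
// producer-consumer queue hooks, publish/unpublish) are accepted so that
// code written against the original dynamic-annotations interface keeps
// linking and running, but they are deliberately no-ops in TSan: only the
// SCOPED_ANNOTATION bookkeeping runs.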
void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
    char *f, int l, uptr mu) {
  SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
}

void INTERFACE_ATTRIBUTE AnnotatePCQGet(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQGet);
}

void INTERFACE_ATTRIBUTE AnnotatePCQPut(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQPut);
}

void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQDestroy);
}

void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQCreate);
}

void INTERFACE_ATTRIBUTE AnnotateExpectRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateExpectRace);
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->expect,
                f, l, mem, 1, desc);
  DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}

static void BenignRaceImpl(
    char *f, int l, uptr mem, uptr size, char *desc) {
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->benign,
                f, l, mem, size, desc);
  DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}

// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr size, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, size, desc);
}

void INTERFACE_ATTRIBUTE AnnotateBenignRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRace);
  BenignRaceImpl(f, l, mem, 1, desc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
  ThreadIgnoreEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
  ThreadIgnoreEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
  ThreadIgnoreSyncBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
  ThreadIgnoreSyncEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
  SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
}

void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
  SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
}

void INTERFACE_ATTRIBUTE AnnotateThreadName(
    char *f, int l, char *name) {
  SCOPED_ANNOTATION(AnnotateThreadName);
  ThreadSetName(thr, name);
}

// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
// WTFAnnotateHappensAfter(). They are used by WebKit to annotate atomic
// operations, which ThreadSanitizer should handle correctly on its own.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
}

void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
}

void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr sz, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, sz, desc);
}

int INTERFACE_ATTRIBUTE RunningOnValgrind() {
  return flags()->running_on_valgrind;
}

double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
  return 10.0;
}

const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
  if (internal_strcmp(query, "pure_happens_before") == 0)
    return "1";
  else
    return "0";
}

void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}

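// The __tsan_mutex_* annotations below let a custom synchronization
// primitive describe itself to the runtime. An illustrative sketch of how a
// hand-rolled spinlock might use them (SpinLock and lock_ are hypothetical,
// not part of this file; the flag values come from tsan_interface.h):
//
//   void SpinLock::Lock() {
//     __tsan_mutex_pre_lock(this, 0);
//     while (lock_.exchange(1, std::memory_order_acquire)) {
//     }
//     __tsan_mutex_post_lock(this, 0, 0);
//   }
//   void SpinLock::Unlock() {
//     __tsan_mutex_pre_unlock(this, 0);
//     lock_.store(0, std::memory_order_release);
//     __tsan_mutex_post_unlock(this, 0);
//   }
//
// Between a pre_lock/pre_unlock/pre_signal call and the matching post_* call
// the runtime ignores memory accesses and synchronization performed by the
// primitive's own implementation (see the ThreadIgnore* calls below).
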
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
INTERFACE_ATTRIBUTE
void __tsan_mutex_create(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_create);
  MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_destroy(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_destroy);
  MutexDestroy(thr, pc, (uptr)m, flagz);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
  if (!(flagz & MutexFlagTryLock)) {
    if (flagz & MutexFlagReadLock)
      MutexPreReadLock(thr, pc, (uptr)m);
    else
      MutexPreLock(thr, pc, (uptr)m);
  }
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
  SCOPED_ANNOTATION(__tsan_mutex_post_lock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
  if (!(flagz & MutexFlagTryLockFailed)) {
    if (flagz & MutexFlagReadLock)
      MutexPostReadLock(thr, pc, (uptr)m, flagz);
    else
      MutexPostLock(thr, pc, (uptr)m, flagz, rec);
  }
}

INTERFACE_ATTRIBUTE
int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
  int ret = 0;
  if (flagz & MutexFlagReadLock) {
    CHECK(!(flagz & MutexFlagRecursiveUnlock));
    MutexReadUnlock(thr, pc, (uptr)m);
  } else {
    ret = MutexUnlock(thr, pc, (uptr)m, flagz);
  }
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
  return ret;
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_signal);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

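// The divert annotations temporarily suspend the ignore region established
// by __tsan_mutex_pre_lock/pre_unlock/pre_signal, so that code the mutex
// operation diverts into (for example, a call into a cooperative scheduler)
// is analyzed normally; __tsan_mutex_post_divert re-establishes the ignore
// region.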
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
  // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}
}  // extern "C"