//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id);

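// Adapter that exposes the current thread to the deadlock detector:
// it wires up the detector's physical/logical thread contexts (dd_pt from
// the processor, dd_lt from the thread) and implements the stack unwinding
// and thread identification callbacks.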
struct Callback final : public DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  StackID Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->tid; }
};

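// Initializes deadlock-detector state for a new sync object and stores the
// mutex address as the detector context, so the detector can map its mutex
// handle back to the mutex in deadlock reports.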
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->addr;
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, StackID creation_stack_id) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  if (!ShouldReport(thr, typ))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(addr, creation_stack_id);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
                            StackID stack_id, bool write) {
  auto typ = write ? EventType::kLock : EventType::kRLock;
  // Note: it's important to trace before modifying mutex set
  // because tracing can switch trace part and we write the current
  // mutex set in the beginning of each part.
  // If we do it in the opposite order, we will write already reduced
  // mutex set in the beginning of the part and then trace unlock again.
  TraceMutexLock(thr, typ, pc, addr, stack_id);
  thr->mset.AddAddr(addr, stack_id, write);
}

static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
  // See the comment in RecordMutexLock re order of operations.
  TraceMutexUnlock(thr, addr);
  thr->mset.DelAddr(addr);
}

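// Called when the application initializes a mutex. Imitates a 1-byte write
// to the mutex memory (to catch races with the initialization) unless the
// mutex is linker-initialized, and records the creation stack.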
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Save stack in the case the sync object was created before as atomic.
  if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
    s->creation_stack_id = CurrentStackId(thr, pc);
}

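// Called when the application destroys a mutex. Reports destruction of a
// locked mutex (if enabled), resets the sync object and imitates a
// free-like write so that unlock-destroy races are detected.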
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  StackID creation_stack_id;
  FastState last_lock;
  {
    auto s = ctx->metamap.GetSyncIfExists(addr);
    if (!s)
      return;
    SlotLocker locker(thr);
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      last_lock = s->last_lock;
      if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
          ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
        // Destroy is no-op for linker-initialized mutexes.
        return;
      }
      if (common_flags()->detect_deadlocks) {
        Callback cb(thr, pc);
        ctx->dd->MutexDestroy(&cb, &s->dd);
        ctx->dd->MutexInit(&cb, &s->dd);
      }
      if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
          !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        unlock_locked = true;
      }
      s->Reset();
    }
    // Imitate a memory write to catch unlock-destroy races.
    if (pc && IsAppMem(addr))
      MemoryAccess(thr, pc, addr, 1,
                   kAccessWrite | kAccessFree | kAccessSlotLocked);
  }
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
    ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
  thr->mset.DelAddr(addr, true);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

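// Called before a potentially blocking lock acquisition, so the deadlock
// detector can record lock-order edges before the thread blocks. Try-locks
// are skipped: they never block, so they cannot complete a deadlock cycle.
// A typical interceptor pairs this with MutexPostLock, roughly (a sketch;
// see the actual interceptors for the details they handle):
//   MutexPreLock(thr, pc, (uptr)m);
//   int res = REAL(pthread_mutex_lock)(m);
//   if (res == 0)
//     MutexPostLock(thr, pc, (uptr)m, 0);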
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (flagz & MutexFlagTryLock)
    return;
  if (!common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
  }
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}

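// Called after a successful write-lock acquisition. On the first
// (non-recursive) acquisition the thread acquires both the write and the
// read clock of the mutex, i.e. synchronizes with all preceding unlocks.
// rec gives the number of recursion levels to restore for
// MutexFlagRecursiveLock callers; otherwise a single level is taken.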
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
          thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_double_lock = false;
  bool pre_lock = false;
  bool first = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, true);
    {
      Lock lock(&s->mtx);
      first = s->recursion == 0;
      s->UpdateFlags(flagz);
      if (s->owner_tid == kInvalidTid) {
        CHECK_EQ(s->recursion, 0);
        s->owner_tid = thr->tid;
        s->last_lock = thr->fast_state;
      } else if (s->owner_tid == thr->tid) {
        CHECK_GT(s->recursion, 0);
      } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_double_lock = true;
      }
      s->recursion += rec;
      if (first) {
        if (!thr->ignore_sync) {
          thr->clock.Acquire(s->clock);
          thr->clock.Acquire(s->read_clock);
        }
      }
      if (first && common_flags()->detect_deadlocks) {
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
                   !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
        ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
      }
    }
  }
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
                      creation_stack_id);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

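// Called when the application unlocks a mutex. Returns the number of
// recursion levels released; callers that need to fully release a
// recursively-held mutex and restore it later (e.g. the Java interface's
// unlock_rec/lock_rec pair) pass the value back to MutexPostLock together
// with MutexFlagRecursiveLock.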
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  StackID creation_stack_id;
  RecordMutexUnlock(thr, addr);
  bool report_bad_unlock = false;
  int rec = 0;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      } else {
        rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
        s->recursion -= rec;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0 &&
          !report_bad_unlock) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

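// Read-lock counterpart of MutexPreLock: notifies the deadlock detector
// before a potentially blocking reader acquisition; skipped for try-locks.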
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
  }
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}

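// Called after a successful reader acquisition. Readers acquire only the
// write clock: they must synchronize with preceding writers, but not with
// each other.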
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_bad_lock = false;
  bool pre_lock = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, false);
    {
      ReadLock lock(&s->mtx);
      s->UpdateFlags(flagz);
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_lock = true;
        }
      }
      if (!thr->ignore_sync)
        thr->clock.Acquire(s->clock);
      s->last_lock = thr->fast_state;
      if (common_flags()->detect_deadlocks) {
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
                   !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
        ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
      }
    }
  }
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
                      creation_stack_id);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

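// Called when the application read-unlocks a mutex. Releases into the
// separate read clock, so that concurrent readers do not synchronize with
// each other through the mutex; a later write-locker acquires both clocks.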
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      }
      if (!thr->ignore_sync) {
        thr->clock.Release(&s->read_clock);
        released = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

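// Called when the caller does not know whether it holds the mutex for
// reading or for writing (e.g. pthread_rwlock_unlock); the ownership
// recorded in the sync object decides which unlock semantics to apply.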
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  bool write = true;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid == kInvalidTid) {
        // Seems to be read unlock.
        write = false;
        if (!thr->ignore_sync) {
          thr->clock.Release(&s->read_clock);
          released = true;
        }
      } else if (s->owner_tid == thr->tid) {
        // Seems to be write unlock.
        CHECK_GT(s->recursion, 0);
        s->recursion--;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      } else if (!s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

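// Forcibly resets mutex ownership; used e.g. when a robust mutex is
// recovered after its owner died (pthread_mutex_lock returning EOWNERDEAD).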
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  Lock lock(&s->mtx);
  s->owner_tid = kInvalidTid;
  s->recursion = 0;
}

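// Reports an operation on an invalid or uninitialized mutex (e.g. when a
// pthread mutex function reports EINVAL).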
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    if (s)
      creation_stack_id = s->creation_stack_id;
  }
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
                    creation_stack_id);
}

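// Generic acquire on an arbitrary address: synchronizes with all preceding
// Release/ReleaseStore calls on the same address. A no-op if no sync object
// exists for the address yet (nothing has been released).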
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  auto s = ctx->metamap.GetSyncIfExists(addr);
  if (!s)
    return;
  SlotLocker locker(thr);
  ReadLock lock(&s->mtx);
  if (!s->clock)
    return;
  thr->clock.Acquire(s->clock);
}

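// Synchronizes with all preceding release operations in all threads by
// acquiring the current epoch of every thread slot.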
void AcquireGlobal(ThreadState *thr) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}

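// Generic release operations on an arbitrary address. Release merges the
// thread's clock into the sync object's clock, ReleaseStore overwrites it,
// and ReleaseStoreAcquire additionally acquires the clock that was stored
// there before; each is followed by an epoch increment so that later
// accesses of this thread are ordered after the release.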
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.Release(&s->clock);
  }
  IncrementEpoch(thr);
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
  }
  IncrementEpoch(thr);
}

void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStoreAcquire(&s->clock);
  }
  IncrementEpoch(thr);
}

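// Advances the thread's slot epoch after a release operation and records
// the new timestamp in the trace. If the epoch would overflow, the update
// is skipped; exhausted slots are recycled by the slot management code.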
void IncrementEpoch(ThreadState *thr) {
  DCHECK(!thr->ignore_sync);
  DCHECK(thr->slot_locked);
  Epoch epoch = EpochInc(thr->fast_state.epoch());
  if (!EpochOverflow(epoch)) {
    Sid sid = thr->fast_state.sid();
    thr->clock.Set(sid, epoch);
    thr->fast_state.SetEpoch(epoch);
    thr->slot->SetEpoch(epoch);
    TraceTime(thr);
  }
}

#if !SANITIZER_GO
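// Records the stack and the global clock at the point of a sleep; race
// reports later use this to flag accesses that are ordered only by the
// sleep ("as if synchronized via sleep").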
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  thr->last_sleep_clock.Reset();
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots)
    thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
#endif

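// Produces a deadlock report from a lock-order cycle found by the deadlock
// detector: one mutex, thread and stack (or two stacks, if
// second_deadlock_stack is set) per edge of the cycle.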
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != kInvalidStackID) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

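// Produces a "destroy of a locked mutex" report: adds the current
// (destruction) stack and restores the stack of the last lock from the
// trace.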
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id) {
  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  ScopedReport rep(ReportTypeMutexDestroyLocked);
  rep.AddMutex(addr, creation_stack_id);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);

  Tid tid;
  DynamicMutexSet mset;
  uptr tag;
  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
                    0, kAccessWrite, &tid, &trace, mset, &tag))
    return;
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

}  // namespace __tsan