//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

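// Callback adapts the current thread to the interface expected by the
// sanitizer_common deadlock detector: it hands the detector the thread's
// physical (per-processor) and logical contexts and lets it capture the
// current stack on demand.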
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

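// Registers the sync object with the deadlock detector and records the
// mutex id so detector reports can be mapped back to this SyncVar.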
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

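// Produces a mutex misuse report (double lock, bad unlock, invalid access)
// with the current stack and the mutex identified by mid. Suppressed for Go,
// see the comment inside.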
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

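// Handles user mutex construction. The dummy MemoryWrite lets the race
// detector catch races between construction and other accesses; it is
// skipped for linker-initialized mutexes, which may be used before their
// storage is formally constructed.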
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

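// Handles user mutex destruction. May report destruction of a locked mutex,
// resets the sync object (after the report, if any, has been printed), and
// imitates a memory write to catch unlock-destroy races.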
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

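// Notifies the deadlock detector before a potentially blocking write-lock
// acquisition, so lock-order inversions are reported even if the thread then
// blocks forever. Interceptors are expected to bracket the real lock roughly
// as follows (a sketch, not verbatim interceptor code):
//
//   MutexPreLock(thr, pc, addr, flagz);
//   // ... call the real, potentially blocking, lock function ...
//   MutexPostLock(thr, pc, addr, flagz, rec);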
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

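// Records a completed write-lock acquisition: updates ownership and
// recursion, acquires the mutex's write and read clocks on the first
// (non-recursive) acquisition, and informs the deadlock detector.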
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

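// Handles a write unlock. Returns the number of recursion levels released;
// callers can pass that value back to MutexPostLock together with
// MutexFlagRecursiveLock to restore the same recursion depth.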
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

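// Read-lock counterpart of MutexPreLock: notifies the deadlock detector
// before a potentially blocking read-lock acquisition.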
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

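// Records a completed read-lock acquisition. Read locks take no ownership,
// so an address that is currently write-held is reported as a bad read lock.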
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

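// Handles a read unlock: releases into the mutex's read clock and reports a
// bad read unlock if the mutex is currently write-held.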
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

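// Combined unlock entry point for callers that cannot tell read and write
// unlocks apart; the kind of unlock is inferred from the recorded owner.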
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

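// Forcibly clears ownership and recursion so later operations see a
// consistent mutex (e.g. after the runtime decides to recover from an
// inconsistent lock state, as with robust mutexes).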
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

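// Reports an operation on a mutex the runtime considers invalid, such as
// use of a destroyed mutex.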
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

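// User-visible acquire annotation: pulls in the clock previously released
// at addr, if a sync object exists there.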
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

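// Folds every registry thread's epoch (the live epoch for running threads,
// the final one otherwise) into the acquiring thread's clock.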
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

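// Global acquire: synchronizes with all prior release operations of all
// threads by acquiring every thread's current epoch.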
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

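// User-visible release annotation: publishes the current thread's clock to
// the sync object at addr.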
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

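// Like Release, but overwrites the destination clock rather than merging
// into it (release-store semantics).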
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

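// Sleep tracking: AfterSleep snapshots all thread epochs so race reports can
// point out that the racing accesses were separated only by a sleep ("as if
// synchronized via sleep").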
#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

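// The *Impl helpers below operate directly on SyncClocks; callers in this
// file invoke them while holding the owning sync object's mutex. All of them
// are no-ops while sync is ignored.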
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

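// Converts a deadlock detector report into a TSan report: one mutex and one
// thread per edge of the lock cycle, plus the acquisition stacks recorded
// for each edge when available (a dummy frame otherwise).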
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan