xref: /freebsd/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp (revision c7a063741720ef81d4caa4613242579d12f1d605)
1 //===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
14 #include <sanitizer_common/sanitizer_stackdepot.h>
15 
16 #include "tsan_rtl.h"
17 #include "tsan_flags.h"
18 #include "tsan_sync.h"
19 #include "tsan_report.h"
20 #include "tsan_symbolize.h"
21 #include "tsan_platform.h"
22 
23 namespace __tsan {
24 
25 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
26 void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
27                          FastState last_lock, StackID creation_stack_id);
28 
// Adapter that lets the generic deadlock detector (DD) call back into TSan.
// It carries the thread and pc on whose behalf a DD operation runs and wires
// up the detector's per-physical-thread (pt) and per-logical-thread (lt)
// state from the TSan thread/processor structures.
struct Callback final : public DDCallback {
  ThreadState *thr;  // thread performing the mutex operation
  uptr pc;           // pc of the mutex operation, used for stack unwinding

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  // Captures and interns the current stack for deadlock reports.
  StackID Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->tid; }
};
43 
// Initializes deadlock-detector state for a sync object.
// The mutex address is stored as the DD context so deadlock reports can
// identify the mutex involved.
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->addr;
}
49 
50 static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
51                               uptr addr, StackID creation_stack_id) {
52   // In Go, these misuses are either impossible, or detected by std lib,
53   // or false positives (e.g. unlock in a different thread).
54   if (SANITIZER_GO)
55     return;
56   if (!ShouldReport(thr, typ))
57     return;
58   ThreadRegistryLock l(&ctx->thread_registry);
59   ScopedReport rep(typ);
60   rep.AddMutex(addr, creation_stack_id);
61   VarSizeStackTrace trace;
62   ObtainCurrentStack(thr, pc, &trace);
63   rep.AddStack(trace, true);
64   rep.AddLocation(addr, 1);
65   OutputReport(thr, rep);
66 }
67 
// Traces a mutex acquisition (write lock or read lock, chosen by `write`)
// and then adds the mutex to the thread's current mutex set.
static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
                            StackID stack_id, bool write) {
  auto typ = write ? EventType::kLock : EventType::kRLock;
  // Note: it's important to trace before modifying mutex set
  // because tracing can switch trace part and we write the current
  // mutex set in the beginning of each part.
  // If we do it in the opposite order, we will write already reduced
  // mutex set in the beginning of the part and then trace unlock again.
  TraceMutexLock(thr, typ, pc, addr, stack_id);
  thr->mset.AddAddr(addr, stack_id, write);
}
79 
// Traces a mutex release and then removes the mutex from the thread's
// current mutex set (trace first — see RecordMutexLock).
static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
  // See the comment in RecordMutexLock re order of operations.
  TraceMutexUnlock(thr, addr);
  thr->mset.DelAddr(addr);
}
85 
// Handles mutex creation/initialization at `addr`.
// Imitates a 1-byte write (except for linker-initialized mutexes) so races
// between mutex initialization and other accesses to the same memory are
// detected, then creates/updates the sync object with the creation flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Save stack in the case the sync object was created before as atomic.
  if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
    s->creation_stack_id = CurrentStackId(thr, pc);
}
97 
// Handles mutex destruction: resets the sync object, imitates a freeing
// write to catch unlock-vs-destroy races, and (if report_destroy_locked is
// set) reports destruction of a still-locked mutex.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  StackID creation_stack_id;
  FastState last_lock;
  {
    auto s = ctx->metamap.GetSyncIfExists(addr);
    if (!s)
      return;  // never used as a mutex (or already gone) — nothing to do
    SlotLocker locker(thr);
    {
      Lock lock(&s->mtx);
      // Snapshot report data before Reset() wipes the sync object.
      creation_stack_id = s->creation_stack_id;
      last_lock = s->last_lock;
      if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
          ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
        // Destroy is no-op for linker-initialized mutexes.
        return;
      }
      if (common_flags()->detect_deadlocks) {
        Callback cb(thr, pc);
        ctx->dd->MutexDestroy(&cb, &s->dd);
        // Re-initialize DD state so the address can be reused for a new mutex.
        ctx->dd->MutexInit(&cb, &s->dd);
      }
      // A still-owned mutex at destroy time is a bug; mark it broken so we
      // report it only once.
      if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
          !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        unlock_locked = true;
      }
      s->Reset();
    }
    // Imitate a memory write to catch unlock-destroy races.
    if (pc && IsAppMem(addr))
      MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
  }
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
    ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
  // Destroy implicitly drops the mutex from the thread's mutex set.
  thr->mset.DelAddr(addr, true);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
138 
// Called before a blocking write-lock attempt. Feeds the prospective
// acquisition to the deadlock detector and reports a deadlock if one is
// predicted. Skipped for try-locks (they cannot deadlock) and when deadlock
// detection is disabled.
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (flagz & MutexFlagTryLock)
    return;
  if (!common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    // Recursive re-lock by the owner is not a lock-order event.
    if (s->owner_tid != thr->tid)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
  }
  // Report outside of the locks above.
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
156 
// Called after a successful write-lock. Records the acquisition in the
// trace/mutex set, transfers ownership, acquires the mutex clocks on first
// (non-recursive) acquisition, and updates the deadlock detector.
// `rec` is the number of recursion levels acquired when
// MutexFlagRecursiveLock is set; otherwise a single level is assumed.
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_double_lock = false;
  bool pre_lock = false;
  bool first = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, true);
    {
      Lock lock(&s->mtx);
      first = s->recursion == 0;
      s->UpdateFlags(flagz);
      if (s->owner_tid == kInvalidTid) {
        // Unowned: take ownership.
        CHECK_EQ(s->recursion, 0);
        s->owner_tid = thr->tid;
        s->last_lock = thr->fast_state;
      } else if (s->owner_tid == thr->tid) {
        // Recursive re-lock by the current owner.
        CHECK_GT(s->recursion, 0);
      } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        // Locked while owned by another thread: double lock.
        s->SetFlags(MutexFlagBroken);
        report_double_lock = true;
      }
      s->recursion += rec;
      if (first) {
        // First acquisition synchronizes with prior write AND read unlocks.
        if (!thr->ignore_sync) {
          thr->clock.Acquire(s->clock);
          thr->clock.Acquire(s->read_clock);
        }
      }
      if (first && common_flags()->detect_deadlocks) {
        // If MutexPreLock was skipped (annotation-based locks), do the
        // before-lock step here — but not for try-locks.
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
                   !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
        ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
      }
    }
  }
  // Reporting happens outside of the slot/sync locks.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
                      creation_stack_id);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
214 
// Handles a write-unlock. Releases the thread's clock into the mutex clock
// when the last recursion level is dropped, and reports unlocking a mutex
// the thread does not own. Returns the number of recursion levels released
// (all of them for MutexFlagRecursiveUnlock, else 1; 0 on a bad unlock) so
// callers can re-lock to the same depth, e.g. around condvar waits.
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  StackID creation_stack_id;
  RecordMutexUnlock(thr, addr);
  bool report_bad_unlock = false;
  int rec = 0;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      // Unlock of an unlocked mutex or one owned by another thread.
      // (Not reported in Go — see ReportMutexMisuse.)
      if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      } else {
        rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
        s->recursion -= rec;
        if (s->recursion == 0) {
          // Fully released: publish this thread's clock to future lockers.
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0 &&
          !report_bad_unlock) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
      }
    }
    // Epoch must advance after a release so subsequent accesses are ordered
    // after the published clock.
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
264 
// Called before a blocking read-lock attempt; feeds the prospective shared
// acquisition to the deadlock detector. Skipped for try-locks and when
// deadlock detection is disabled.
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
  }
  // Report outside of the locks above.
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
279 
// Called after a successful read-lock. Records the acquisition, acquires the
// mutex write clock (readers synchronize with prior write unlocks only),
// and reports a read-lock of a write-locked mutex.
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_bad_lock = false;
  bool pre_lock = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, false);
    {
      // Only a read lock on s->mtx: multiple readers can lock concurrently.
      ReadLock lock(&s->mtx);
      s->UpdateFlags(flagz);
      // Read-locking a write-owned mutex is a misuse.
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_lock = true;
        }
      }
      if (!thr->ignore_sync)
        thr->clock.Acquire(s->clock);
      // NOTE(review): last_lock is written while holding only a read lock on
      // s->mtx, so concurrent readers may race on it; presumably any of the
      // concurrent values is acceptable for reporting — confirm.
      s->last_lock = thr->fast_state;
      if (common_flags()->detect_deadlocks) {
        // If MutexPreReadLock was skipped, do the before-lock step here —
        // but not for try-locks.
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
                   !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
        ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
      }
    }
  }
  // Reporting happens outside of the slot/sync locks.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
                      creation_stack_id);
  if (pre_lock  && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
322 
// Handles a read-unlock. Releases the thread's clock into the mutex's
// read_clock (joined by the next writer, not by other readers) and reports
// a read-unlock of a write-owned mutex.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      // Read-unlock of a write-owned mutex is a misuse.
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      }
      if (!thr->ignore_sync) {
        // Release (not ReleaseStore): merges with other readers' releases.
        thr->clock.Release(&s->read_clock);
        released = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
363 
// Handles an unlock when the caller does not know whether the lock was held
// for reading or writing (e.g. pthread_rwlock_unlock). Infers the mode from
// the recorded owner: owned by nobody => read unlock, owned by this thread
// => write unlock, owned by another thread => bad unlock.
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  bool write = true;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid == kInvalidTid) {
        // Seems to be read unlock.
        write = false;
        if (!thr->ignore_sync) {
          thr->clock.Release(&s->read_clock);
          released = true;
        }
      } else if (s->owner_tid == thr->tid) {
        // Seems to be write unlock.
        CHECK_GT(s->recursion, 0);
        s->recursion--;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      } else if (!s->IsFlagSet(MutexFlagBroken)) {
        // Owned by a different thread: bad unlock.
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
417 
// Forcibly resets the mutex to an unowned, non-recursive state without any
// clock release or reporting. Presumably used by interceptors to recover
// broken mutex state (e.g. robust-mutex owner-died recovery) — confirm
// against callers.
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  Lock lock(&s->mtx);
  s->owner_tid = kInvalidTid;
  s->recursion = 0;
}
426 
427 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
428   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
429   StackID creation_stack_id = kInvalidStackID;
430   {
431     SlotLocker locker(thr);
432     auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
433     if (s)
434       creation_stack_id = s->creation_stack_id;
435   }
436   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
437                     creation_stack_id);
438 }
439 
// Acquire semantics on an arbitrary address: joins the sync object's clock
// into the thread's clock. No-op if no sync object exists for the address
// or synchronization is being ignored.
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  auto s = ctx->metamap.GetSyncIfExists(addr);
  if (!s)
    return;
  SlotLocker locker(thr);
  // Fast-path check of s->clock before taking s->mtx; presumably safe
  // because a null clock means no release has been published yet — confirm
  // against SyncVar clock lifetime.
  if (!s->clock)
    return;
  ReadLock lock(&s->mtx);
  thr->clock.Acquire(s->clock);
}
453 
// Acquires "everything": sets the thread's clock entry for every slot to
// that slot's current epoch, i.e. synchronizes with all threads.
void AcquireGlobal(ThreadState *thr) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}
461 
// Release semantics on an arbitrary address: merges the thread's clock into
// the sync object's clock (preserving prior releases by other threads),
// then advances the thread's epoch.
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.Release(&s->clock);
  }
  // Epoch increment must happen after the release is published.
  IncrementEpoch(thr);
}
474 
// Release-store semantics on an arbitrary address: overwrites the sync
// object's clock with the thread's clock (discarding prior releases),
// then advances the thread's epoch.
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
  }
  // Epoch increment must happen after the release is published.
  IncrementEpoch(thr);
}
487 
// Combined release-store + acquire on an arbitrary address (used for
// operations that both publish and consume, e.g. atomic exchanges), then
// advances the thread's epoch.
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStoreAcquire(&s->clock);
  }
  // Epoch increment must happen after the release is published.
  IncrementEpoch(thr);
}
500 
// Advances the calling thread's epoch after a release operation and traces
// the time change. Requires the slot to be locked and sync not ignored
// (enforced by the DCHECKs). On epoch overflow nothing is updated here;
// presumably the overflow is handled when the thread switches slots —
// confirm against SlotLocker/slot-switch logic.
void IncrementEpoch(ThreadState *thr) {
  DCHECK(!thr->ignore_sync);
  DCHECK(thr->slot_locked);
  Epoch epoch = EpochInc(thr->fast_state.epoch());
  if (!EpochOverflow(epoch)) {
    Sid sid = thr->fast_state.sid();
    // Keep the thread clock, fast state and slot in agreement.
    thr->clock.Set(sid, epoch);
    thr->fast_state.SetEpoch(epoch);
    thr->slot->SetEpoch(epoch);
    TraceTime(thr);
  }
}
513 
514 #if !SANITIZER_GO
// Called after a sleep-like call returns. Snapshots the current stack and
// every slot's epoch into last_sleep_clock; these are used to annotate
// race reports involving memory accessed before the sleep.
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  thr->last_sleep_clock.Reset();
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots)
    thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
525 #endif
526 
// Emits a deadlock report from the deadlock detector's description `r` of a
// lock-order cycle: one mutex + thread per cycle edge, plus one or two
// stacks per edge (the second only with second_deadlock_stack).
// No-op when r is null or deadlock reports are suppressed.
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  // Placeholder pc used when a real stack is unavailable (see below).
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != kInvalidStackID) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}
552 
// Emits a "mutex destroyed while locked" report: the current (destroying)
// stack plus, when it can be restored from the trace, the stack of the last
// lock operation identified by `last_lock` (sid/epoch of the locker).
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id) {
  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  ScopedReport rep(ReportTypeMutexDestroyLocked);
  rep.AddMutex(addr, creation_stack_id);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);

  Tid tid;
  DynamicMutexSet mset;
  uptr tag;
  // Recover the stack of the last lock event from the trace; if that fails,
  // no report is produced at all.
  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
                    0, kAccessWrite, &tid, &trace, mset, &tag))
    return;
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
576 
577 }  // namespace __tsan
578