//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_placement_new.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_symbolize.h"
#include "tsan_sync.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id);

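// Adapter that lets the deadlock detector query the current thread: it
// exposes the per-processor and per-thread detector state and unwinds the
// current stack on request.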
struct Callback final : public DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  StackID Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->tid; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->addr;
}

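// Produces a mutex misuse report (double lock, bad unlock, etc.) for the
// given mutex address, attaching the current stack and the mutex location.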
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, StackID creation_stack_id) {
  // In Go, these misuses are either impossible, or detected by the std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  if (!ShouldReport(thr, typ))
    return;
  // Use alloca, because malloc during signal handling deadlocks.
  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
  // Open a new scope: Apple platforms require the locks below to be released
  // before symbolizing in order to avoid a deadlock.
  {
    ThreadRegistryLock l(&ctx->thread_registry);
    new (rep) ScopedReport(typ);
    rep->AddMutex(addr, creation_stack_id);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep->AddStack(trace, true);
    rep->AddLocation(addr, 1);
#if SANITIZER_APPLE
  }  // Close this scope to release the locks.
#endif
  OutputReport(thr, *rep);

  // Destroy manually because the report was allocated with placement new.
  rep->~ScopedReport();
#if !SANITIZER_APPLE
  }
#endif
}

static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
                            StackID stack_id, bool write) {
  auto typ = write ? EventType::kLock : EventType::kRLock;
  // Note: it's important to trace before modifying the mutex set, because
  // tracing can switch the trace part, and we write the current mutex set
  // at the beginning of each part. If we did it in the opposite order, we
  // would write the already-reduced mutex set at the beginning of the part
  // and then trace the unlock again.
  TraceMutexLock(thr, typ, pc, addr, stack_id);
  thr->mset.AddAddr(addr, stack_id, write);
}

static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
  // See the comment in RecordMutexLock regarding the order of operations.
  TraceMutexUnlock(thr, addr);
  thr->mset.DelAddr(addr);
}

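// Called when a mutex is created: registers a sync object for the address
// and records its creation flags and creation stack.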
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Save the creation stack in case the sync object was created earlier
  // by an atomic operation.
  if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
    s->creation_stack_id = CurrentStackId(thr, pc);
}

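// Called when a mutex is destroyed: reports destruction of a locked mutex
// (if enabled), resets the sync object, and imitates a write to the mutex
// memory so that unlock-destroy races are caught.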
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  StackID creation_stack_id;
  FastState last_lock;
  {
    auto s = ctx->metamap.GetSyncIfExists(addr);
    if (!s)
      return;
    SlotLocker locker(thr);
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      last_lock = s->last_lock;
      if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
          ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
        // Destroy is a no-op for linker-initialized mutexes.
        return;
      }
      if (common_flags()->detect_deadlocks) {
        Callback cb(thr, pc);
        ctx->dd->MutexDestroy(&cb, &s->dd);
        ctx->dd->MutexInit(&cb, &s->dd);
      }
      if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
          !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        unlock_locked = true;
      }
      s->Reset();
    }
    // Imitate a memory write to catch unlock-destroy races.
    if (pc && IsAppMem(addr))
      MemoryAccess(thr, pc, addr, 1,
                   kAccessWrite | kAccessFree | kAccessSlotLocked);
  }
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
    ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
  thr->mset.DelAddr(addr, true);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

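// Called before a blocking write-lock attempt: notifies the deadlock
// detector about the pending acquisition and reports a potential deadlock
// before the thread blocks. Try-locks are skipped because they cannot block.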
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (flagz & MutexFlagTryLock)
    return;
  if (!common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
  }
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}

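// Called after a write-lock is acquired: records the lock in the trace and
// mutex set, updates ownership/recursion, acquires the mutex clocks on the
// first (non-recursive) acquisition, and reports double locks and deadlocks.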
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
          thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_double_lock = false;
  bool pre_lock = false;
  bool first = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, true);
    {
      Lock lock(&s->mtx);
      first = s->recursion == 0;
      s->UpdateFlags(flagz);
      if (s->owner_tid == kInvalidTid) {
        CHECK_EQ(s->recursion, 0);
        s->owner_tid = thr->tid;
        s->last_lock = thr->fast_state;
      } else if (s->owner_tid == thr->tid) {
        CHECK_GT(s->recursion, 0);
      } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_double_lock = true;
      }
      s->recursion += rec;
      if (first) {
        if (!thr->ignore_sync) {
          thr->clock.Acquire(s->clock);
          thr->clock.Acquire(s->read_clock);
        }
      }
      if (first && common_flags()->detect_deadlocks) {
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
                   !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
        ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
      }
    }
  }
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
                      creation_stack_id);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

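// Called on a write-unlock: records the unlock, updates recursion and
// ownership, releases the thread's clock into the mutex, and reports bad
// unlocks. Returns the recursion count that was released.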
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  StackID creation_stack_id;
  RecordMutexUnlock(thr, addr);
  bool report_bad_unlock = false;
  int rec = 0;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      } else {
        rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
        s->recursion -= rec;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0 &&
          !report_bad_unlock) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

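// Read-lock counterpart of MutexPreLock: notifies the deadlock detector
// before a blocking read-lock attempt.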
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
    return;
  Callback cb(thr, pc);
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    ReadLock lock(&s->mtx);
    s->UpdateFlags(flagz);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
  }
  ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}

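// Called after a read-lock is acquired: records the lock, acquires the mutex
// clock, and reports read-locking of a write-locked mutex and deadlocks.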
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  bool report_bad_lock = false;
  bool pre_lock = false;
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    creation_stack_id = s->creation_stack_id;
    RecordMutexLock(thr, pc, addr, creation_stack_id, false);
    {
      ReadLock lock(&s->mtx);
      s->UpdateFlags(flagz);
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_lock = true;
        }
      }
      if (!thr->ignore_sync)
        thr->clock.Acquire(s->clock);
      s->last_lock = thr->fast_state;
      if (common_flags()->detect_deadlocks) {
        pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
                   !(flagz & MutexFlagTryLock);
        Callback cb(thr, pc);
        if (pre_lock)
          ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
        ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
      }
    }
  }
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
                      creation_stack_id);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

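// Called on a read-unlock: records the unlock, releases the thread's clock
// into the mutex read clock, and reports read-unlocking of a mutex that is
// currently write-locked.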
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid != kInvalidTid) {
        if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
          s->SetFlags(MutexFlagBroken);
          report_bad_unlock = true;
        }
      }
      if (!thr->ignore_sync) {
        thr->clock.Release(&s->read_clock);
        released = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

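// Called when the unlock kind is unknown (e.g. rwlocks whose unlock does not
// distinguish readers from writers): infers read vs. write unlock from the
// recorded owner and acts accordingly.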
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (pc && IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  RecordMutexUnlock(thr, addr);
  StackID creation_stack_id;
  bool report_bad_unlock = false;
  bool write = true;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    bool released = false;
    {
      Lock lock(&s->mtx);
      creation_stack_id = s->creation_stack_id;
      if (s->owner_tid == kInvalidTid) {
        // Seems to be a read unlock.
        write = false;
        if (!thr->ignore_sync) {
          thr->clock.Release(&s->read_clock);
          released = true;
        }
      } else if (s->owner_tid == thr->tid) {
        // Seems to be a write unlock.
        CHECK_GT(s->recursion, 0);
        s->recursion--;
        if (s->recursion == 0) {
          s->owner_tid = kInvalidTid;
          if (!thr->ignore_sync) {
            thr->clock.ReleaseStore(&s->clock);
            released = true;
          }
        }
      } else if (!s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
      if (common_flags()->detect_deadlocks && s->recursion == 0) {
        Callback cb(thr, pc);
        ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
      }
    }
    if (released)
      IncrementEpoch(thr);
  }
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
                      creation_stack_id);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

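// Forcibly resets the recorded owner and recursion count for the mutex at
// addr, bringing the sync object back to a consistent (unlocked) state.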
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SlotLocker locker(thr);
  auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
  Lock lock(&s->mtx);
  s->owner_tid = kInvalidTid;
  s->recursion = 0;
}

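// Reports an invalid access to a mutex (ReportTypeMutexInvalidAccess) at the
// given address.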
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  StackID creation_stack_id = kInvalidStackID;
  {
    SlotLocker locker(thr);
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    if (s)
      creation_stack_id = s->creation_stack_id;
  }
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
                    creation_stack_id);
}

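// Acquire semantics for an arbitrary address: the calling thread acquires
// the vector clock stored in the sync object for the address, if one exists.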
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  auto s = ctx->metamap.GetSyncIfExists(addr);
  if (!s)
    return;
  SlotLocker locker(thr);
  ReadLock lock(&s->mtx);
  if (!s->clock)
    return;
  thr->clock.Acquire(s->clock);
}

void AcquireGlobal(ThreadState *thr) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}

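// Release semantics for an arbitrary address: the calling thread releases
// its clock into the sync object (creating it if necessary) and then
// increments its own epoch. ReleaseStore and ReleaseStoreAcquire below
// differ only in the clock operation used.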
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.Release(&s->clock);
  }
  IncrementEpoch(thr);
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
  }
  IncrementEpoch(thr);
}

void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStoreAcquire(&s->clock);
  }
  IncrementEpoch(thr);
}

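// Advances the thread's epoch after a release operation and records the new
// timestamp in the thread clock, the slot, and the trace (unless the epoch
// would overflow).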
void IncrementEpoch(ThreadState *thr) {
  DCHECK(!thr->ignore_sync);
  DCHECK(thr->slot_locked);
  Epoch epoch = EpochInc(thr->fast_state.epoch());
  if (!EpochOverflow(epoch)) {
    Sid sid = thr->fast_state.sid();
    thr->clock.Set(sid, epoch);
    thr->fast_state.SetEpoch(epoch);
    thr->slot->SetEpoch(epoch);
    TraceTime(thr);
  }
}

#if !SANITIZER_GO
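// Called after sleep-like functions return: records the sleep stack and a
// snapshot of all slot epochs, which reports later use to note accesses that
// could have been synchronized by the sleep.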
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  thr->last_sleep_clock.Reset();
  SlotLocker locker(thr);
  for (auto &slot : ctx->slots)
    thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
#endif

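// Builds and outputs a deadlock report from the detector's lock cycle: one
// mutex and one thread per edge of the cycle, plus one or two stacks per
// edge depending on the second_deadlock_stack flag.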
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  // Use alloca, because malloc during signal handling deadlocks.
  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
  // Open a new scope: Apple platforms require the locks below to be released
  // before symbolizing in order to avoid a deadlock.
  {
    ThreadRegistryLock l(&ctx->thread_registry);
    new (rep) ScopedReport(ReportTypeDeadlock);
    for (int i = 0; i < r->n; i++) {
      rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
      rep->AddUniqueTid((int)r->loop[i].thr_ctx);
      rep->AddThread((int)r->loop[i].thr_ctx);
    }
    uptr dummy_pc = 0x42;
    for (int i = 0; i < r->n; i++) {
      for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
        u32 stk = r->loop[i].stk[j];
        StackTrace stack;
        if (stk && stk != kInvalidStackID) {
          stack = StackDepotGet(stk);
        } else {
          // Sometimes we fail to extract the stack trace (FIXME: investigate),
          // but we should still produce some stack trace in the report.
          stack = StackTrace(&dummy_pc, 1);
        }
        rep->AddStack(stack, true);
      }
    }
#if SANITIZER_APPLE
  }  // Close this scope to release the locks.
#endif
  OutputReport(thr, *rep);

  // Destroy manually because the report was allocated with placement new.
  rep->~ScopedReport();
#if !SANITIZER_APPLE
  }
#endif
}

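// Builds and outputs a report about destruction of a locked mutex, restoring
// the stack of the last lock from the trace.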
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                         FastState last_lock, StackID creation_stack_id) {
  // Use alloca, because malloc during signal handling deadlocks.
  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
  // Open a new scope: Apple platforms require the locks below to be released
  // before symbolizing in order to avoid a deadlock.
  {
    // We need to lock the slot during RestoreStack because it protects
    // the slot journal.
    Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
    ThreadRegistryLock l0(&ctx->thread_registry);
    Lock slots_lock(&ctx->slot_mtx);
    new (rep) ScopedReport(ReportTypeMutexDestroyLocked);
    rep->AddMutex(addr, creation_stack_id);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep->AddStack(trace, true);

    Tid tid;
    DynamicMutexSet mset;
    uptr tag;
    if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
                      addr, 0, kAccessWrite, &tid, &trace, mset, &tag))
      return;
    rep->AddStack(trace, true);
    rep->AddLocation(addr, 1);
#if SANITIZER_APPLE
  }  // Close this scope to release the locks.
#endif
  OutputReport(thr, *rep);

  // Destroy manually because the report was allocated with placement new.
  rep->~ScopedReport();
#if !SANITIZER_APPLE
  }
#endif
}

}  // namespace __tsan