1//===-- tsan_rtl.cpp ------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// Main file (entry points) for the TSan run-time.
12//===----------------------------------------------------------------------===//
13
14#include "tsan_rtl.h"
15
16#include "sanitizer_common/sanitizer_atomic.h"
17#include "sanitizer_common/sanitizer_common.h"
18#include "sanitizer_common/sanitizer_file.h"
19#include "sanitizer_common/sanitizer_interface_internal.h"
20#include "sanitizer_common/sanitizer_libc.h"
21#include "sanitizer_common/sanitizer_placement_new.h"
22#include "sanitizer_common/sanitizer_stackdepot.h"
23#include "sanitizer_common/sanitizer_symbolizer.h"
24#include "tsan_adaptive_delay.h"
25#include "tsan_defs.h"
26#include "tsan_interface.h"
27#include "tsan_mman.h"
28#include "tsan_platform.h"
29#include "tsan_suppressions.h"
30#include "tsan_symbolize.h"
31#include "ubsan/ubsan_init.h"
32
33volatile int __tsan_resumed = 0;
34
// Releases the runtime when it was started with stop_on_start=1:
// Initialize() busy-waits on __tsan_resumed until the user calls this.
extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
38
39#if SANITIZER_APPLE
// Test-only hook invoked from ForkBefore on Apple platforms.
// Weak so that tests can override it; the default does nothing.
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}
42#endif
43
44#if SANITIZER_APPLE && !SANITIZER_GO
// Override weak symbol from sanitizer_common
extern void __tsan_set_in_internal_write_call(bool value) {
  // Record on the current thread that we are inside an internal write call.
  // cur_thread_init() is used (rather than cur_thread()) because this can be
  // reached before the thread state is fully initialized.
  __tsan::cur_thread_init()->in_internal_write_call = value;
}
49#endif
50
51namespace __tsan {
52
53#if !SANITIZER_GO
54void (*on_initialize)(void);
55int (*on_finalize)(int);
56#endif
57
58#if !SANITIZER_GO && !SANITIZER_APPLE
59alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
60 "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
61#endif
62alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
63Context *ctx;
64
65// Can be overriden by a front-end.
66#ifdef TSAN_EXTERNAL_HOOKS
67bool OnFinalize(bool failed);
68void OnInitialize();
69#else
70SANITIZER_WEAK_CXX_DEFAULT_IMPL
71bool OnFinalize(bool failed) {
72# if !SANITIZER_GO
73 if (on_finalize)
74 return on_finalize(failed);
75# endif
76 return failed;
77}
78
79SANITIZER_WEAK_CXX_DEFAULT_IMPL
80void OnInitialize() {
81# if !SANITIZER_GO
82 if (on_initialize)
83 on_initialize();
84# endif
85}
86#endif
87
88static TracePart* TracePartAlloc(ThreadState* thr) {
89 TracePart* part = nullptr;
90 {
91 Lock lock(&ctx->slot_mtx);
92 uptr max_parts = Trace::kMinParts + flags()->history_size;
93 Trace* trace = &thr->tctx->trace;
94 if (trace->parts_allocated == max_parts ||
95 ctx->trace_part_finished_excess) {
96 part = ctx->trace_part_recycle.PopFront();
97 DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
98 if (part && part->trace) {
99 Trace* trace1 = part->trace;
100 Lock trace_lock(&trace1->mtx);
101 part->trace = nullptr;
102 TracePart* part1 = trace1->parts.PopFront();
103 CHECK_EQ(part, part1);
104 if (trace1->parts_allocated > trace1->parts.Size()) {
105 ctx->trace_part_finished_excess +=
106 trace1->parts_allocated - trace1->parts.Size();
107 trace1->parts_allocated = trace1->parts.Size();
108 }
109 }
110 }
111 if (trace->parts_allocated < max_parts) {
112 trace->parts_allocated++;
113 if (ctx->trace_part_finished_excess)
114 ctx->trace_part_finished_excess--;
115 }
116 if (!part)
117 ctx->trace_part_total_allocated++;
118 else if (ctx->trace_part_recycle_finished)
119 ctx->trace_part_recycle_finished--;
120 }
121 if (!part)
122 part = new (MmapOrDie(size: sizeof(*part), mem_type: "TracePart")) TracePart();
123 return part;
124}
125
126static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
127 DCHECK(part->trace);
128 part->trace = nullptr;
129 ctx->trace_part_recycle.PushFront(e: part);
130}
131
132void TraceResetForTesting() {
133 Lock lock(&ctx->slot_mtx);
134 while (auto* part = ctx->trace_part_recycle.PopFront()) {
135 if (auto trace = part->trace)
136 CHECK_EQ(trace->parts.PopFront(), part);
137 UnmapOrDie(addr: part, size: sizeof(*part));
138 }
139 ctx->trace_part_total_allocated = 0;
140 ctx->trace_part_recycle_finished = 0;
141 ctx->trace_part_finished_excess = 0;
142}
143
144static void DoResetImpl(uptr epoch) {
145 ThreadRegistryLock lock0(&ctx->thread_registry);
146 Lock lock1(&ctx->slot_mtx);
147 CHECK_EQ(ctx->global_epoch, epoch);
148 ctx->global_epoch++;
149 CHECK(!ctx->resetting);
150 ctx->resetting = true;
151 for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
152 ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
153 tid: static_cast<Tid>(i));
154 // Potentially we could purge all ThreadStatusDead threads from the
155 // registry. Since we reset all shadow, they can't race with anything
156 // anymore. However, their tid's can still be stored in some aux places
157 // (e.g. tid of thread that created something).
158 auto trace = &tctx->trace;
159 Lock lock(&trace->mtx);
160 bool attached = tctx->thr && tctx->thr->slot;
161 auto parts = &trace->parts;
162 bool local = false;
163 while (!parts->Empty()) {
164 auto part = parts->Front();
165 local = local || part == trace->local_head;
166 if (local)
167 CHECK(!ctx->trace_part_recycle.Queued(part));
168 else
169 ctx->trace_part_recycle.Remove(e: part);
170 if (attached && parts->Size() == 1) {
171 // The thread is running and this is the last/current part.
172 // Set the trace position to the end of the current part
173 // to force the thread to call SwitchTracePart and re-attach
174 // to a new slot and allocate a new trace part.
175 // Note: the thread is concurrently modifying the position as well,
176 // so this is only best-effort. The thread can only modify position
177 // within this part, because switching parts is protected by
178 // slot/trace mutexes that we hold here.
179 atomic_store_relaxed(
180 a: &tctx->thr->trace_pos,
181 v: reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
182 break;
183 }
184 parts->Remove(e: part);
185 TracePartFree(part);
186 }
187 CHECK_LE(parts->Size(), 1);
188 trace->local_head = parts->Front();
189 if (tctx->thr && !tctx->thr->slot) {
190 atomic_store_relaxed(a: &tctx->thr->trace_pos, v: 0);
191 tctx->thr->trace_prev_pc = 0;
192 }
193 if (trace->parts_allocated > trace->parts.Size()) {
194 ctx->trace_part_finished_excess +=
195 trace->parts_allocated - trace->parts.Size();
196 trace->parts_allocated = trace->parts.Size();
197 }
198 }
199 while (ctx->slot_queue.PopFront()) {
200 }
201 for (auto& slot : ctx->slots) {
202 slot.SetEpoch(kEpochZero);
203 slot.journal.Reset();
204 slot.thr = nullptr;
205 ctx->slot_queue.PushBack(e: &slot);
206 }
207
208 DPrintf("Resetting shadow...\n");
209 auto shadow_begin = ShadowBeg();
210 auto shadow_end = ShadowEnd();
211#if SANITIZER_GO
212 CHECK_NE(0, ctx->mapped_shadow_begin);
213 shadow_begin = ctx->mapped_shadow_begin;
214 shadow_end = ctx->mapped_shadow_end;
215 VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
216 shadow_begin, shadow_end);
217#endif
218
219#if SANITIZER_WINDOWS
220 auto resetFailed =
221 !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
222#else
223 auto resetFailed =
224 !MmapFixedSuperNoReserve(fixed_addr: shadow_begin, size: shadow_end-shadow_begin, name: "shadow");
225# if !SANITIZER_GO
226 DontDumpShadow(addr: shadow_begin, size: shadow_end - shadow_begin);
227# endif
228#endif
229 if (resetFailed) {
230 Printf(format: "failed to reset shadow memory\n");
231 Die();
232 }
233 DPrintf("Resetting meta shadow...\n");
234 ctx->metamap.ResetClocks();
235 StoreShadow(sp: &ctx->last_spurious_race, s: Shadow::kEmpty);
236 ctx->resetting = false;
237}
238
// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
//
// Locks every slot mutex (in slot order) and then performs the global reset.
// epoch==0 means "reset whatever the current epoch is"; a non-zero epoch makes
// the reset conditional — if another thread already advanced the epoch, we
// back off without doing anything.
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      // So the mismatch can only be detected on slot 0, and only that single
      // mutex needs to be released here.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}
257
258void FlushShadowMemory() { DoReset(thr: nullptr, epoch: 0); }
259
260static TidSlot* FindSlotAndLock(ThreadState* thr)
261 SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
262 CHECK(!thr->slot);
263 TidSlot* slot = nullptr;
264 for (;;) {
265 uptr epoch;
266 {
267 Lock lock(&ctx->slot_mtx);
268 epoch = ctx->global_epoch;
269 if (slot) {
270 // This is an exhausted slot from the previous iteration.
271 if (ctx->slot_queue.Queued(e: slot))
272 ctx->slot_queue.Remove(e: slot);
273 thr->slot_locked = false;
274 slot->mtx.Unlock();
275 }
276 for (;;) {
277 slot = ctx->slot_queue.PopFront();
278 if (!slot)
279 break;
280 if (slot->epoch() != kEpochLast) {
281 ctx->slot_queue.PushBack(e: slot);
282 break;
283 }
284 }
285 }
286 if (!slot) {
287 DoReset(thr, epoch);
288 continue;
289 }
290 slot->mtx.Lock();
291 CHECK(!thr->slot_locked);
292 thr->slot_locked = true;
293 if (slot->thr) {
294 DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
295 slot->thr->tid);
296 slot->SetEpoch(slot->thr->fast_state.epoch());
297 slot->thr = nullptr;
298 }
299 if (slot->epoch() != kEpochLast)
300 return slot;
301 }
302}
303
304void SlotAttachAndLock(ThreadState* thr) {
305 TidSlot* slot = FindSlotAndLock(thr);
306 DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
307 CHECK(!slot->thr);
308 CHECK(!thr->slot);
309 slot->thr = thr;
310 thr->slot = slot;
311 Epoch epoch = EpochInc(epoch: slot->epoch());
312 CHECK(!EpochOverflow(epoch));
313 slot->SetEpoch(epoch);
314 thr->fast_state.SetSid(slot->sid);
315 thr->fast_state.SetEpoch(epoch);
316 if (thr->slot_epoch != ctx->global_epoch) {
317 thr->slot_epoch = ctx->global_epoch;
318 thr->clock.Reset();
319#if !SANITIZER_GO
320 thr->last_sleep_stack_id = kInvalidStackID;
321 thr->last_sleep_clock.Reset();
322#endif
323 }
324 thr->clock.Set(sid: slot->sid, v: epoch);
325 slot->journal.PushBack(v: {.tid: thr->tid, .epoch: epoch});
326}
327
328static void SlotDetachImpl(ThreadState* thr, bool exiting) {
329 TidSlot* slot = thr->slot;
330 thr->slot = nullptr;
331 if (thr != slot->thr) {
332 slot = nullptr; // we don't own the slot anymore
333 if (thr->slot_epoch != ctx->global_epoch) {
334 TracePart* part = nullptr;
335 auto* trace = &thr->tctx->trace;
336 {
337 Lock l(&trace->mtx);
338 auto* parts = &trace->parts;
339 // The trace can be completely empty in an unlikely event
340 // the thread is preempted right after it acquired the slot
341 // in ThreadStart and did not trace any events yet.
342 CHECK_LE(parts->Size(), 1);
343 part = parts->PopFront();
344 thr->tctx->trace.local_head = nullptr;
345 atomic_store_relaxed(a: &thr->trace_pos, v: 0);
346 thr->trace_prev_pc = 0;
347 }
348 if (part) {
349 Lock l(&ctx->slot_mtx);
350 TracePartFree(part);
351 }
352 }
353 return;
354 }
355 CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
356 slot->SetEpoch(thr->fast_state.epoch());
357 slot->thr = nullptr;
358}
359
360void SlotDetach(ThreadState* thr) {
361 Lock lock(&thr->slot->mtx);
362 SlotDetachImpl(thr, exiting: true);
363}
364
365void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
366 DCHECK(!thr->slot_locked);
367#if SANITIZER_DEBUG
368 // Check these mutexes are not locked.
369 // We can call DoReset from SlotAttachAndLock, which will lock
370 // these mutexes, but it happens only every once in a while.
371 { ThreadRegistryLock lock(&ctx->thread_registry); }
372 { Lock lock(&ctx->slot_mtx); }
373#endif
374 TidSlot* slot = thr->slot;
375 slot->mtx.Lock();
376 thr->slot_locked = true;
377 if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
378 return;
379 SlotDetachImpl(thr, exiting: false);
380 thr->slot_locked = false;
381 slot->mtx.Unlock();
382 SlotAttachAndLock(thr);
383}
384
// Releases the slot mutex taken by SlotLock/SlotAttachAndLock.
void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  // Clear the flag before unlocking: once the mutex is released the slot may
  // be preempted by another thread.
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}
390
391Context::Context()
392 : initialized(),
393 report_mtx(MutexTypeReport),
394 nreported(),
395 thread_registry([](Tid tid) -> ThreadContextBase* {
396 return new (Alloc(sz: sizeof(ThreadContext))) ThreadContext(tid);
397 }),
398 racy_mtx(MutexTypeRacy),
399 racy_stacks(),
400 fired_suppressions_mtx(MutexTypeFired),
401 slot_mtx(MutexTypeSlots),
402 resetting() {
403 fired_suppressions.reserve(new_size: 8);
404 for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
405 TidSlot* slot = &slots[i];
406 slot->sid = static_cast<Sid>(i);
407 slot_queue.PushBack(e: slot);
408 }
409 global_epoch = 1;
410}
411
412TidSlot::TidSlot() : mtx(MutexTypeSlot) {}
413
414// The objects are allocated in TLS, so one may rely on zero-initialization.
415ThreadState::ThreadState(Tid tid)
416 // Do not touch these, rely on zero initialization,
417 // they may be accessed before the ctor.
418 // ignore_reads_and_writes()
419 // ignore_interceptors()
420 : tid(tid) {
421 CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
422#if !SANITIZER_GO
423 // C/C++ uses fixed size shadow stack.
424 const int kInitStackSize = kShadowStackSize;
425 shadow_stack = static_cast<uptr*>(
426 MmapNoReserveOrDie(size: kInitStackSize * sizeof(uptr), mem_type: "shadow stack"));
427 SetShadowRegionHugePageMode(addr: reinterpret_cast<uptr>(shadow_stack),
428 length: kInitStackSize * sizeof(uptr));
429#else
430 // Go uses malloc-allocated shadow stack with dynamic size.
431 const int kInitStackSize = 8;
432 shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
433#endif
434 shadow_stack_pos = shadow_stack;
435 shadow_stack_end = shadow_stack + kInitStackSize;
436}
437
438#if !SANITIZER_GO
439void MemoryProfiler(u64 uptime) {
440 if (ctx->memprof_fd == kInvalidFd)
441 return;
442 InternalMmapVector<char> buf(4096);
443 WriteMemoryProfile(buf: buf.data(), buf_size: buf.size(), uptime_ns: uptime);
444 WriteToFile(fd: ctx->memprof_fd, buff: buf.data(), buff_size: internal_strlen(s: buf.data()));
445}
446
447static bool InitializeMemoryProfiler() {
448 ctx->memprof_fd = kInvalidFd;
449 const char *fname = flags()->profile_memory;
450 if (!fname || !fname[0])
451 return false;
452 if (internal_strcmp(s1: fname, s2: "stdout") == 0) {
453 ctx->memprof_fd = 1;
454 } else if (internal_strcmp(s1: fname, s2: "stderr") == 0) {
455 ctx->memprof_fd = 2;
456 } else {
457 InternalScopedString filename;
458 filename.AppendF(format: "%s.%d", fname, (int)internal_getpid());
459 ctx->memprof_fd = OpenFile(filename: filename.data(), mode: WrOnly);
460 if (ctx->memprof_fd == kInvalidFd) {
461 Printf(format: "ThreadSanitizer: failed to open memory profile file '%s'\n",
462 filename.data());
463 return false;
464 }
465 }
466 MemoryProfiler(uptime: 0);
467 return true;
468}
469
470static void *BackgroundThread(void *arg) {
471 // This is a non-initialized non-user thread, nothing to see here.
472 // We don't use ScopedIgnoreInterceptors, because we want ignores to be
473 // enabled even when the thread function exits (e.g. during pthread thread
474 // shutdown code).
475 cur_thread_init()->ignore_interceptors++;
476 const u64 kMs2Ns = 1000 * 1000;
477 const u64 start = NanoTime();
478
479 u64 last_flush = start;
480 uptr last_rss = 0;
481 while (!atomic_load_relaxed(a: &ctx->stop_background_thread)) {
482 SleepForMillis(millis: 100);
483 u64 now = NanoTime();
484
485 // Flush memory if requested.
486 if (flags()->flush_memory_ms > 0) {
487 if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
488 VReport(1, "ThreadSanitizer: periodic memory flush\n");
489 FlushShadowMemory();
490 now = last_flush = NanoTime();
491 }
492 }
493 if (flags()->memory_limit_mb > 0) {
494 uptr rss = GetRSS();
495 uptr limit = uptr(flags()->memory_limit_mb) << 20;
496 VReport(1,
497 "ThreadSanitizer: memory flush check"
498 " RSS=%llu LAST=%llu LIMIT=%llu\n",
499 (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
500 if (2 * rss > limit + last_rss) {
501 VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
502 FlushShadowMemory();
503 rss = GetRSS();
504 now = NanoTime();
505 VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
506 (u64)rss >> 20);
507 }
508 last_rss = rss;
509 }
510
511 MemoryProfiler(uptime: now - start);
512
513 // Flush symbolizer cache if requested.
514 if (flags()->flush_symbolizer_ms > 0) {
515 u64 last = atomic_load(a: &ctx->last_symbolize_time_ns,
516 mo: memory_order_relaxed);
517 if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
518 Lock l(&ctx->report_mtx);
519 ScopedErrorReportLock l2;
520 SymbolizeFlush();
521 atomic_store(a: &ctx->last_symbolize_time_ns, v: 0, mo: memory_order_relaxed);
522 }
523 }
524 }
525 return nullptr;
526}
527
528static void StartBackgroundThread() {
529 ctx->background_thread = internal_start_thread(func: &BackgroundThread, arg: 0);
530}
531
532#ifndef __mips__
533static void StopBackgroundThread() {
534 atomic_store(a: &ctx->stop_background_thread, v: 1, mo: memory_order_relaxed);
535 internal_join_thread(th: ctx->background_thread);
536 ctx->background_thread = 0;
537}
538#endif
539#endif
540
541void DontNeedShadowFor(uptr addr, uptr size) {
542 ReleaseMemoryPagesToOS(beg: reinterpret_cast<uptr>(MemToShadow(x: addr)),
543 end: reinterpret_cast<uptr>(MemToShadow(x: addr + size)));
544}
545
546#if !SANITIZER_GO
547// We call UnmapShadow before the actual munmap, at that point we don't yet
548// know if the provided address/size are sane. We can't call UnmapShadow
549// after the actual munmap becuase at that point the memory range can
550// already be reused for something else, so we can't rely on the munmap
551// return value to understand is the values are sane.
552// While calling munmap with insane values (non-canonical address, negative
553// size, etc) is an error, the kernel won't crash. We must also try to not
554// crash as the failure mode is very confusing (paging fault inside of the
555// runtime on some derived shadow address).
556static bool IsValidMmapRange(uptr addr, uptr size) {
557 if (size == 0)
558 return true;
559 if (static_cast<sptr>(size) < 0)
560 return false;
561 if (!IsAppMem(mem: addr) || !IsAppMem(mem: addr + size - 1))
562 return false;
563 // Check that if the start of the region belongs to one of app ranges,
564 // end of the region belongs to the same region.
565 const uptr ranges[][2] = {
566 {LoAppMemBeg(), LoAppMemEnd()},
567 {MidAppMemBeg(), MidAppMemEnd()},
568 {HiAppMemBeg(), HiAppMemEnd()},
569 };
570 for (auto range : ranges) {
571 if (addr >= range[0] && addr < range[1])
572 return addr + size <= range[1];
573 }
574 return false;
575}
576
577void UnmapShadow(ThreadState* thr, uptr addr, uptr size) {
578 if (size == 0 || !IsValidMmapRange(addr, size))
579 return;
580 // unmap shadow is related to semantic of mmap/munmap, so we
581 // should clear the whole shadow range, including the tail shadow
582 // while addr + size % kShadowCell != 0.
583 uptr rounded_size_shadow = RoundUp(p: addr + size, align: kShadowCell) - addr;
584 DontNeedShadowFor(addr, size: rounded_size_shadow);
585 ScopedGlobalProcessor sgp;
586 SlotLocker locker(thr, true);
587 uptr rounded_size_meta = RoundUp(p: addr + size, align: kMetaShadowCell) - addr;
588 ctx->metamap.ResetRange(proc: thr->proc(), p: addr, sz: rounded_size_meta, reset: true);
589}
590#endif
591
592void MapShadow(uptr addr, uptr size) {
593 // Although named MapShadow, this function's semantic is unrelated to
594 // UnmapShadow. This function currently only used for Go's lazy allocation
595 // of shadow, whose targets are program section (e.g., bss, data, etc.).
596 // Therefore, we can guarantee that the addr and size align to kShadowCell
597 // and kMetaShadowCell by the following assertions.
598 DCHECK_EQ(addr % kShadowCell, 0);
599 DCHECK_EQ(size % kShadowCell, 0);
600 DCHECK_EQ(addr % kMetaShadowCell, 0);
601 DCHECK_EQ(size % kMetaShadowCell, 0);
602
603 // Ensure thead registry lock held, so as to synchronize
604 // with DoReset, which also access the mapped_shadow_* ctxt fields.
605 ThreadRegistryLock lock0(&ctx->thread_registry);
606 static bool data_mapped = false;
607
608#if !SANITIZER_GO
609 // Global data is not 64K aligned, but there are no adjacent mappings,
610 // so we can get away with unaligned mapping.
611 // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
612 const uptr kPageSize = GetPageSizeCached();
613 uptr shadow_begin = RoundDownTo(x: (uptr)MemToShadow(x: addr), boundary: kPageSize);
614 uptr shadow_end = RoundUpTo(size: (uptr)MemToShadow(x: addr + size), boundary: kPageSize);
615 if (!MmapFixedNoReserve(fixed_addr: shadow_begin, size: shadow_end - shadow_begin, name: "shadow"))
616 Die();
617#else
618 uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
619 uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
620 VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
621 addr, addr + size, shadow_begin, shadow_end);
622
623 if (!data_mapped) {
624 // First call maps data+bss.
625 if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
626 Die();
627 } else {
628 VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
629 ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
630 // Second and subsequent calls map heap.
631 if (shadow_end <= ctx->mapped_shadow_end)
632 return;
633 if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
634 ctx->mapped_shadow_begin = shadow_begin;
635 if (shadow_begin < ctx->mapped_shadow_end)
636 shadow_begin = ctx->mapped_shadow_end;
637 VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
638 shadow_begin, shadow_end);
639 if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
640 "shadow"))
641 Die();
642 ctx->mapped_shadow_end = shadow_end;
643 }
644#endif
645
646 // Meta shadow is 2:1, so tread carefully.
647 static uptr mapped_meta_end = 0;
648 uptr meta_begin = (uptr)MemToMeta(x: addr);
649 uptr meta_end = (uptr)MemToMeta(x: addr + size);
650 // Windows wants 64K alignment.
651 meta_begin = RoundDownTo(x: meta_begin, boundary: 64 << 10);
652 meta_end = RoundUpTo(size: meta_end, boundary: 64 << 10);
653 if (!data_mapped) {
654 // First call maps data+bss.
655 data_mapped = true;
656 if (!MmapFixedSuperNoReserve(fixed_addr: meta_begin, size: meta_end - meta_begin,
657 name: "meta shadow"))
658 Die();
659 } else {
660 // Mapping continuous heap.
661 CHECK_GT(meta_end, mapped_meta_end);
662 if (meta_begin < mapped_meta_end)
663 meta_begin = mapped_meta_end;
664 if (!MmapFixedSuperNoReserve(fixed_addr: meta_begin, size: meta_end - meta_begin,
665 name: "meta shadow"))
666 Die();
667 mapped_meta_end = meta_end;
668 }
669 VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
670 addr + size, meta_begin, meta_end);
671}
672
673#if !SANITIZER_GO
674static void OnStackUnwind(const SignalContext &sig, const void *,
675 BufferedStackTrace *stack) {
676 stack->Unwind(pc: StackTrace::GetNextInstructionPc(pc: sig.pc), bp: sig.bp, context: sig.context,
677 request_fast: common_flags()->fast_unwind_on_fatal);
678}
679
680static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
681 HandleDeadlySignal(siginfo, context, tid: GetTid(), unwind: &OnStackUnwind, unwind_context: nullptr);
682}
683#endif
684
685void CheckUnwind() {
686 // There is high probability that interceptors will check-fail as well,
687 // on the other hand there is no sense in processing interceptors
688 // since we are going to die soon.
689 ScopedIgnoreInterceptors ignore;
690#if !SANITIZER_GO
691 ThreadState* thr = cur_thread();
692 thr->nomalloc = false;
693 thr->ignore_sync++;
694 thr->ignore_reads_and_writes++;
695 atomic_store_relaxed(a: &thr->in_signal_handler, v: 0);
696#endif
697 PrintCurrentStack(pc: StackTrace::GetCurrentPc(),
698 fast: common_flags()->fast_unwind_on_fatal);
699}
700
701bool is_initialized;
702
703// Symbolization indirectly calls dl_iterate_phdr. If a CHECK() fails early on
704// (prior to the dl_iterate_phdr interceptor setup), resulting in an attempted
705// symbolization, it will segfault.
706// dl_iterate_phdr is not intercepted for Android.
707bool ready_to_symbolize = SANITIZER_ANDROID;
708
709void Initialize(ThreadState *thr) {
710 // Thread safe because done before all threads exist.
711 if (is_initialized)
712 return;
713 is_initialized = true;
714 // We are not ready to handle interceptors yet.
715 ScopedIgnoreInterceptors ignore;
716 SanitizerToolName = "ThreadSanitizer";
717 // Install tool-specific callbacks in sanitizer_common.
718 SetCheckUnwindCallback(CheckUnwind);
719
720 ctx = new(ctx_placeholder) Context;
721 const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
722 const char *options = GetEnv(name: env_name);
723 CacheBinaryName();
724 CheckASLR();
725 InitializeFlags(flags: &ctx->flags, env: options, env_option_name: env_name);
726 AvoidCVE_2016_2143();
727 __sanitizer::InitializePlatformEarly();
728 __tsan::InitializePlatformEarly();
729
730#if !SANITIZER_GO
731 InitializeAllocator();
732 ReplaceSystemMalloc();
733#endif
734 if (common_flags()->detect_deadlocks)
735 ctx->dd = DDetector::Create(flags: flags());
736 Processor *proc = ProcCreate();
737 ProcWire(proc, thr);
738 InitializeInterceptors();
739 InitializePlatform();
740 InitializeDynamicAnnotations();
741#if !SANITIZER_GO
742 InitializeShadowMemory();
743 InitializeAllocatorLate();
744 InstallDeadlySignalHandlers(handler: TsanOnDeadlySignal);
745#endif
746 // Setup correct file descriptor for error reports.
747 __sanitizer_set_report_path(path: common_flags()->log_path);
748 InitializeSuppressions();
749#if !SANITIZER_GO
750 InitializeLibIgnore();
751 Symbolizer::GetOrInit()->AddHooks(start_hook: EnterSymbolizer, end_hook: ExitSymbolizer);
752#endif
753
754 VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
755 (int)internal_getpid());
756
757 // Initialize thread 0.
758 Tid tid = ThreadCreate(thr: nullptr, pc: 0, uid: 0, detached: true);
759 CHECK_EQ(tid, kMainTid);
760 ThreadStart(thr, tid, os_id: GetTid(), thread_type: ThreadType::Regular);
761#if TSAN_CONTAINS_UBSAN
762 __ubsan::InitAsPlugin();
763#endif
764
765#if !SANITIZER_GO
766 Symbolizer::LateInitialize();
767 if (InitializeMemoryProfiler() || flags()->force_background_thread)
768 MaybeSpawnBackgroundThread();
769#endif
770 ctx->initialized = true;
771
772 if (flags()->stop_on_start) {
773 Printf(format: "ThreadSanitizer is suspended at startup (pid %d)."
774 " Call __tsan_resume().\n",
775 (int)internal_getpid());
776 while (__tsan_resumed == 0) {}
777 }
778
779#if !SANITIZER_GO
780 AdaptiveDelay::Init();
781#endif
782
783 OnInitialize();
784}
785
786void MaybeSpawnBackgroundThread() {
787 // On MIPS, TSan initialization is run before
788 // __pthread_initialize_minimal_internal() is finished, so we can not spawn
789 // new threads.
790#if !SANITIZER_GO && !defined(__mips__)
791 static atomic_uint32_t bg_thread = {};
792 if (atomic_load(a: &bg_thread, mo: memory_order_relaxed) == 0 &&
793 atomic_exchange(a: &bg_thread, v: 1, mo: memory_order_relaxed) == 0) {
794 StartBackgroundThread();
795 SetSandboxingCallback(StopBackgroundThread);
796 }
797#endif
798}
799
800int Finalize(ThreadState *thr) {
801 bool failed = false;
802
803#if !SANITIZER_GO
804 if (common_flags()->print_module_map == 1)
805 DumpProcessMap();
806#endif
807
808 if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
809 internal_usleep(useconds: u64(flags()->atexit_sleep_ms) * 1000);
810
811 {
812 // Wait for pending reports.
813 ScopedErrorReportLock lock;
814 }
815
816#if !SANITIZER_GO
817 if (Verbosity()) AllocatorPrintStats();
818#endif
819
820 ThreadFinalize(thr);
821
822 if (ctx->nreported) {
823 failed = true;
824#if !SANITIZER_GO
825 Printf(format: "ThreadSanitizer: reported %d warnings\n", ctx->nreported);
826#else
827 Printf("Found %d data race(s)\n", ctx->nreported);
828#endif
829 }
830
831 if (common_flags()->print_suppressions)
832 PrintMatchedSuppressions();
833
834 failed = OnFinalize(failed);
835
836 return failed ? common_flags()->exitcode : 0;
837}
838
839#if !SANITIZER_GO
// pthread_atfork "prepare" callback: acquires every runtime lock so the child
// does not inherit a lock held by another thread. ForkAfter releases them in
// the reverse order.
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLockBeforeFork();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

# if SANITIZER_APPLE
  __tsan_test_only_on_fork();
# endif
}
869
// Common "after fork" path for parent and child: undoes the ignore counters,
// releases the locks taken by ForkBefore in reverse acquisition order, and
// re-attaches the thread to a slot.
static void ForkAfter(ThreadState* thr,
                      bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlockAfterFork(child);
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}
885
886void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr, child: false); }
887
888void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
889 ForkAfter(thr, child: true);
890 u32 nthread = ctx->thread_registry.OnFork(tid: thr->tid);
891 VPrintf(1,
892 "ThreadSanitizer: forked new process with pid %d,"
893 " parent had %d threads\n",
894 (int)internal_getpid(), (int)nthread);
895 if (nthread == 1) {
896 if (start_thread)
897 StartBackgroundThread();
898 } else {
899 // We've just forked a multi-threaded process. We cannot reasonably function
900 // after that (some mutexes may be locked before fork). So just enable
901 // ignores for everything in the hope that we will exec soon.
902 ctx->after_multithreaded_fork = true;
903 thr->ignore_interceptors++;
904 thr->suppress_reports++;
905 ThreadIgnoreBegin(thr, pc);
906 ThreadIgnoreSyncBegin(thr, pc);
907 }
908
909# if SANITIZER_APPLE && !SANITIZER_GO
910 // This flag can have inheritance disabled - we are the child so act
911 // accordingly
912 if (flags()->lock_during_write == kNoLockDuringWritesCurrentProcess)
913 flags()->lock_during_write = kLockDuringAllWrites;
914# endif
915}
916#endif
917
918#if SANITIZER_GO
919NOINLINE
920void GrowShadowStack(ThreadState *thr) {
921 const int sz = thr->shadow_stack_end - thr->shadow_stack;
922 const int newsz = 2 * sz;
923 auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
924 internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
925 Free(thr->shadow_stack);
926 thr->shadow_stack = newstack;
927 thr->shadow_stack_pos = newstack + sz;
928 thr->shadow_stack_end = newstack + newsz;
929}
930#endif
931
932StackID CurrentStackId(ThreadState *thr, uptr pc) {
933#if !SANITIZER_GO
934 if (!thr->is_inited) // May happen during bootstrap.
935 return kInvalidStackID;
936#endif
937 if (pc != 0) {
938#if !SANITIZER_GO
939 DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
940#else
941 if (thr->shadow_stack_pos == thr->shadow_stack_end)
942 GrowShadowStack(thr);
943#endif
944 thr->shadow_stack_pos[0] = pc;
945 thr->shadow_stack_pos++;
946 }
947 StackID id = StackDepotPut(
948 stack: StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
949 if (pc != 0)
950 thr->shadow_stack_pos--;
951 return id;
952}
953
954static bool TraceSkipGap(ThreadState* thr) {
955 Trace *trace = &thr->tctx->trace;
956 Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(a: &thr->trace_pos));
957 DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
958 auto *part = trace->parts.Back();
959 DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
960 trace, trace->parts.Front(), part, pos);
961 if (!part)
962 return false;
963 // We can get here when we still have space in the current trace part.
964 // The fast-path check in TraceAcquire has false positives in the middle of
965 // the part. Check if we are indeed at the end of the current part or not,
966 // and fill any gaps with NopEvent's.
967 Event* end = &part->events[TracePart::kSize];
968 DCHECK_GE(pos, &part->events[0]);
969 DCHECK_LE(pos, end);
970 if (pos + 1 < end) {
971 if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
972 TracePart::kAlignment)
973 *pos++ = NopEvent;
974 *pos++ = NopEvent;
975 DCHECK_LE(pos + 2, end);
976 atomic_store_relaxed(a: &thr->trace_pos, v: reinterpret_cast<uptr>(pos));
977 return true;
978 }
979 // We are indeed at the end.
980 for (; pos < end; pos++) *pos = NopEvent;
981 return false;
982}
983
984NOINLINE
985void TraceSwitchPart(ThreadState* thr) {
986 if (TraceSkipGap(thr))
987 return;
988#if !SANITIZER_GO
989 if (ctx->after_multithreaded_fork) {
990 // We just need to survive till exec.
991 TracePart* part = thr->tctx->trace.parts.Back();
992 if (part) {
993 atomic_store_relaxed(a: &thr->trace_pos,
994 v: reinterpret_cast<uptr>(&part->events[0]));
995 return;
996 }
997 }
998#endif
999 TraceSwitchPartImpl(thr);
1000}
1001
1002void TraceSwitchPartImpl(ThreadState* thr) {
1003 SlotLocker locker(thr, true);
1004 Trace* trace = &thr->tctx->trace;
1005 TracePart* part = TracePartAlloc(thr);
1006 part->trace = trace;
1007 thr->trace_prev_pc = 0;
1008 TracePart* recycle = nullptr;
1009 // Keep roughly half of parts local to the thread
1010 // (not queued into the recycle queue).
1011 uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
1012 {
1013 Lock lock(&trace->mtx);
1014 if (trace->parts.Empty())
1015 trace->local_head = part;
1016 if (trace->parts.Size() >= local_parts) {
1017 recycle = trace->local_head;
1018 trace->local_head = trace->parts.Next(e: recycle);
1019 }
1020 trace->parts.PushBack(e: part);
1021 atomic_store_relaxed(a: &thr->trace_pos,
1022 v: reinterpret_cast<uptr>(&part->events[0]));
1023 }
1024 // Make this part self-sufficient by restoring the current stack
1025 // and mutex set in the beginning of the trace.
1026 TraceTime(thr);
1027 {
1028 // Pathologically large stacks may not fit into the part.
1029 // In these cases we log only fixed number of top frames.
1030 const uptr kMaxFrames = 1000;
1031 // Check that kMaxFrames won't consume the whole part.
1032 static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
1033 uptr* pos = Max(a: &thr->shadow_stack[0], b: thr->shadow_stack_pos - kMaxFrames);
1034 for (; pos < thr->shadow_stack_pos; pos++) {
1035 if (TryTraceFunc(thr, pc: *pos))
1036 continue;
1037 CHECK(TraceSkipGap(thr));
1038 CHECK(TryTraceFunc(thr, *pos));
1039 }
1040 }
1041 for (uptr i = 0; i < thr->mset.Size(); i++) {
1042 MutexSet::Desc d = thr->mset.Get(i);
1043 for (uptr i = 0; i < d.count; i++)
1044 TraceMutexLock(thr, type: d.write ? EventType::kLock : EventType::kRLock, pc: 0,
1045 addr: d.addr, stk: d.stack_id);
1046 }
1047 // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
1048 // after the call. It's possible that TryTraceFunc/TraceMutexLock above
1049 // filled the trace part exactly up to the TracePart::kAlignment gap
1050 // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
1051 EventFunc *ev;
1052 if (!TraceAcquire(thr, ev: &ev)) {
1053 CHECK(TraceSkipGap(thr));
1054 CHECK(TraceAcquire(thr, &ev));
1055 }
1056 {
1057 Lock lock(&ctx->slot_mtx);
1058 // There is a small chance that the slot may be not queued at this point.
1059 // This can happen if the slot has kEpochLast epoch and another thread
1060 // in FindSlotAndLock discovered that it's exhausted and removed it from
1061 // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
1062 // was called with the slot locked and epoch already at kEpochLast,
1063 // or (2) if we've acquired a new slot in SlotLock in the beginning
1064 // of the function and the slot was at kEpochLast - 1, so after increment
1065 // in SlotAttachAndLock it become kEpochLast.
1066 if (ctx->slot_queue.Queued(e: thr->slot)) {
1067 ctx->slot_queue.Remove(e: thr->slot);
1068 ctx->slot_queue.PushBack(e: thr->slot);
1069 }
1070 if (recycle)
1071 ctx->trace_part_recycle.PushBack(e: recycle);
1072 }
1073 DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
1074 trace->parts.Front(), trace->parts.Back(),
1075 atomic_load_relaxed(&thr->trace_pos));
1076}
1077
1078void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
1079 DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
1080 thr->ignore_reads_and_writes++;
1081 CHECK_GT(thr->ignore_reads_and_writes, 0);
1082 thr->fast_state.SetIgnoreBit();
1083#if !SANITIZER_GO
1084 if (pc && !ctx->after_multithreaded_fork)
1085 thr->mop_ignore_set.Add(stack_id: CurrentStackId(thr, pc));
1086#endif
1087}
1088
1089void ThreadIgnoreEnd(ThreadState *thr) {
1090 DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
1091 CHECK_GT(thr->ignore_reads_and_writes, 0);
1092 thr->ignore_reads_and_writes--;
1093 if (thr->ignore_reads_and_writes == 0) {
1094 thr->fast_state.ClearIgnoreBit();
1095#if !SANITIZER_GO
1096 thr->mop_ignore_set.Reset();
1097#endif
1098 }
1099}
1100
1101#if !SANITIZER_GO
1102extern "C" SANITIZER_INTERFACE_ATTRIBUTE
1103uptr __tsan_testonly_shadow_stack_current_size() {
1104 ThreadState *thr = cur_thread();
1105 return thr->shadow_stack_pos - thr->shadow_stack;
1106}
1107#endif
1108
1109void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
1110 DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
1111 thr->ignore_sync++;
1112 CHECK_GT(thr->ignore_sync, 0);
1113#if !SANITIZER_GO
1114 if (pc && !ctx->after_multithreaded_fork)
1115 thr->sync_ignore_set.Add(stack_id: CurrentStackId(thr, pc));
1116#endif
1117}
1118
1119void ThreadIgnoreSyncEnd(ThreadState *thr) {
1120 DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
1121 CHECK_GT(thr->ignore_sync, 0);
1122 thr->ignore_sync--;
1123#if !SANITIZER_GO
1124 if (thr->ignore_sync == 0)
1125 thr->sync_ignore_set.Reset();
1126#endif
1127}
1128
1129bool MD5Hash::operator==(const MD5Hash &other) const {
1130 return hash[0] == other.hash[0] && hash[1] == other.hash[1];
1131}
1132
// NOTE(review): exactly one of these no-op symbols exists per build mode;
// presumably it is referenced from another TU so that linking debug and
// release objects of the runtime together fails — confirm at the reference
// site.
#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
1138} // namespace __tsan
1139
#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
// Static lock-order description consumed by the internal deadlock checker:
// one entry per internal mutex type with its name and the set of mutex types
// associated with it (NOTE(review): presumably the types that may be acquired
// while this one is held — confirm against the MutexMeta declaration in
// sanitizer_common). The empty trailing entry terminates the array.
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

// Prints a one-frame stack trace for |pc| (the mutex acquisition site)
// when the deadlock checker reports a lock-order inversion.
void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif
1169