//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

#if SANITIZER_APPLE
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}
#endif

#if SANITIZER_APPLE && !SANITIZER_GO
// Override weak symbol from sanitizer_common
extern void __tsan_set_in_internal_write_call(bool value) {
  __tsan::cur_thread_init()->in_internal_write_call = value;
}
#endif

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
    "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
#endif
alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
# if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
# endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
# if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
# endif
}
#endif

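// Allocates a trace part for the calling thread. Once the thread has reached
// its quota of Trace::kMinParts + flags()->history_size parts (or other
// threads have finished excess parts), an already-finished part is recycled;
// otherwise a fresh part is mmap'ed.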
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

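// Performs the actual global reset: bumps the global epoch, detaches trace
// parts from all registered threads, re-initializes all TID slots and
// re-mmaps (zeroes) the shadow memory. Called by DoReset with all slot
// mutexes held.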
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tids can still be stored in some aux places
    // (e.g. the tid of the thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify the
        // position within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
# if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
# endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

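// Takes a slot from the slot queue and returns it locked, preempting the
// slot's current owner if there is one. Slots with an exhausted epoch
// (kEpochLast) are skipped; if no usable slot remains, a global reset is
// triggered and the search restarts.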
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

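// Attaches the thread to a slot obtained from FindSlotAndLock: advances the
// slot epoch, publishes the new sid/epoch in the thread's fast state and
// vector clock, and records the attachment in the slot journal.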
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

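// Detaches the thread from its slot. If the slot was already preempted by
// another thread, only the thread-local trace state is released.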
static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that
        // the thread was preempted right after it acquired the slot
        // in ThreadStart and has not traced any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

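// Locks the thread's current slot. If the thread lost the slot to preemption
// or the slot epoch is exhausted, the thread detaches and re-attaches to a
// (possibly different) slot first.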
void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses a fixed-size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses a malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
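// Appends a snapshot of the current memory stats to the file configured via
// the profile_memory flag (no-op if memory profiling is disabled).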
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

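// Returns the shadow pages corresponding to the range [addr, addr + size)
// back to the OS.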
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at which point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand if the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try not to
// crash, as the failure mode is very confusing (a page fault inside the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState* thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  // UnmapShadow is tied to the semantics of mmap/munmap, so we
  // should clear the whole shadow range, including the tail shadow
  // when (addr + size) % kShadowCell != 0.
  uptr rounded_size_shadow = RoundUp(addr + size, kShadowCell) - addr;
  DontNeedShadowFor(addr, rounded_size_shadow);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  uptr rounded_size_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
  ctx->metamap.ResetRange(thr->proc(), addr, rounded_size_meta, true);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Although named MapShadow, this function's semantics are unrelated to
  // UnmapShadow. This function is currently only used for Go's lazy allocation
  // of shadow, whose targets are program sections (e.g., bss, data, etc.).
  // Therefore, we can guarantee that the addr and size align to kShadowCell
  // and kMetaShadowCell by the following assertions.
  DCHECK_EQ(addr % kShadowCell, 0);
  DCHECK_EQ(size % kShadowCell, 0);
  DCHECK_EQ(addr % kMetaShadowCell, 0);
  DCHECK_EQ(size % kMetaShadowCell, 0);

  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with an unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  // Windows wants 64K alignment.
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStack(StackTrace::GetCurrentPc(),
                    common_flags()->fast_unwind_on_fatal);
}

bool is_initialized;

// Symbolization indirectly calls dl_iterate_phdr. If a CHECK() fails early on
// (prior to the dl_iterate_phdr interceptor setup), the resulting attempted
// symbolization will segfault.
// dl_iterate_phdr is not intercepted for Android.
bool ready_to_symbolize = SANITIZER_ANDROID;

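// One-time runtime initialization: flags, platform, allocator, interceptors,
// shadow memory, suppressions and the main thread. Subsequent calls are
// no-ops.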
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLockBeforeFork();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

# if SANITIZER_APPLE
  __tsan_test_only_on_fork();
# endif
}

static void ForkAfter(ThreadState* thr,
                      bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlockAfterFork(child);
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr, false); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr, true);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }

# if SANITIZER_APPLE && !SANITIZER_GO
  // This flag can have inheritance disabled - we are the child so act
  // accordingly.
  if (flags()->lock_during_write == kNoLockDuringWritesCurrentProcess)
    flags()->lock_during_write = kLockDuringAllWrites;
# endif
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

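// Returns a stack depot id for the thread's current shadow (call) stack,
// temporarily pushing pc as the topmost frame when it is non-zero.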
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

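// Called when the fast-path check in TraceAcquire fails. The check has false
// positives at the TracePart::kAlignment gap in the middle of a part: in that
// case the gap is padded with NopEvent's and true is returned (tracing can
// continue in the same part); if the part is really full, the remaining tail
// is padded and false is returned.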
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvent's.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

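// Slow path of TraceSwitchPart: allocates a new trace part, installs it as
// the current one, and makes it self-contained by re-tracing the timestamp,
// the current call stack and the currently held mutexes at its start.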
void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of the parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set at the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may not be queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock at the beginning
    // of the function and the slot was at kEpochLast - 1, so after the
    // increment in SlotAttachAndLock it becomes kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

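// The ignore counters nest: memory accesses are ignored while
// ignore_reads_and_writes is non-zero, and the ignore bit in fast_state
// mirrors that.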
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif