//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector_clock.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

extern bool ready_to_symbolize;

#if !SANITIZER_GO
struct MapUnmapCallback;
# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
     defined(__powerpc__) || SANITIZER_RISCV64

struct AP32 {
  static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
# if defined(__s390x__)
  typedef MappingS390x Mapping;
# else
  typedef Mapping48AddressSpace Mapping;
# endif
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr oldset_stack_size;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like the allocator cache, and does not
// participate in race-detection logic (invisible to end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processor's than
// ThreadState's (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;
};

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very end of a thread, after the thread processor has been destroyed.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif
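
// Illustrative sketch (not part of the runtime, interceptor name is
// hypothetical): an interceptor that may run after the thread's Processor was
// destroyed guards itself like this:
//
//   void InterceptedLateCall(ThreadState *thr, uptr pc) {
//     ScopedGlobalProcessor sgp;  // wires a global Processor if thr has none
//     // ... code that may allocate via the internal allocator ...
//   }  // the global Processor is unwired again here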

struct TidEpoch {
  Tid tid;
  Epoch epoch;
};

struct alignas(SANITIZER_CACHE_LINE_SIZE) TidSlot {
  Mutex mtx;
  Sid sid;
  atomic_uint32_t raw_epoch;
  ThreadState *thr;
  Vector<TidEpoch> journal;
  INode node;

  Epoch epoch() const {
    return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
  }

  void SetEpoch(Epoch v) {
    atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
  }

  TidSlot();
};

// This struct is stored in TLS.
struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
  FastState fast_state;
  int ignore_sync;
#if !SANITIZER_GO
  int ignore_interceptors;
#endif
  uptr *shadow_stack_pos;

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int in_symbolizer;
  atomic_uintptr_t in_blocking_func;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;

  DDLogicalThread *dd_lt;

  TidSlot *slot;
  uptr slot_epoch;
  bool slot_locked;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  atomic_uintptr_t signal_ctx;

#if !SANITIZER_GO
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

#if SANITIZER_APPLE && !SANITIZER_GO
  bool in_internal_write_call;
#endif

  explicit ThreadState(Tid tid);
};

#if !SANITIZER_GO
#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
# endif  // SANITIZER_APPLE || SANITIZER_ANDROID
#endif  // SANITIZER_GO
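
// The `current` indirection above lets set_cur_thread() point the calling OS
// thread at a different ThreadState, which is how the fiber support declared
// later in this file is expected to retarget a thread. Minimal usage sketch
// (illustrative only):
//
//   ThreadState *thr = cur_thread_init();  // lazily initializes `current`
//   // ... runtime code uses cur_thread()/thr ...
//   set_cur_thread(other_thr);  // later cur_thread() calls return other_thr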

class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;
  StackID creation_stack_id;
  VectorClock *sync;
  uptr sync_epoch;
  Trace trace;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // This is used to prevent a very unlikely but very pathological behavior.
  // Since memory access handling is not synchronized with DoReset,
  // a thread running concurrently with DoReset can leave a bogus shadow value
  // that will later be falsely detected as a race. For such false races
  // RestoreStack will return false and we will not report them.
  // However, consider that a thread leaves a whole lot of such bogus values
  // and these values are later read by a whole lot of threads.
  // This will cause massive amounts of ReportRace calls and lots of
  // serialization. In very pathological cases the resulting slowdown
  // can be >100x. This is very unlikely, but it was presumably observed
  // in practice: https://github.com/google/sanitizers/issues/1552
  // If this happens, the previous access sid+epoch will be the same for all of
  // these false races, because if the thread tries to increment the epoch,
  // it will notice that DoReset has happened and will stop producing bogus
  // shadow values. So, last_spurious_race is used to remember the last
  // sid+epoch for which RestoreStack returned false. Then it is used to filter
  // out races with the same sid+epoch very early and quickly.
  // It is of course possible that multiple threads left multiple bogus shadow
  // values and all of them are read by lots of threads at the same time.
  // In such a case last_spurious_race will only be able to deduplicate a few
  // races from one thread, then a few from another, and so on. An alternative
  // would be to hold an array of such sid+epoch pairs, but we consider such a
  // scenario even less likely.
  // Note: this can lead to some rare false negatives as well:
  // 1. When a legit access with the same sid+epoch participates in a race
  //    as the "previous" memory access, it will be wrongly filtered out.
  // 2. When RestoreStack returns false for a legit memory access because it
  //    was already evicted from the thread trace, we will still remember it in
  //    last_spurious_race. Then if there is another racing memory access from
  //    the same thread that happened in the same epoch, but was stored in the
  //    next thread trace part (which is still preserved in the thread trace),
  //    we will also wrongly filter it out while RestoreStack would actually
  //    succeed for that second memory access.
  RawShadow last_spurious_race;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
  uptr mapped_shadow_begin;
  uptr mapped_shadow_end;
#endif
};

extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
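
// Illustrative usage sketch (the function name is hypothetical): runtime code
// that performs libc calls which would otherwise be intercepted can
// scope-guard them like this:
//
//   void InternalHelperThatCallsLibc() {
//     ScopedIgnoreInterceptors ignore;  // increments ignore_interceptors
//     // ... libc calls here are not processed by TSan interceptors ...
//   }  // counter is decremented on scope exit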

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);
  void SetSigNum(int sig);
  void SymbolizeStackElems(void);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack;             \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();
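
// Illustrative sketch of how the macro above is used at a report site
// (assumes a reporting function with `thr` and `pc` in scope; the function
// name is hypothetical):
//
//   void ReportSomething(ThreadState *thr, uptr pc) {
//     GET_STACK_TRACE_FATAL(thr, pc);
//     // `stack` now holds the current call stack and can be passed to
//     // report code, e.g. ScopedReportBase::AddStack().
//   }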

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ);
bool OutputReport(ThreadState *thr, ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStack(uptr pc, bool fast);  // may use libunwind
MBlock *JavaHeapBlock(uptr addr, uptr *start);

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           AccessType typ);
// This creates 2 non-inlined specialized versions of MemoryAccessRange.
template <bool is_read>
void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);

ALWAYS_INLINE
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                       bool is_write) {
  if (size == 0)
    return;
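  // Note: the template parameter of MemoryAccessRangeT is is_read,
  // so is_write == true selects MemoryAccessRangeT<false> and vice versa.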
  if (is_write)
    MemoryAccessRangeT<false>(thr, pc, addr, size);
  else
    MemoryAccessRangeT<true>(thr, pc, addr, size);
}

void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);

Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};
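
// Illustrative sketch (hypothetical helper, not part of the runtime) of how a
// user-level scheduler is expected to drive the fiber API declared above:
//
//   void SwitchToCoroutine(ThreadState *thr, uptr pc, ThreadState *fiber,
//                          bool synchronize) {
//     // FiberSwitchFlagNoSync suppresses synchronization between the old
//     // and the new fiber.
//     unsigned flags = synchronize ? 0 : FiberSwitchFlagNoSync;
//     FiberSwitch(thr, pc, fiber, flags);
//   }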

class SlotLocker {
 public:
  ALWAYS_INLINE
  SlotLocker(ThreadState *thr, bool recursive = false)
      : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
#if !SANITIZER_GO
    // We are in trouble if we are here with in_blocking_func set.
    // If in_blocking_func is set, all signals will be delivered synchronously,
    // which means we can't lock slots since the signal handler will try
    // to lock it recursively and deadlock.
    DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
#endif
    if (!locked_)
      SlotLock(thr_);
  }

  ALWAYS_INLINE
  ~SlotLocker() {
    if (!locked_)
      SlotUnlock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};

class SlotUnlocker {
 public:
  SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
    if (locked_)
      SlotUnlock(thr_);
  }

  ~SlotUnlocker() {
    if (locked_)
      SlotLock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};
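
// Illustrative sketch (hypothetical runtime function) of how the two RAII
// helpers above compose: SlotLocker acquires the slot lock unless
// recursive == true and the lock is already held, and SlotUnlocker
// temporarily drops it, e.g. around a potentially blocking operation:
//
//   void RuntimeOperation(ThreadState *thr) {
//     SlotLocker locker(thr, /*recursive=*/true);
//     // ... work that requires the slot lock ...
//     {
//       SlotUnlocker unlocker(thr);
//       // ... work that must not hold the slot lock ...
//     }  // the slot lock is re-acquired here if it was held before
//   }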

ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
    ProcessPendingSignalsImpl(thr);
}

extern bool is_initialized;

ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init
  // called from .preinit_array initializes runtime before
  // any instrumented code except when tsan is used as a
  // shared library.
#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}

void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag);

template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace,
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}

template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
}

template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *evp;
  if (!TraceAcquire(thr, &evp)) {
    TraceSwitchPart(thr);
    UNUSED bool res = TraceAcquire(thr, &evp);
    DCHECK(res);
  }
  *evp = ev;
  TraceRelease(thr, evp);
}
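
// Illustrative sketch of the TraceAcquire/TraceRelease protocol used above:
// the caller reserves a slot, fills the typed event, and publishes it by
// advancing trace_pos. For example, a function-entry event could also be
// traced via TraceEvent (TryTraceFunc below does the same thing in place):
//
//   EventFunc ev = {};
//   ev.is_access = 0;
//   ev.is_func = 1;
//   ev.pc = pc;          // assumes the entered function's pc is in scope
//   TraceEvent(thr, ev);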

ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  if (!kCollectHistory)
    return true;
  EventFunc *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  ev->is_access = 0;
  ev->is_func = 1;
  ev->pc = pc;
  TraceRelease(thr, ev);
  return true;
}

WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                          AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                               AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);

void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);

void GrowShadowStack(ThreadState *thr);

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
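
// Conceptual sketch of how FuncEntry/FuncExit are driven: compiler
// instrumentation inserts calls to the __tsan_func_entry/__tsan_func_exit
// entry points (declared in tsan_interface.h), which forward to the inline
// helpers above. Roughly, an instrumented function behaves as if written as:
//
//   void user_function() {
//     FuncEntry(cur_thread(), pc);  // push pc onto the shadow stack
//     // ... function body with instrumented memory accesses ...
//     FuncExit(cur_thread());       // pop the shadow stack
//   }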

#if !SANITIZER_GO
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif
}  // namespace __tsan

#endif  // TSAN_RTL_H