1//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// Main internal TSan header file.
12//
13// Ground rules:
14// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
15// function-scope locals)
16// - All functions/classes/etc reside in namespace __tsan, except for those
17// declared in tsan_interface.h.
18// - Platform-specific files should be used instead of ifdefs (*).
19// - No system headers included in header files (*).
// - Platform-specific headers included only into platform-specific files (*).
21//
22// (*) Except when inlining is critical for performance.
23//===----------------------------------------------------------------------===//
24
25#ifndef TSAN_RTL_H
26#define TSAN_RTL_H
27
28#include "sanitizer_common/sanitizer_allocator.h"
29#include "sanitizer_common/sanitizer_allocator_internal.h"
30#include "sanitizer_common/sanitizer_asm.h"
31#include "sanitizer_common/sanitizer_common.h"
32#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
33#include "sanitizer_common/sanitizer_libignore.h"
34#include "sanitizer_common/sanitizer_suppressions.h"
35#include "sanitizer_common/sanitizer_thread_registry.h"
36#include "sanitizer_common/sanitizer_vector.h"
37#include "tsan_adaptive_delay.h"
38#include "tsan_defs.h"
39#include "tsan_flags.h"
40#include "tsan_ignoreset.h"
41#include "tsan_ilist.h"
42#include "tsan_mman.h"
43#include "tsan_mutexset.h"
44#include "tsan_platform.h"
45#include "tsan_report.h"
46#include "tsan_shadow.h"
47#include "tsan_stack_trace.h"
48#include "tsan_sync.h"
49#include "tsan_trace.h"
50#include "tsan_vector_clock.h"
51
52#if SANITIZER_WORDSIZE != 64
53# error "ThreadSanitizer is supported only on 64-bit platforms"
54#endif
55
56namespace __tsan {
57
58extern bool ready_to_symbolize;
59
60#if !SANITIZER_GO
61struct MapUnmapCallback;
62# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
63 defined(__powerpc__) || SANITIZER_RISCV64
64
// Parameters for the 32-bit-style primary allocator, used on platforms with
// small or fragmented address spaces (mips64, aarch64, loongarch, powerpc,
// riscv64 — see the #if above).
struct AP32 {
  static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;  // no per-chunk metadata
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;  // 1 MB regions
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
76#else
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
# if defined(__s390x__)
  typedef MappingS390x Mapping;
# else
  typedef Mapping48AddressSpace Mapping;
# endif
  // The primary allocator serves from the dedicated heap range of the
  // TSan shadow mapping.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;  // no per-chunk metadata
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
92#endif
93typedef CombinedAllocator<PrimaryAllocator> Allocator;
94typedef Allocator::AllocatorCache AllocatorCache;
95Allocator *allocator();
96#endif
97
98struct ThreadSignalContext;
99
// State captured at setjmp() so that the longjmp() interceptor can restore
// TSan's per-thread bookkeeping when a longjmp skips normal function exits.
// Fields mirror members of ThreadState saved at the setjmp call.
struct JmpBuf {
  uptr sp;  // stack pointer at the setjmp call; identifies the jump target
  int int_signal_send;
  bool in_blocking_func;
  uptr oldset_stack_size;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;  // shadow call stack depth to unwind back to
};
108
109// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like the allocator cache, and does not
111// participate in race-detection logic (invisible to end user).
112// In C++ it is tied to an OS thread just like ThreadState, however ideally
113// it should be tied to a CPU (this way we will have fewer allocator caches).
114// In Go it is tied to a P, so there are significantly fewer Processor's than
115// ThreadState's (which are tied to Gs).
116// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr; // currently wired thread, or nullptr
#if !SANITIZER_GO
  // Per-processor allocator caches (C++ only; Go uses its own allocator).
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  // Caches for the dense slab allocators backing heap-block and
  // sync-object metadata.
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;  // deadlock detector per-physical-thread state
};
127
128#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
130// thread, if it does not have one. Intended for interceptors that can run
131// at the very thread end, when we already destroyed the thread processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();   // wires the global processor if thr has none
  ~ScopedGlobalProcessor();  // unwires it again
};
136#endif
137
// A (thread id, epoch) pair; used in TidSlot::journal to record which
// threads used a slot and when.
struct TidEpoch {
  Tid tid;
  Epoch epoch;
};
142
143struct alignas(SANITIZER_CACHE_LINE_SIZE) TidSlot {
144 Mutex mtx;
145 Sid sid;
146 atomic_uint32_t raw_epoch;
147 ThreadState *thr;
148 Vector<TidEpoch> journal;
149 INode node;
150
151 Epoch epoch() const {
152 return static_cast<Epoch>(atomic_load(a: &raw_epoch, mo: memory_order_relaxed));
153 }
154
155 void SetEpoch(Epoch v) {
156 atomic_store(a: &raw_epoch, v: static_cast<u32>(v), mo: memory_order_relaxed);
157 }
158
159 TidSlot();
160};
161
162// This struct is stored in TLS.
struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
  FastState fast_state;  // hot state; holds sid and the ignore bit
  int ignore_sync;       // non-zero while sync operations are ignored
#if !SANITIZER_GO
  int ignore_interceptors;  // non-zero while interceptors are no-ops
#endif
  uptr *shadow_stack_pos;  // top of the shadow call stack

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;  // non-zero if signals await processing

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  uptr *shadow_stack;      // base of the shadow call stack
  uptr *shadow_stack_end;  // one past the last usable slot
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;  // active setjmp contexts (see JmpBuf)
  int in_symbolizer;
  atomic_uintptr_t in_blocking_func;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;
  // Stack and TLS ranges of the thread (see IsThreadStackOrTls).
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;  // registry context for this thread

  DDLogicalThread *dd_lt;  // deadlock detector per-logical-thread state

  TidSlot *slot;     // slot the thread is currently attached to
  uptr slot_epoch;   // global epoch observed when the slot was attached
  bool slot_locked;  // true while this thread holds slot->mtx

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  atomic_uintptr_t signal_ctx;

#if !SANITIZER_GO
  // State recorded by the last sleep-related annotation; used for reports.
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;  // report being produced, if any

#if SANITIZER_APPLE && !SANITIZER_GO
  bool in_internal_write_call;
#endif

  AdaptiveDelayState adaptive_delay_state;

  explicit ThreadState(Tid tid);
};
248
249#if !SANITIZER_GO
250#if SANITIZER_APPLE || SANITIZER_ANDROID
251ThreadState *cur_thread();
252void set_cur_thread(ThreadState *thr);
253void cur_thread_finalize();
254inline ThreadState *cur_thread_init() { return cur_thread(); }
255# else
256__attribute__((tls_model("initial-exec")))
257extern THREADLOCAL char cur_thread_placeholder[];
// Returns the ThreadState for the current thread: the placeholder's
// `current` pointer (which may point elsewhere, e.g. after set_cur_thread).
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
// Like cur_thread(), but performs first-use initialization: on first call
// `current` is made to point at the placeholder itself.
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
// Redirects cur_thread() to `thr` (used e.g. by the fiber API).
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
// Nothing to finalize: the placeholder is plain TLS storage.
inline void cur_thread_finalize() { }
271# endif // SANITIZER_APPLE || SANITIZER_ANDROID
272#endif // SANITIZER_GO
273
// TSan-specific per-thread context kept in the thread registry; outlives
// the thread's ThreadState.
class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;            // live thread state (lifetime managed by callbacks)
  StackID creation_stack_id;   // stack at the point of thread creation
  VectorClock *sync;           // clock used for creation/join synchronization
  uptr sync_epoch;
  Trace trace;                 // per-thread event trace

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};
293
// Pair of stack hashes identifying an already-reported race; used to
// deduplicate reports (see Context::racy_stacks).
struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};

// Inclusive address range associated with a racy access.
struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

// Record of a suppression that already matched a report
// (see Context::fired_suppressions).
struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;  // PC or address the suppression matched on
  Suppression *supp;
};
309
// The global runtime state; a single instance is pointed to by `ctx`.
struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;  // app memory -> heap block / sync object metadata

  Mutex report_mtx;  // serializes report production
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // This is used to prevent a very unlikely but very pathological behavior.
  // Since memory access handling is not synchronized with DoReset,
  // a thread running concurrently with DoReset can leave a bogus shadow value
  // that will be later falsely detected as a race. For such false races
  // RestoreStack will return false and we will not report it.
  // However, consider that a thread leaves a whole lot of such bogus values
  // and these values are later read by a whole lot of threads.
  // This will cause massive amounts of ReportRace calls and lots of
  // serialization. In very pathological cases the resulting slowdown
  // can be >100x. This is very unlikely, but it was presumably observed
  // in practice: https://github.com/google/sanitizers/issues/1552
  // If this happens, previous access sid+epoch will be the same for all of
  // these false races b/c if the thread will try to increment epoch, it will
  // notice that DoReset has happened and will stop producing bogus shadow
  // values. So, last_spurious_race is used to remember the last sid+epoch
  // for which RestoreStack returned false. Then it is used to filter out
  // races with the same sid+epoch very early and quickly.
  // It is of course possible that multiple threads left multiple bogus shadow
  // values and all of them are read by lots of threads at the same time.
  // In such case last_spurious_race will only be able to deduplicate a few
  // races from one thread, then few from another and so on. An alternative
  // would be to hold an array of such sid+epoch, but we consider such scenario
  // as even less likely.
  // Note: this can lead to some rare false negatives as well:
  // 1. When a legit access with the same sid+epoch participates in a race
  // as the "previous" memory access, it will be wrongly filtered out.
  // 2. When RestoreStack returns false for a legit memory access because it
  // was already evicted from the thread trace, we will still remember it in
  // last_spurious_race. Then if there is another racing memory access from
  // the same thread that happened in the same epoch, but was stored in the
  // next thread trace part (which is still preserved in the thread trace),
  // we will also wrongly filter it out while RestoreStack would actually
  // succeed for that second memory access.
  RawShadow last_spurious_race;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;  // dedup of already-reported races
  // Number of fired suppressions may be large enough.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;  // deadlock detector

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
  uptr mapped_shadow_begin;
  uptr mapped_shadow_end;
#endif
};
392
393extern Context *ctx; // The one and the only global runtime context.
394
// Accessor for the runtime flags stored in the global context.
ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}
398
399struct ScopedIgnoreInterceptors {
400 ScopedIgnoreInterceptors() {
401#if !SANITIZER_GO
402 cur_thread()->ignore_interceptors++;
403#endif
404 }
405
406 ~ScopedIgnoreInterceptors() {
407#if !SANITIZER_GO
408 cur_thread()->ignore_interceptors--;
409#endif
410 }
411};
412
413const char *GetObjectTypeFromTag(uptr tag);
414const char *GetReportHeaderFromTag(uptr tag);
415uptr TagFromShadowStackFrame(uptr pc);
416
// Accumulates the contents of one report (memory accesses, stacks, threads,
// mutexes, locations) into a ReportDesc owned by this object.
class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);
  void SetSigNum(int sig);
  void SymbolizeStackElems(void);

  // Returns the accumulated report; remains owned by this object.
  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};
447
// ScopedReportBase plus the global error-report lock, held for the
// lifetime of the report.
class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};
456
457bool ShouldReport(ThreadState *thr, ReportType typ);
458ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
459
460// The stack could look like:
461// <start> | <main> | <foo> | tag | <bar>
462// This will extract the tag and keep:
463// <start> | <main> | <foo> | <bar>
464template<typename StackTraceTy>
465void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
466 if (stack->size < 2) return;
467 uptr possible_tag_pc = stack->trace[stack->size - 2];
468 uptr possible_tag = TagFromShadowStackFrame(pc: possible_tag_pc);
469 if (possible_tag == kExternalTagNone) return;
470 stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
471 stack->size -= 1;
472 if (tag) *tag = possible_tag;
473}
474
475template<typename StackTraceTy>
476void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
477 uptr *tag = nullptr) {
478 uptr size = thr->shadow_stack_pos - thr->shadow_stack;
479 uptr start = 0;
480 if (size + !!toppc > kStackTraceMax) {
481 start = size + !!toppc - kStackTraceMax;
482 size = kStackTraceMax - !!toppc;
483 }
484 stack->Init(&thr->shadow_stack[start], size, toppc);
485 ExtractTagFromStack(stack, tag);
486}
487
488#define GET_STACK_TRACE_FATAL(thr, pc) \
489 VarSizeStackTrace stack; \
490 ObtainCurrentStack(thr, pc, &stack); \
491 stack.ReverseOrder();
492
493void MapShadow(uptr addr, uptr size);
494void MapThreadTrace(uptr addr, uptr size, const char *name);
495void DontNeedShadowFor(uptr addr, uptr size);
496void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
497void InitializeShadowMemory();
498void DontDumpShadow(uptr addr, uptr size);
499void InitializeInterceptors();
500void InitializeLibIgnore();
501void InitializeDynamicAnnotations();
502
503void ForkBefore(ThreadState *thr, uptr pc);
504void ForkParentAfter(ThreadState *thr, uptr pc);
505void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
506
507void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
508 AccessType typ);
509bool OutputReport(ThreadState *thr, ScopedReport &srep);
510bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
511bool IsExpectedReport(uptr addr, uptr size);
512
513#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
514# define DPrintf Printf
515#else
516# define DPrintf(...)
517#endif
518
519#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
520# define DPrintf2 Printf
521#else
522# define DPrintf2(...)
523#endif
524
525StackID CurrentStackId(ThreadState *thr, uptr pc);
526ReportStack *SymbolizeStackId(StackID stack_id);
527void PrintCurrentStack(ThreadState *thr, uptr pc);
528void PrintCurrentStack(uptr pc, bool fast); // may uses libunwind
529MBlock *JavaHeapBlock(uptr addr, uptr *start);
530
531void Initialize(ThreadState *thr);
532void MaybeSpawnBackgroundThread();
533int Finalize(ThreadState *thr);
534
535void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
536void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
537
538void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
539 AccessType typ);
540void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
541 AccessType typ);
542// This creates 2 non-inlined specialized versions of MemoryAccessRange.
543template <bool is_read>
544void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
545
546ALWAYS_INLINE
547void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
548 bool is_write) {
549 if (size == 0)
550 return;
551 if (is_write)
552 MemoryAccessRangeT<false>(thr, pc, addr, size);
553 else
554 MemoryAccessRangeT<true>(thr, pc, addr, size);
555}
556
557void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
558void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
559void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
560void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
561void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
562 uptr size);
563
564void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
565void ThreadIgnoreEnd(ThreadState *thr);
566void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
567void ThreadIgnoreSyncEnd(ThreadState *thr);
568
569Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
570void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id,
571 ThreadType thread_type);
572void ThreadFinish(ThreadState *thr);
573Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
574void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
575void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
576void ThreadFinalize(ThreadState *thr);
577void ThreadSetName(ThreadState *thr, const char *name);
578int ThreadCount(ThreadState *thr);
579void ProcessPendingSignalsImpl(ThreadState *thr);
580void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
581
582Processor *ProcCreate();
583void ProcDestroy(Processor *proc);
584void ProcWire(Processor *proc, ThreadState *thr);
585void ProcUnwire(Processor *proc, ThreadState *thr);
586
587// Note: the parameter is called flagz, because flags is already taken
588// by the global function that returns flags.
589void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
590void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
591void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
592void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
593 int rec = 1);
594int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
595void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
596void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
597void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
598void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
599void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
600void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
601
602void Acquire(ThreadState *thr, uptr pc, uptr addr);
603// AcquireGlobal synchronizes the current thread with all other threads.
604// In terms of happens-before relation, it draws a HB edge from all threads
605// (where they happen to execute right now) to the current thread. We use it to
606// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
607// right before executing finalizers. This provides a coarse, but simple
608// approximation of the actual required synchronization.
609void AcquireGlobal(ThreadState *thr);
610void Release(ThreadState *thr, uptr pc, uptr addr);
611void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
612void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
613void AfterSleep(ThreadState *thr, uptr pc);
614void IncrementEpoch(ThreadState *thr);
615
616#if !SANITIZER_GO
// End of the heap range, including the primary allocator's extra
// address space beyond HeapMemEnd().
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
620#endif
621
622void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
623void SlotDetach(ThreadState *thr);
624void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
625void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
626void DoReset(ThreadState *thr, uptr epoch);
627void FlushShadowMemory();
628
629ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
630void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
631void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
632
633// These need to match __tsan_switch_to_fiber_* flags defined in
634// tsan_interface.h. See documentation there as well.
635enum FiberSwitchFlags {
636 FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
637};
638
639class SlotLocker {
640 public:
641 ALWAYS_INLINE
642 SlotLocker(ThreadState *thr, bool recursive = false)
643 : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
644#if !SANITIZER_GO
645 // We are in trouble if we are here with in_blocking_func set.
646 // If in_blocking_func is set, all signals will be delivered synchronously,
647 // which means we can't lock slots since the signal handler will try
648 // to lock it recursively and deadlock.
649 DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
650#endif
651 if (!locked_)
652 SlotLock(thr: thr_);
653 }
654
655 ALWAYS_INLINE
656 ~SlotLocker() {
657 if (!locked_)
658 SlotUnlock(thr: thr_);
659 }
660
661 private:
662 ThreadState *thr_;
663 bool locked_;
664};
665
666class SlotUnlocker {
667 public:
668 SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
669 if (locked_)
670 SlotUnlock(thr: thr_);
671 }
672
673 ~SlotUnlocker() {
674 if (locked_)
675 SlotLock(thr: thr_);
676 }
677
678 private:
679 ThreadState *thr_;
680 bool locked_;
681};
682
683ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
684 if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
685 ProcessPendingSignalsImpl(thr);
686}
687
688extern bool is_initialized;
689
// Initializes the runtime on first use when .preinit_array cannot be
// relied upon; otherwise compiles to nothing.
ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init
  // called from .preinit_array initializes runtime before
  // any instrumented code except when tsan is used as a
  // shared library.
#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}
701
702void TraceResetForTesting();
703void TraceSwitchPart(ThreadState *thr);
704void TraceSwitchPartImpl(ThreadState *thr);
705bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
706 AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
707 MutexSet *pmset, uptr *ptag);
708
709template <typename EventT>
710ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
711 EventT **ev) {
712 // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
713 // so we check it here proactively.
714 DCHECK(thr->shadow_stack);
715 Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(a: &thr->trace_pos));
716#if SANITIZER_DEBUG
717 // TraceSwitch acquires these mutexes,
718 // so we lock them here to detect deadlocks more reliably.
719 { Lock lock(&ctx->slot_mtx); }
720 { Lock lock(&thr->tctx->trace.mtx); }
721 TracePart *current = thr->tctx->trace.parts.Back();
722 if (current) {
723 DCHECK_GE(pos, &current->events[0]);
724 DCHECK_LE(pos, &current->events[TracePart::kSize]);
725 } else {
726 DCHECK_EQ(pos, nullptr);
727 }
728#endif
729 // TracePart is allocated with mmap and is at least 4K aligned.
730 // So the following check is a faster way to check for part end.
731 // It may have false positives in the middle of the trace,
732 // they are filtered out in TraceSwitch.
733 if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
734 return false;
735 *ev = reinterpret_cast<EventT *>(pos);
736 return true;
737}
738
739template <typename EventT>
740ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
741 DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
742 atomic_store_relaxed(a: &thr->trace_pos, v: (uptr)(evp + 1));
743}
744
745template <typename EventT>
746void TraceEvent(ThreadState *thr, EventT ev) {
747 EventT *evp;
748 if (!TraceAcquire(thr, &evp)) {
749 TraceSwitchPart(thr);
750 UNUSED bool res = TraceAcquire(thr, &evp);
751 DCHECK(res);
752 }
753 *evp = ev;
754 TraceRelease(thr, evp);
755}
756
757ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
758 uptr pc = 0) {
759 if (!kCollectHistory)
760 return true;
761 EventFunc *ev;
762 if (UNLIKELY(!TraceAcquire(thr, &ev)))
763 return false;
764 ev->is_access = 0;
765 ev->is_func = 1;
766 ev->pc = pc;
767 TraceRelease(thr, evp: ev);
768 return true;
769}
770
771WARN_UNUSED_RESULT
772bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
773 AccessType typ);
774WARN_UNUSED_RESULT
775bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
776 AccessType typ);
777void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
778 AccessType typ);
779void TraceFunc(ThreadState *thr, uptr pc = 0);
780void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
781 StackID stk);
782void TraceMutexUnlock(ThreadState *thr, uptr addr);
783void TraceTime(ThreadState *thr);
784
785void TraceRestartFuncExit(ThreadState *thr);
786void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
787
788void GrowShadowStack(ThreadState *thr);
789
// Instrumentation hook for function entry: records the event in the trace
// and pushes pc onto the shadow call stack.
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  // Trace part full: take the slow path that switches parts and retries.
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  // Go grows the shadow stack on demand; C++ preallocates a fixed range.
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
805
// Instrumentation hook for function exit: records the event in the trace
// and pops the shadow call stack.
ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  // Trace part full: take the slow path that switches parts and retries.
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
817
818#if !SANITIZER_GO
819extern void (*on_initialize)(void);
820extern int (*on_finalize)(int);
821#endif
822} // namespace __tsan
823
824#endif // TSAN_RTL_H
825