//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnReset() { CHECK(!sync); }

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

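// Callback for RunCallbackForEachThreadLocked: collects threads that have
// finished but were neither joined nor detached. Leaks created at the same
// call site (same creation_stack_id) are merged into one entry with a count.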
static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
  auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
  auto *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  leaks.PushBack({tctx, 1});
}
#endif

// Disabled on Mac because lldb test TestTsanBasic fails:
// https://reviews.llvm.org/D112603#3163158
#if !SANITIZER_GO && !SANITIZER_APPLE
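// Prints the ignores that were enabled but never ended when the thread
// finished, together with the stacks where they were enabled, and dies.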
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == kMainTid) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

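// Called at process exit: if thread_leak reporting is enabled, reports all
// threads that finished without being joined or detached.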
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!ShouldReport(thr, ReportTypeThreadLeak))
    return;
  Vector<ThreadLeak> leaks;
  {
    ThreadRegistryLock l(&ctx->thread_registry);
    ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
                                                        &leaks);
  }

  for (uptr i = 0; i < leaks.Size(); i++) {
    // Use alloca, because malloc during signal handling deadlocks.
    ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
    // Open a new scope, as Apple platforms require the locks below to be
    // released before symbolizing in order to avoid a deadlock.
    {
      ThreadRegistryLock l(&ctx->thread_registry);
      new (rep) ScopedReport(ReportTypeThreadLeak);
      rep->AddThread(leaks[i].tctx, true);
      rep->SetCount(leaks[i].count);
#  if SANITIZER_APPLE
    }  // Close this scope to release the locks.
#  endif
    OutputReport(thr, *rep);

    // Need to destroy this manually because we used placement new to allocate.
    rep->~ScopedReport();
#  if !SANITIZER_APPLE
    }
#  endif
  }
#endif
}

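// Returns the number of threads that are currently alive in the registry.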
int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

struct OnCreatedArgs {
  VectorClock *sync;
  uptr sync_epoch;
  StackID stack;
};

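// Registers the new thread and, unless sync is ignored, releases the parent's
// vector clock into OnCreatedArgs::sync. The clock is stamped with the current
// global_epoch so that ThreadStart can tell whether it is still valid when the
// child actually starts.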
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  // The main thread and GCD workers don't have a parent thread.
  Tid parent = kInvalidTid;
  OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
  if (thr) {
    parent = thr->tid;
    arg.stack = CurrentStackId(thr, pc);
    if (!thr->ignore_sync) {
      SlotLocker locker(thr);
      thr->clock.ReleaseStore(&arg.sync);
      arg.sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
  return tid;
}

void ThreadContext::OnCreated(void *arg) {
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  sync = args->sync;
  sync_epoch = args->sync_epoch;
  creation_stack_id = args->stack;
}

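// Empty function whose PC is used to attribute the imitated stack/TLS
// initialization writes performed in ThreadStart, so that reports about races
// with thread startup show a recognizable frame.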
extern "C" void __tsan_stack_initialization() {}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

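// Attaches the thread to a clock slot, acquires the creator's clock released
// in ThreadCreate (but only if the stored sync_epoch still matches the global
// epoch), and records the thread's stack and TLS ranges.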
void ThreadStart(ThreadState *thr, Tid tid, ThreadID os_id,
                 ThreadType thread_type) {
  ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
  if (!thr->ignore_sync) {
    SlotAttachAndLock(thr);
    if (thr->tctx->sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(thr->tctx->sync);
    SlotUnlock(thr);
  }
  Free(thr->tctx->sync);

#if !SANITIZER_GO
  thr->is_inited = true;
#endif

  uptr stk_addr = 0;
  uptr stk_end = 0;
  uptr tls_addr = 0;
  uptr tls_end = 0;
#if !SANITIZER_GO
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_end, &tls_addr,
                         &tls_end);
#endif
  uptr stk_size = stk_end - stk_addr;
  uptr tls_size = tls_end - tls_addr;
  thr->stk_addr = stk_addr;
  thr->stk_size = stk_size;
  thr->tls_addr = tls_addr;
  thr->tls_size = tls_size;

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif

#if !SANITIZER_GO && !SANITIZER_ANDROID
  // Don't imitate stack/TLS writes for the main thread,
  // because its initialization is synchronized with all
  // subsequent threads anyway.
  // On Android this is skipped altogether: thr is allocated with MmapOrDie
  // and does not live in TLS; only the pointer to it is stored in the
  // TLS_SLOT_SANITIZER slot.
  if (tid != kMainTid) {
    if (stk_addr && stk_size) {
      const uptr pc = StackTrace::GetNextInstructionPc(
          reinterpret_cast<uptr>(__tsan_stack_initialization));
      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
    }

    if (tls_addr && tls_size)
      ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif
}

void ThreadContext::OnStarted(void *arg) {
  DPrintf("#%d: ThreadStart\n", tid);
  thr = new (arg) ThreadState(tid);
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
  thr->tctx = this;
}

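// Tears down a thread: returns its stack/TLS shadow, releases its clock into
// tctx->sync for a future ThreadJoin (unless the thread is detached), frees
// the shadow stack, and detaches the clock slot.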
void ThreadFinish(ThreadState *thr) {
  DPrintf("#%d: ThreadFinish\n", thr->tid);
  ThreadCheckIgnore(thr);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
#if !SANITIZER_GO
  thr->is_inited = false;
  thr->ignore_interceptors++;
  PlatformCleanUpThreadState(thr);
#endif
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    ThreadRegistryLock lock(&ctx->thread_registry);
    // Note: detached is protected by the thread registry mutex;
    // the thread may be detached concurrently by another thread.
    if (!thr->tctx->detached) {
      thr->clock.ReleaseStore(&thr->tctx->sync);
      thr->tctx->sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
#if !SANITIZER_GO
  UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
#else
  Free(thr->shadow_stack);
#endif
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  SlotDetach(thr);
  ctx->thread_registry.FinishThread(thr->tid);
  thr->~ThreadState();
}

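// Runs under the thread registry lock when the thread finishes: moves the
// thread's local trace parts to the global recycle queue and, if too many
// parts of finished threads have accumulated, accounts the excess and caps
// how many parts this trace is allowed to keep allocated.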
void ThreadContext::OnFinished() {
  Lock lock(&ctx->slot_mtx);
  Lock lock1(&trace.mtx);
  // Queue all trace parts into the global recycle queue.
  auto parts = &trace.parts;
  while (trace.local_head) {
    CHECK(parts->Queued(trace.local_head));
    ctx->trace_part_recycle.PushBack(trace.local_head);
    trace.local_head = parts->Next(trace.local_head);
  }
  ctx->trace_part_recycle_finished += parts->Size();
  if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
    ctx->trace_part_finished_excess += parts->Size();
    trace.parts_allocated = 0;
  } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
             parts->Size() > 1) {
    ctx->trace_part_finished_excess += parts->Size() - 1;
    trace.parts_allocated = 1;
  }
  // From now on replay will use trace->final_pos.
  trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
  atomic_store_relaxed(&thr->trace_pos, 0);
  thr->tctx = nullptr;
  thr = nullptr;
}

struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};

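// Translates a user thread id (pthread_t) into a Tid and removes the
// association, so that the user id can be reused by a new thread.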
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  return ctx->thread_registry.ConsumeThreadUserId(uid);
}

struct JoinArg {
  VectorClock *sync;
  uptr sync_epoch;
};

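// Acquires the terminated thread's clock (released in ThreadFinish), again
// guarded by the sync_epoch check, to establish happens-before between the
// thread's termination and the join.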
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  JoinArg arg = {};
  ctx->thread_registry.JoinThread(tid, &arg);
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    if (arg.sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(arg.sync);
  }
  Free(arg.sync);
}

void ThreadContext::OnJoined(void *ptr) {
  auto arg = static_cast<JoinArg *>(ptr);
  arg->sync = sync;
  arg->sync_epoch = sync_epoch;
  sync = nullptr;
  sync_epoch = 0;
}

void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }

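// Marks the thread as detached; its sync clock is freed in OnDetached,
// since nobody will join it.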
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.DetachThread(tid, thr);
}

void ThreadContext::OnDetached(void *arg) { Free(sync); }

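// Re-registers the user id for a thread whose join attempt failed after
// ThreadConsumeTid had already consumed it, so the thread can be joined later.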
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.SetThreadUserId(tid, uid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry.SetThreadName(thr->tid, name);
}

#if !SANITIZER_GO
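// Rewires the Processor from the current ThreadState to the target one and
// makes the target the current thread of this OS thread.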
void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  Processor *proc = from->proc();
  ProcUnwire(proc, from);
  ProcWire(proc, to);
  set_cur_thread(to);
}

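// Allocates a zero-initialized ThreadState for the new fiber and registers it
// as a detached thread. The temporary switches make the fiber the current
// thread while ThreadStart runs, then restore the caller.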
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = Alloc(sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  Tid tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}

void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  Free(fiber);
}

void FiberSwitch(ThreadState *thr, uptr pc,
                 ThreadState *fiber, unsigned flags) {
  if (!(flags & FiberSwitchFlagNoSync))
    Release(thr, pc, (uptr)fiber);
  FiberSwitchImpl(thr, fiber);
  if (!(flags & FiberSwitchFlagNoSync))
    Acquire(fiber, pc, (uptr)fiber);
}
#endif

}  // namespace __tsan