//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

#if SANITIZER_APPLE
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}
#endif

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
alignas(SANITIZER_CACHE_LINE_SIZE) THREADLOCAL __attribute__((tls_model(
    "initial-exec"))) char cur_thread_placeholder[sizeof(ThreadState)];
#endif
alignas(SANITIZER_CACHE_LINE_SIZE) static char ctx_placeholder[sizeof(Context)];
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#  if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#  endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#  if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#  endif
}
#endif

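// Allocates a trace part for the current thread. Each thread may own at most
// Trace::kMinParts + flags()->history_size parts; once that limit is reached
// (or other threads have finished parts in excess of their limits), a
// finished part is recycled from ctx->trace_part_recycle and detached from
// its previous owner trace instead of growing memory usage further.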
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

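// Performs the actual global reset: advances the global epoch, releases all
// finished trace parts, resets every slot and its journal, and re-maps the
// shadow memory to zero. The caller (DoReset) must hold all slot mutexes.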
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tids can still be stored in some aux places
    // (e.g. the tid of the thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify the
        // position within this part, because switching parts is protected
        // by the slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow");
#  if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
#  endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

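// Finds a usable slot for the thread and returns it locked. Slots are taken
// from the round-robin ctx->slot_queue; exhausted slots (epoch == kEpochLast)
// are dropped from the queue, and if no usable slot remains, a global reset
// (DoReset) is triggered and the search restarts. If the chosen slot is still
// attached to another thread, that thread is preempted.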
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

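// Attaches the thread to a free slot and leaves the slot mutex locked.
// The slot epoch is incremented and becomes the thread's fast-state epoch;
// if a global reset happened since the thread was last attached, the thread's
// clock is reset as well.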
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

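// Detaches the thread from its slot. If the slot has already been taken over
// by another thread (preemption), there is nothing to release; the stale
// trace state is discarded if a global reset happened in the meantime.
// Otherwise the slot epoch is synced from the thread and the slot is freed.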
static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that
        // the thread was preempted right after it acquired the slot
        // in ThreadStart and did not trace any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check that these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses a fixed-size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses a malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
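// Memory profiling (flags()->profile_memory): MemoryProfiler periodically
// writes a memory usage snapshot to the destination configured by the flag
// ("stdout", "stderr" or <file>.<pid>); it is driven by the background thread
// below.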
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

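// The background thread periodically flushes shadow memory (flush_memory_ms,
// memory_limit_mb), dumps the memory profile (profile_memory) and flushes the
// symbolizer cache (flush_symbolizer_ms), depending on the respective flags.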
static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand whether the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try to not
// crash as the failure mode is very confusing (paging fault inside of the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif

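// Maps shadow and meta shadow for the application memory range
// [addr, addr+size). The first call covers data+bss; subsequent calls map the
// heap and only extend the meta shadow past what is already mapped.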
void MapShadow(uptr addr, uptr size) {
  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with an unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new (ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
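// fork() support: ForkBefore/ForkParentAfter/ForkChildAfter are called around
// fork (from the fork interceptor / pthread_atfork callbacks) to lock all
// runtime mutexes before the fork and to restore the state afterwards, so the
// child does not inherit them in a locked state.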
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLockBeforeFork();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

#  if SANITIZER_APPLE
  __tsan_test_only_on_fork();
#  endif
}

static void ForkAfter(ThreadState* thr,
                      bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlockAfterFork(child);
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr, false); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr, true);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

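// Returns a stack depot id for the current shadow (call) stack of the thread.
// If pc is non-zero, it is temporarily pushed on top of the stack for the
// duration of the StackDepotPut call.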
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

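// Fills the gap at the end of the current trace part with NopEvents (the
// fast-path check in TraceAcquire is only approximate). Returns true if there
// is still usable space in the current part, false if a new part is needed.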
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvents.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of the parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set at the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may not be queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock at the beginning
    // of the function and the slot was at kEpochLast - 1, so after the
    // increment in SlotAttachAndLock it became kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

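// ThreadIgnoreBegin/End maintain a nesting counter of ignore regions; while
// the counter is non-zero the ignore bit is set in the thread's fast state
// and memory accesses are not tracked. The stacks of the enclosing ignore
// begins are collected in mop_ignore_set (sync_ignore_set for the sync
// variants below) for later diagnostics.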
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif