1//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// FIXME: move as many interceptors as possible into
12// sanitizer_common/sanitizer_common_interceptors.inc
13//===----------------------------------------------------------------------===//
14
15#include <stdarg.h>
16
17#include "interception/interception.h"
18#include "sanitizer_common/sanitizer_allocator_dlsym.h"
19#include "sanitizer_common/sanitizer_atomic.h"
20#include "sanitizer_common/sanitizer_errno.h"
21#include "sanitizer_common/sanitizer_glibc_version.h"
22#include "sanitizer_common/sanitizer_internal_defs.h"
23#include "sanitizer_common/sanitizer_libc.h"
24#include "sanitizer_common/sanitizer_linux.h"
25#include "sanitizer_common/sanitizer_placement_new.h"
26#include "sanitizer_common/sanitizer_platform_interceptors.h"
27#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
28#include "sanitizer_common/sanitizer_platform_limits_posix.h"
29#include "sanitizer_common/sanitizer_posix.h"
30#include "sanitizer_common/sanitizer_stacktrace.h"
31#include "sanitizer_common/sanitizer_tls_get_addr.h"
32#include "sanitizer_common/sanitizer_vector.h"
33#include "tsan_fd.h"
34#if SANITIZER_APPLE && !SANITIZER_GO
35# include "tsan_flags.h"
36#endif
37#include "tsan_adaptive_delay.h"
38#include "tsan_interceptors.h"
39#include "tsan_interface.h"
40#include "tsan_mman.h"
41#include "tsan_platform.h"
42#include "tsan_rtl.h"
43#include "tsan_suppressions.h"
44
45using namespace __tsan;
46
47DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
48DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
49
50#if SANITIZER_FREEBSD || SANITIZER_APPLE
51#define stdout __stdoutp
52#define stderr __stderrp
53#endif
54
55#if SANITIZER_NETBSD
56#define dirfd(dirp) (*(int *)(dirp))
57#define fileno_unlocked(fp) \
58 (((__sanitizer_FILE *)fp)->_file == -1 \
59 ? -1 \
60 : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
61
62#define stdout ((__sanitizer_FILE*)&__sF[1])
63#define stderr ((__sanitizer_FILE*)&__sF[2])
64
65#define nanosleep __nanosleep50
66#define vfork __vfork14
67#endif
68
69#ifdef __mips__
70const int kSigCount = 129;
71#else
72const int kSigCount = 65;
73#endif
74
75#ifdef __mips__
76struct ucontext_t {
77 u64 opaque[768 / sizeof(u64) + 1];
78};
79#else
80struct ucontext_t {
81 // The size is determined by looking at sizeof of real ucontext_t on linux.
82 u64 opaque[936 / sizeof(u64) + 1];
83};
84#endif
85
86extern "C" int pthread_attr_init(void *attr);
87extern "C" int pthread_attr_destroy(void *attr);
88DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
89extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
90extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
91 void (*child)(void));
92extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
93extern "C" int pthread_setspecific(unsigned key, const void *v);
94DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
95DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
96DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, usize size)
97DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
98extern "C" int pthread_equal(void *t1, void *t2);
99extern "C" void *pthread_self();
100extern "C" void _exit(int status);
101#if !SANITIZER_NETBSD
102extern "C" int fileno_unlocked(void *stream);
103extern "C" int dirfd(void *dirp);
104#endif
105#if SANITIZER_NETBSD
106extern __sanitizer_FILE __sF[];
107#else
108extern __sanitizer_FILE *stdout, *stderr;
109#endif
110#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
111const int PTHREAD_MUTEX_RECURSIVE = 1;
112const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
113#else
114const int PTHREAD_MUTEX_RECURSIVE = 2;
115const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
116#endif
117#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
118const int EPOLL_CTL_ADD = 1;
119#endif
120const int SIGILL = 4;
121const int SIGTRAP = 5;
122const int SIGABRT = 6;
123const int SIGFPE = 8;
124const int SIGSEGV = 11;
125const int SIGPIPE = 13;
126const int SIGTERM = 15;
127#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
128const int SIGBUS = 10;
129const int SIGSYS = 12;
130#else
131const int SIGBUS = 7;
132const int SIGSYS = 31;
133#endif
134#if SANITIZER_HAS_SIGINFO
135const int SI_TIMER = -2;
136#endif
137void *const MAP_FAILED = (void*)-1;
138#if SANITIZER_NETBSD
139const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
140#elif !SANITIZER_APPLE
141const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
142#endif
143const int MAP_FIXED = 0x10;
144typedef long long_t;
145typedef __sanitizer::u16 mode_t;
146
147// From /usr/include/unistd.h
148# define F_ULOCK 0 /* Unlock a previously locked region. */
149# define F_LOCK 1 /* Lock a region for exclusive use. */
150# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
151# define F_TEST 3 /* Test a region for other processes locks. */
152
153#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
154const int SA_SIGINFO = 0x40;
155const int SIG_SETMASK = 3;
156#elif defined(__mips__)
157const int SA_SIGINFO = 8;
158const int SIG_SETMASK = 3;
159#else
160const int SA_SIGINFO = 4;
161const int SIG_SETMASK = 2;
162#endif
163
164namespace __tsan {
// One deferred signal. When a signal arrives at an unsafe point it is saved
// here and replayed later (see ProcessPendingSignals callers in this file).
struct SignalDesc {
  bool armed;                   // slot holds a signal not yet delivered
  __sanitizer_siginfo siginfo;  // siginfo captured at original delivery
  ucontext_t ctx;               // user context captured at original delivery
};
170
// Per-thread signal state, lazily allocated by SigCtx() on first use.
struct ThreadSignalContext {
  int int_signal_send;                    // saved/restored across setjmp/longjmp
  SignalDesc pending_signals[kSigCount];  // one deferred slot per signal number
  // emptyset and oldset are too big for stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer::Vector<__sanitizer_sigset_t> oldset;
};
178
// Marks the thread as inside a blocking libc call, first draining any signals
// that are already pending so they are not delayed for the call's duration.
void EnterBlockingFunc(ThreadState *thr) {
  for (;;) {
    // The order is important to not delay a signal infinitely if it's
    // delivered right before we set in_blocking_func. Note: we can't call
    // ProcessPendingSignals when in_blocking_func is set, or we can handle
    // a signal synchronously when we are already handling a signal.
    atomic_store(a: &thr->in_blocking_func, v: 1, mo: memory_order_relaxed);
    if (atomic_load(a: &thr->pending_signals, mo: memory_order_relaxed) == 0)
      break;
    // A signal slipped in: step out of blocking mode, handle it, retry.
    atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
    ProcessPendingSignals(thr);
  }
}
192
193// The sole reason tsan wraps atexit callbacks is to establish synchronization
194// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();  // user callback (cast to the proper signature at call time)
  void *arg;    // user argument for __cxa_atexit/on_exit style callbacks
  uptr pc;      // PC of the registration site, replayed in reports
};
200
201// InterceptorContext holds all global data required for interceptors.
202// It's explicitly constructed in InitializeInterceptors with placement new
203// and is never destroyed. This allows usage of members with non-trivial
204// constructors and destructors.
struct InterceptorContext {
  // The object is 64-byte aligned, because we want hot data to be located
  // in a single cache line if possible (it's accessed in every interceptor).
  alignas(64) LibIgnore libignore;
  __sanitizer_sigaction sigactions[kSigCount];  // our saved user handlers
#if !SANITIZER_APPLE && !SANITIZER_NETBSD
  unsigned finalize_key;  // pthread TLS key used to run thread finalization
#endif

  // Protects AtExitStack (atexit may be called concurrently).
  Mutex atexit_mu;
  Vector<struct AtExitCtx *> AtExitStack;

  InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
};
219
220alignas(64) static char interceptor_placeholder[sizeof(InterceptorContext)];
221InterceptorContext *interceptor_ctx() {
222 return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
223}
224
225LibIgnore *libignore() {
226 return &interceptor_ctx()->libignore;
227}
228
// Seeds LibIgnore from "lib"-type suppressions and from the
// ignore_noninstrumented_modules flag, then scans loaded libraries once.
void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s1: s->type, s2: kSuppressionLib))
      libignore()->AddIgnoredLibrary(name_templ: s->templ);
  }
  if (flags()->ignore_noninstrumented_modules)
    libignore()->IgnoreNoninstrumentedModules(enable: true);
  libignore()->OnLibraryLoaded(name: 0);
}
241
242// The following two hooks can be used by for cooperative scheduling when
243// locking.
244#ifdef TSAN_EXTERNAL_HOOKS
245void OnPotentiallyBlockingRegionBegin();
246void OnPotentiallyBlockingRegionEnd();
247#else
248SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
249SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
250#endif
251
252// FIXME: Use for `in_symbolizer()` as well. As-is we can't use
253// `DlSymAllocator`, because it uses the primary allocator only. Symbolizer
254// requires support of the secondary allocator for larger blocks.
struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
  // Route allocations through DlSymAllocator only before the runtime is
  // fully initialized (i.e. allocations made from inside dlsym itself).
  static bool UseImpl() { return (ctx && !ctx->initialized); }
};
258
259} // namespace __tsan
260
// Returns the thread's signal context, allocating it on first use.
// Returns null for dead threads (no allocation is attempted then).
static ThreadSignalContext *SigCtx(ThreadState *thr) {
  // This function may be called reentrantly if it is interrupted by a signal
  // handler. Use CAS to handle the race.
  uptr ctx = atomic_load(a: &thr->signal_ctx, mo: memory_order_relaxed);
  if (ctx == 0 && !thr->is_dead) {
    uptr pctx =
        (uptr)MmapOrDie(size: sizeof(ThreadSignalContext), mem_type: "ThreadSignalContext");
    // Clear shadow for the new mapping so it does not produce reports.
    MemoryResetRange(thr, pc: (uptr)&SigCtx, addr: pctx, size: sizeof(ThreadSignalContext));
    if (atomic_compare_exchange_strong(a: &thr->signal_ctx, cmp: &ctx, xchg: pctx,
                                       mo: memory_order_relaxed)) {
      ctx = pctx;
    } else {
      // Lost the race to a reentrant caller; free our copy and use theirs.
      UnmapOrDie(addr: (ThreadSignalContext *)pctx, size: sizeof(ThreadSignalContext));
    }
  }
  return (ThreadSignalContext *)ctx;
}
278
// Entry bookkeeping common to all interceptors: lazy init, blocking-state
// fixup, shadow-stack FuncEntry, and ignore-begin for ignored libraries.
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr) {
  LazyInitialize(thr);
  if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
    // pthread_join is marked as blocking, but it's also known to call other
    // intercepted functions (mmap, free). If we don't reset in_blocking_func
    // we can get deadlocks and memory corruptions if we deliver a synchronous
    // signal inside of an mmap/free interceptor.
    // So reset it and restore it back in the destructor.
    // See https://github.com/google/sanitizers/issues/1540
    atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
    in_blocking_func_ = true;
  }
  if (!thr_->is_inited) return;
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  // Ignore memory accesses inside the interceptor if the call site is in an
  // ignored library or interceptor-access ignoring is forced by flags.
  ignoring_ =
      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
                                libignore()->IsIgnored(pc, pc_in_ignored_lib: &in_ignored_lib_));
  EnableIgnores();
}
301
// Mirrors the constructor: pops ignores, restores blocking state, delivers
// deferred signals, and pops the shadow-stack frame.
ScopedInterceptor::~ScopedInterceptor() {
  if (!thr_->is_inited) return;
  DisableIgnores();
  if (UNLIKELY(in_blocking_func_))
    EnterBlockingFunc(thr: thr_);  // restore the flag cleared in the ctor
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr: thr_);
    FuncExit(thr: thr_);
    CheckedMutex::CheckNoLocks();  // no runtime locks may leak past here
  }
}
313
NOINLINE
// Slow path of EnableIgnores: begin race-ignoring and, for ignored libs,
// also suppress reports and mark the thread as inside an ignored library.
void ScopedInterceptor::EnableIgnoresImpl() {
  ThreadIgnoreBegin(thr: thr_, pc: 0);
  if (flags()->ignore_noninstrumented_modules)
    thr_->suppress_reports++;
  if (in_ignored_lib_) {
    DCHECK(!thr_->in_ignored_lib);
    thr_->in_ignored_lib = true;
  }
}
324
NOINLINE
// Exact inverse of EnableIgnoresImpl; must undo every counter it bumped.
void ScopedInterceptor::DisableIgnoresImpl() {
  ThreadIgnoreEnd(thr: thr_);
  if (flags()->ignore_noninstrumented_modules)
    thr_->suppress_reports--;
  if (in_ignored_lib_) {
    DCHECK(thr_->in_ignored_lib);
    thr_->in_ignored_lib = false;
  }
}
335
336#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
337#if SANITIZER_FREEBSD
338# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
339 INTERCEPT_FUNCTION(_pthread_##func)
340#else
341# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
342#endif
343#if SANITIZER_NETBSD
344# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
345 INTERCEPT_FUNCTION(__libc_##func)
346# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
347 INTERCEPT_FUNCTION(__libc_thr_##func)
348#else
349# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
350# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
351#endif
352
353#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
354 MemoryAccessRange((thr), (pc), (uptr)(s), \
355 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
356
357#define READ_STRING(thr, pc, s, n) \
358 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
359
360#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
361
// RAII guard wrapping a real blocking libc call (see BLOCK_REAL): enters
// blocking mode on construction and leaves it on destruction.
struct BlockingCall {
  explicit BlockingCall(ThreadState *thr)
      : thr(thr) {
    EnterBlockingFunc(thr);
    // When we are in a "blocking call", we process signals asynchronously
    // (right when they arrive). In this context we do not expect to be
    // executing any user/runtime code. The known interceptor sequence when
    // this is not true is: pthread_join -> munmap(stack). It's fine
    // to ignore munmap in this case -- we handle stack shadow separately.
    thr->ignore_interceptors++;
  }

  ~BlockingCall() {
    thr->ignore_interceptors--;
    atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
  }

  ThreadState *thr;
};
381
382TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
383 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
384 unsigned res = BLOCK_REAL(sleep)(sec);
385 AfterSleep(thr, pc);
386 return res;
387}
388
389TSAN_INTERCEPTOR(int, usleep, long_t usec) {
390 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
391 int res = BLOCK_REAL(usleep)(usec);
392 AfterSleep(thr, pc);
393 return res;
394}
395
396TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
397 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
398 int res = BLOCK_REAL(nanosleep)(req, rem);
399 AfterSleep(thr, pc);
400 return res;
401}
402
// pause(2): purely blocking, so no AfterSleep bookkeeping is done here.
TSAN_INTERCEPTOR(int, pause, int fake) {
  SCOPED_TSAN_INTERCEPTOR(pause, fake);
  return BLOCK_REAL(pause)(fake);
}
407
408// Note: we specifically call the function in such strange way
409// with "installed_at" because in reports it will appear between
410// callback frames and the frame that installed the callback.
// Trampoline for atexit() callbacks registered without a dso handle.
// Pops the most recently registered AtExitCtx (LIFO matches atexit order)
// and invokes it with acquire synchronization against the registration.
static void at_exit_callback_installed_at() {
  AtExitCtx *ctx;
  {
    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);

    // Pop AtExitCtx from the top of the stack of callback functions
    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
    ctx = interceptor_ctx()->AtExitStack[element];
    interceptor_ctx()->AtExitStack.PopBack();
  }

  ThreadState *thr = cur_thread();
  // Pair with the Release in setup_at_exit_wrapper.
  Acquire(thr, pc: ctx->pc, addr: (uptr)ctx);
  FuncEntry(thr, pc: ctx->pc);  // make the registration site visible in reports
  ((void(*)())ctx->f)();
  FuncExit(thr);
  Free(p&: ctx);
}
430
// Trampoline for __cxa_atexit callbacks; arg is the heap-allocated AtExitCtx.
static void cxa_at_exit_callback_installed_at(void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  // Pair with the Release in setup_at_exit_wrapper.
  Acquire(thr, pc: ctx->pc, addr: (uptr)arg);
  FuncEntry(thr, pc: ctx->pc);  // make the registration site visible in reports
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  FuncExit(thr);
  Free(p&: ctx);
}
440
441static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
442 void *arg, void *dso);
443
444#if !SANITIZER_ANDROID
// atexit(3): wraps the callback so registration and execution synchronize.
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (in_symbolizer())
    return 0;
  // We want to setup the atexit callback even if we are in ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), f: (void (*)())f, arg: 0, dso: 0);
}
453#endif
454
// __cxa_atexit (Itanium C++ ABI): same wrapping as atexit, with dso handle.
TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), f: (void (*)())f, arg, dso);
}
461
// Common registration path for atexit/__cxa_atexit. Allocates an AtExitCtx,
// publishes it with Release (the trampolines Acquire it), and registers the
// appropriate trampoline with the real __cxa_atexit.
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
                                 void *arg, void *dso) {
  auto *ctx = New<AtExitCtx>();
  ctx->f = f;
  ctx->arg = arg;
  ctx->pc = pc;
  Release(thr, pc, addr: (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res;
  if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0
    // Store ctx in a local stack-like structure

    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);
    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
    // due to atexit_mu held on exit from the calloc interceptor.
    ScopedIgnoreInterceptors ignore;

    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
                             0, 0);
    // Push AtExitCtx on the top of the stack of callback functions
    if (!res) {
      interceptor_ctx()->AtExitStack.PushBack(v: ctx);
    }
  } else {
    res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
  }
  ThreadIgnoreEnd(thr);
  return res;
}
495
496#if !SANITIZER_APPLE && !SANITIZER_NETBSD
// Trampoline for on_exit() callbacks; forwards the exit status to the user
// function and synchronizes with the registration via Acquire.
static void on_exit_callback_installed_at(int status, void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  // Pair with the Release in the on_exit interceptor.
  Acquire(thr, pc: ctx->pc, addr: (uptr)arg);
  FuncEntry(thr, pc: ctx->pc);  // make the registration site visible in reports
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  FuncExit(thr);
  Free(p&: ctx);
}
506
// on_exit(3) (glibc/BSD extension): same wrapping scheme as atexit.
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  auto *ctx = New<AtExitCtx>();
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  ctx->pc = GET_CALLER_PC();
  Release(thr, pc, addr: (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
  ThreadIgnoreEnd(thr);
  return res;
}
523#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
524#else
525#define TSAN_MAYBE_INTERCEPT_ON_EXIT
526#endif
527
528// Cleanup old bufs.
// Drops saved jmp_bufs whose stack pointer is at or below sp (i.e. frames
// that have already been popped and can never be longjmp'ed to again).
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      // Swap-remove: overwrite with the last element, shrink, and re-examine
      // index i (hence the i--).
      uptr sz = thr->jmp_bufs.Size();
      internal_memcpy(dest: buf, src: &thr->jmp_bufs[sz - 1], n: sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}
540
// Records the state a later LongJmp must restore: shadow-stack depth,
// signal-context fields, and the blocking/in-signal flags, keyed by sp.
static void SetJmp(ThreadState *thr, uptr sp) {
  if (!thr->is_inited)  // called from libc guts during bootstrap
    return;
  // Cleanup old bufs.
  JmpBufGarbageCollect(thr, sp);
  // Remember the buf.
  JmpBuf *buf = thr->jmp_bufs.PushBack();
  buf->sp = sp;
  buf->shadow_stack_pos = thr->shadow_stack_pos;
  ThreadSignalContext *sctx = SigCtx(thr);
  // sctx may be null if the signal context was never allocated.
  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
  buf->oldset_stack_size = sctx ? sctx->oldset.Size() : 0;
  buf->in_blocking_func = atomic_load(a: &thr->in_blocking_func, mo: memory_order_relaxed);
  buf->in_signal_handler = atomic_load(a: &thr->in_signal_handler,
      mo: memory_order_relaxed);
}
557
// Restores the state saved by SetJmp for the jmp_buf matching env's sp:
// unwinds the shadow stack and reinstates signal/blocking flags.
// Dies if no matching buf exists (longjmp into an unknown frame).
static void LongJmp(ThreadState *thr, uptr *env) {
  uptr sp = ExtractLongJmpSp(env);
  // Find the saved buf with matching sp.
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp == sp) {
      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
      // Unwind the stack.
      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
        FuncExit(thr);
      ThreadSignalContext *sctx = SigCtx(thr);
      if (sctx) {
        sctx->int_signal_send = buf->int_signal_send;
        // Pop sigset entries pushed after the corresponding setjmp.
        while (sctx->oldset.Size() > buf->oldset_stack_size)
          sctx->oldset.PopBack();
      }
      atomic_store(a: &thr->in_blocking_func, v: buf->in_blocking_func,
          mo: memory_order_relaxed);
      atomic_store(a: &thr->in_signal_handler, v: buf->in_signal_handler,
          mo: memory_order_relaxed);
      JmpBufGarbageCollect(thr, sp: buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf(format: "ThreadSanitizer: can't find longjmp buf\n");
  CHECK(0);
}
585
586// FIXME: put everything below into a common extern "C" block?
// Called from the hand-written setjmp assembly trampolines with the caller's
// stack pointer; records the jump target for a later LongJmp.
extern "C" void __tsan_setjmp(uptr sp) { SetJmp(thr: cur_thread_init(), sp); }
588
589#if SANITIZER_APPLE
590TSAN_INTERCEPTOR(int, setjmp, void *env);
591TSAN_INTERCEPTOR(int, _setjmp, void *env);
592TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
593#else // SANITIZER_APPLE
594
595#if SANITIZER_NETBSD
596#define setjmp_symname __setjmp14
597#define sigsetjmp_symname __sigsetjmp14
598#else
599#define setjmp_symname setjmp
600#define sigsetjmp_symname sigsetjmp
601#endif
602
603DEFINE_REAL(int, setjmp_symname, void *env)
604DEFINE_REAL(int, _setjmp, void *env)
605DEFINE_REAL(int, sigsetjmp_symname, void *env)
606#if !SANITIZER_NETBSD
607DEFINE_REAL(int, __sigsetjmp, void *env)
608#endif
609
610// The real interceptor for setjmp is special, and implemented in pure asm. We
611// just need to initialize the REAL functions so that they can be used in asm.
// Resolves the REAL() pointers for the setjmp family so the asm interceptors
// can tail-call them; the asm side does the actual interception.
static void InitializeSetjmpInterceptors() {
  // We can not use TSAN_INTERCEPT to get setjmp addr, because it does &setjmp and
  // setjmp is not present in some versions of libc.
  using __interception::InterceptFunction;
  InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), ptr_to_real: (uptr*)&REAL(setjmp_symname), func: 0, trampoline: 0);
  InterceptFunction(name: "_setjmp", ptr_to_real: (uptr*)&REAL(_setjmp), func: 0, trampoline: 0);
  InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), ptr_to_real: (uptr*)&REAL(sigsetjmp_symname), func: 0,
                    trampoline: 0);
#if !SANITIZER_NETBSD
  InterceptFunction(name: "__sigsetjmp", ptr_to_real: (uptr*)&REAL(__sigsetjmp), func: 0, trampoline: 0);
#endif
}
624#endif // SANITIZER_APPLE
625
626#if SANITIZER_NETBSD
627#define longjmp_symname __longjmp14
628#define siglongjmp_symname __siglongjmp14
629#else
630#define longjmp_symname longjmp
631#define siglongjmp_symname siglongjmp
632#endif
633
// longjmp(3): restore TSan state before the real (noreturn) jump.
TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
  // bad things will happen. We will jump over ScopedInterceptor dtor and can
  // leave thr->in_ignored_lib set.
  {
    SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
  }
  LongJmp(thr: cur_thread(), env);
  REAL(longjmp_symname)(env, val);  // does not return
}
644
// siglongjmp(3): same scoping trick as the longjmp interceptor above.
TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
  }
  LongJmp(thr: cur_thread(), env);
  REAL(siglongjmp_symname)(env, val);  // does not return
}
652
653#if SANITIZER_NETBSD
// NetBSD _longjmp: same scoping trick as the longjmp interceptor above.
TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(_longjmp)(env, val);  // does not return
}
661#endif
662
663#if !SANITIZER_APPLE
// malloc(3): routes to the internal allocator inside the symbolizer, to
// DlsymAlloc during early startup, and to the user allocator otherwise.
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
  if (in_symbolizer())
    return InternalAlloc(size);
  if (DlsymAlloc::Use())
    return DlsymAlloc::Allocate(size_in_bytes: size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(malloc, size);
    p = user_alloc(thr, pc, sz: size);
  }
  // Hook is invoked outside the interceptor scope.
  invoke_malloc_hook(ptr: p, size);
  return p;
}
677
678// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
679// __libc_memalign so that (1) we can detect races (2) free will not be called
680// on libc internally allocated blocks.
// See the comment above: intercepted for glibc<2.25 dynamic TLS blocks.
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}
685
// calloc(3): same routing as malloc; user_calloc checks n*size overflow.
TSAN_INTERCEPTOR(void *, calloc, uptr n, uptr size) {
  if (in_symbolizer())
    return InternalCalloc(count: n, size);
  if (DlsymAlloc::Use())
    return DlsymAlloc::Callocate(nmemb: n, size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(calloc, n, size);
    p = user_calloc(thr, pc, sz: size, n);
  }
  // NOTE(review): n * size here may wrap, but p is null in that case since
  // user_calloc rejects overflowing requests — hook sees (null, wrapped size).
  invoke_malloc_hook(ptr: p, size: n * size);
  return p;
}
699
// realloc(3): also handles pointers that were handed out by DlsymAlloc
// during early startup and are only freed/resized after init.
TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
  if (in_symbolizer())
    return InternalRealloc(p, size);
  if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr: p))
    return DlsymAlloc::Realloc(ptr: p, new_size: size);
  if (p)
    invoke_free_hook(ptr: p);  // old block is logically freed by realloc
  {
    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
    p = user_realloc(thr, pc, p, sz: size);
  }
  invoke_malloc_hook(ptr: p, size);
  return p;
}
714
// reallocarray(3): like realloc with an n*size request; overflow checking is
// delegated to user_reallocarray/InternalReallocArray.
TSAN_INTERCEPTOR(void *, reallocarray, void *p, uptr n, uptr size) {
  if (in_symbolizer())
    return InternalReallocArray(p, count: n, size);
  if (p)
    invoke_free_hook(ptr: p);
  {
    SCOPED_INTERCEPTOR_RAW(reallocarray, p, n, size);
    p = user_reallocarray(thr, pc, p, sz: size, n);
  }
  // NOTE(review): hook receives only `size`, not n*size — presumably matches
  // the other sanitizers' convention; verify against malloc hook contract.
  invoke_malloc_hook(ptr: p, size);
  return p;
}
727
// free(3): null is a no-op; symbolizer and DlsymAlloc blocks go back to
// their own allocators, everything else to the user allocator.
TSAN_INTERCEPTOR(void, free, void *p) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(ptr: p))
    return DlsymAlloc::Free(ptr: p);
  invoke_free_hook(ptr: p);
  SCOPED_INTERCEPTOR_RAW(free, p);
  user_free(thr, pc, p);
}
739
740# if SANITIZER_INTERCEPT_FREE_SIZED
// C23 free_sized: size is accepted for ABI compatibility but not validated
// here; routing is identical to free.
TSAN_INTERCEPTOR(void, free_sized, void *p, uptr size) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(ptr: p))
    return DlsymAlloc::Free(ptr: p);
  invoke_free_hook(ptr: p);
  SCOPED_INTERCEPTOR_RAW(free_sized, p, size);
  user_free(thr, pc, p);
}
752# define TSAN_MAYBE_INTERCEPT_FREE_SIZED INTERCEPT_FUNCTION(free_sized)
753# else
754# define TSAN_MAYBE_INTERCEPT_FREE_SIZED
755# endif
756
757# if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED
// C23 free_aligned_sized: alignment/size accepted but not validated here;
// routing is identical to free.
TSAN_INTERCEPTOR(void, free_aligned_sized, void *p, uptr alignment, uptr size) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(ptr: p))
    return DlsymAlloc::Free(ptr: p);
  invoke_free_hook(ptr: p);
  SCOPED_INTERCEPTOR_RAW(free_aligned_sized, p, alignment, size);
  user_free(thr, pc, p);
}
769# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED \
770 INTERCEPT_FUNCTION(free_aligned_sized)
771# else
772# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
773# endif
774
// Obsolete glibc cfree: identical routing to free.
TSAN_INTERCEPTOR(void, cfree, void *p) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(ptr: p))
    return DlsymAlloc::Free(ptr: p);
  invoke_free_hook(ptr: p);
  SCOPED_INTERCEPTOR_RAW(cfree, p);
  user_free(thr, pc, p);
}
786
// malloc_usable_size(3): answered from TSan's own allocator metadata.
TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
  SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
  return user_alloc_usable_size(p);
}
791#else
792# define TSAN_MAYBE_INTERCEPT_FREE_SIZED
793# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
794#endif
795
796TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
797 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
798 uptr srclen = internal_strlen(s: src);
799 MemoryAccessRange(thr, pc, addr: (uptr)dst, size: srclen + 1, is_write: true);
800 MemoryAccessRange(thr, pc, addr: (uptr)src, size: srclen + 1, is_write: false);
801 return REAL(strcpy)(dst, src);
802}
803
804TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, usize n) {
805 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
806 uptr srclen = internal_strnlen(s: src, maxlen: n);
807 MemoryAccessRange(thr, pc, addr: (uptr)dst, size: n, is_write: true);
808 MemoryAccessRange(thr, pc, addr: (uptr)src, size: min(a: srclen + 1, b: n), is_write: false);
809 return REAL(strncpy)(dst, src, n);
810}
811
812TSAN_INTERCEPTOR(char*, strdup, const char *str) {
813 SCOPED_TSAN_INTERCEPTOR(strdup, str);
814 // strdup will call malloc, so no instrumentation is required here.
815 return REAL(strdup)(str);
816}
817
818// Zero out addr if it points into shadow memory and was provided as a hint
819// only, i.e., MAP_FIXED is not set.
820static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
821 if (*addr) {
822 if (!IsAppMem(mem: (uptr)*addr) || !IsAppMem(mem: (uptr)*addr + sz - 1)) {
823 if (flags & MAP_FIXED) {
824 errno = errno_EINVAL;
825 return false;
826 } else {
827 *addr = 0;
828 }
829 }
830 }
831 return true;
832}
833
// Shared body for all mmap-family interceptors: sanitizes the address hint,
// performs the real mmap, and imitates a write to the mapped range so later
// accesses have shadow history.
template <class Mmap>
static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
                              void *addr, SIZE_T sz, int prot, int flags,
                              int fd, OFF64_T off) {
  if (!fix_mmap_addr(addr: &addr, sz, flags)) return MAP_FAILED;
  void *res = real_mmap(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (!IsAppMem(mem: (uptr)res) || !IsAppMem(mem: (uptr)res + sz - 1)) {
      // The kernel placed the mapping where TSan has no shadow -- fatal.
      Report(format: "ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
             addr, (void*)sz, res);
      Die();
    }
    if (fd > 0) FdAccess(thr, pc, fd);  // file-backed mapping touches the fd
    MemoryRangeImitateWriteOrResetRange(thr, pc, addr: (uptr)res, size: sz);
  }
  return res;
}
851
852template <class Munmap>
853static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
854 void *addr, SIZE_T sz) {
855 UnmapShadow(thr, addr: (uptr)addr, size: sz);
856 int res = real_munmap(addr, sz);
857 return res;
858}
859
860#if SANITIZER_LINUX
// memalign(3) (obsolete, Linux only): route to the user allocator.
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}
865#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
866#else
867#define TSAN_MAYBE_INTERCEPT_MEMALIGN
868#endif
869
870#if !SANITIZER_APPLE
// C11 aligned_alloc: internal allocator inside the symbolizer, user
// allocator otherwise.
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
  if (in_symbolizer())
    return InternalAlloc(size: sz, cache: nullptr, alignment: align);
  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
  return user_aligned_alloc(thr, pc, align, sz);
}
877
// valloc(3): page-aligned allocation; same symbolizer routing as above.
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
  if (in_symbolizer())
    return InternalAlloc(size: sz, cache: nullptr, alignment: GetPageSizeCached());
  SCOPED_INTERCEPTOR_RAW(valloc, sz);
  return user_valloc(thr, pc, sz);
}
884#endif
885
886#if SANITIZER_LINUX
// pvalloc(3): like valloc but the size is rounded up to a page multiple
// (zero becomes one page), matching glibc semantics.
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
  if (in_symbolizer()) {
    uptr PageSize = GetPageSizeCached();
    sz = sz ? RoundUpTo(size: sz, boundary: PageSize) : PageSize;
    return InternalAlloc(size: sz, cache: nullptr, alignment: PageSize);
  }
  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
  return user_pvalloc(thr, pc, sz);
}
896#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
897#else
898#define TSAN_MAYBE_INTERCEPT_PVALLOC
899#endif
900
901#if !SANITIZER_APPLE
// posix_memalign(3): returns 0/ENOMEM and stores the block via memptr,
// matching the POSIX contract (errno is not used).
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
  if (in_symbolizer()) {
    void *p = InternalAlloc(size: sz, cache: nullptr, alignment: align);
    if (!p)
      return errno_ENOMEM;
    *memptr = p;
    return 0;
  }
  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
  return user_posix_memalign(thr, pc, memptr, align, sz);
}
913#endif
914
915// Both __cxa_guard_acquire and pthread_once 0-initialize
916// the object initially. pthread_once does not have any
917// other ABI requirements. __cxa_guard_acquire assumes
918// that any non-0 value in the first byte means that
919// initialization is completed. Contents of the remaining
920// bytes are up to us.
921constexpr u32 kGuardInit = 0;
922constexpr u32 kGuardDone = 1;
923constexpr u32 kGuardRunning = 1 << 16;
924constexpr u32 kGuardWaiter = 1 << 17;
925
// Once-initialization guard (used for __cxa_guard_acquire/pthread_once).
// Returns 1 if the caller won the race and must run the initializer
// (and later call guard_release), 0 if initialization is already done.
// Waiters block on the futex; kGuardWaiter flags that a wake is needed.
static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                         bool blocking_hooks = true) {
  bool in_potentially_blocking_region = false;
  auto on_exit = at_scope_exit(fn: [&] {
    // Balance OnPotentiallyBlockingRegionBegin on every exit path.
    if (in_potentially_blocking_region)
      OnPotentiallyBlockingRegionEnd();
  });

  for (;;) {
    u32 cmp = atomic_load(a: g, mo: memory_order_acquire);
    if (cmp == kGuardInit) {
      // Try to claim the initializer role.
      if (atomic_compare_exchange_strong(a: g, cmp: &cmp, xchg: kGuardRunning,
                                         mo: memory_order_relaxed))
        return 1;
    } else if (cmp == kGuardDone) {
      // Synchronize with the thread that ran the initializer.
      if (!thr->in_ignored_lib)
        Acquire(thr, pc, addr: (uptr)g);
      return 0;
    } else {
      // Initializer is running elsewhere: mark ourselves as a waiter
      // (if not already marked) and block until guard_release wakes us.
      if ((cmp & kGuardWaiter) ||
          atomic_compare_exchange_strong(a: g, cmp: &cmp, xchg: cmp | kGuardWaiter,
                                         mo: memory_order_relaxed)) {
        if (blocking_hooks && !in_potentially_blocking_region) {
          in_potentially_blocking_region = true;
          OnPotentiallyBlockingRegionBegin();
        }
        FutexWait(p: g, cmp: cmp | kGuardWaiter);
      }
    }
  }
}
957
958static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
959 u32 v) {
960 if (!thr->in_ignored_lib)
961 Release(thr, pc, addr: (uptr)g);
962 u32 old = atomic_exchange(a: g, v, mo: memory_order_release);
963 if (old & kGuardWaiter)
964 FutexWake(p: g, count: 1 << 30);
965}
966
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are especially defined as weak functions (so that they don't
// cause link errors when user defines them as well). So they silently
// auto-disable themselves when such symbol is already present in the binary. If
// we link libstdc++ statically, it will bring own __cxa_guard_acquire which
// will silently replace our interceptor. That's why on Linux we simply export
// these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support statically linking, so we just use a regular
// interceptor.
#if SANITIZER_APPLE
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
#endif

// Used in thread-safe function static initialization.
// Returns non-zero iff the caller must run the static initializer.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
  return guard_acquire(thr, pc, g);
}

// Initializer finished successfully: mark done and wake waiters.
STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  guard_release(thr, pc, g, v: kGuardDone);
}

// Initializer threw: reset the guard so another thread can retry.
STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  guard_release(thr, pc, g, v: kGuardInit);
}
999
namespace __tsan {
// Tears down all TSan per-thread state for the current thread.
// Order matters: the thread must be finished before its Processor is
// unwired/destroyed, and the thread-local slot is released last.
void DestroyThreadState() {
  ThreadState *thr = cur_thread();
  Processor *proc = thr->proc();
  ThreadFinish(thr);
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
  DTLS_Destroy();
  cur_thread_finalize();
}

// Frees the lazily-allocated per-thread signal context, if any.
// Called from thread teardown; clears the pointer before unmapping so no
// stale reference survives.
void PlatformCleanUpThreadState(ThreadState *thr) {
  ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
      a: &thr->signal_ctx, mo: memory_order_relaxed);
  if (sctx) {
    atomic_store(a: &thr->signal_ctx, v: 0, mo: memory_order_relaxed);
    sctx->oldset.Reset();
    UnmapOrDie(addr: sctx, size: sizeof(*sctx));
  }
}
}  // namespace __tsan

#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
// pthread TLS destructor used to run TSan thread teardown as late as
// possible: re-arms itself (by re-setting the key) until the destructor
// iteration counter reaches 1, then destroys the thread state.
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(key: interceptor_ctx()->finalize_key,
        v: (void*)(iter - 1))) {
      Printf(format: "ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    return;
  }
  DestroyThreadState();
}
#endif
1036
1037
// Handshake object passed from pthread_create to the new thread.
// Lives on the creator's stack; 'created'/'started' sequence the two sides
// (see the comment in the pthread_create interceptor).
struct ThreadParam {
  void* (*callback)(void *arg);  // user thread entry
  void *param;                   // user argument
  Tid tid;                       // filled in by the creator after ThreadCreate
  Semaphore created;             // creator -> child: registration done
  Semaphore started;             // child -> creator: ThreadStart done
};

// Trampoline installed as the real pthread entry point: sets up TSan
// per-thread state before running the user callback.
extern "C" void *__tsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  {
    ThreadState *thr = cur_thread_init();
    // Thread-local state is not initialized yet.
    ScopedIgnoreInterceptors ignore;
#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
    ThreadIgnoreBegin(thr, pc: 0);
    // Arm the TLS destructor that eventually runs DestroyThreadState.
    if (pthread_setspecific(key: interceptor_ctx()->finalize_key,
                            v: (void *)GetPthreadDestructorIterations())) {
      Printf(format: "ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    ThreadIgnoreEnd(thr);
#endif
    // Wait until the creator has registered us (p->tid is valid only after).
    p->created.Wait();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadStart(thr, tid: p->tid, os_id: GetTid(), thread_type: ThreadType::Regular);
    // Let the creator return from pthread_create; p dangles after this.
    p->started.Post();
  }

  AdaptiveDelay::BeforeChildThreadRuns();

  void *res = callback(param);
  // Prevent the callback from being tail called,
  // it mixes up stack traces.
  volatile int foo = 42;
  foo++;
  return res;
}
1079
// pthread_create interceptor: registers the new thread with TSan and
// performs a two-way handshake with __tsan_thread_start_func (above).
TSAN_INTERCEPTOR(int, pthread_create,
    void *th, void *attr, void *(*callback)(void*), void * param) {
  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);

  MaybeSpawnBackgroundThread();

  // Threads created after a multi-threaded fork are not supported.
  if (ctx->after_multithreaded_fork) {
    if (flags()->die_after_fork) {
      Report(format: "ThreadSanitizer: starting new threads after multi-threaded "
          "fork is not supported. Dying (set die_after_fork=0 to override)\n");
      Die();
    } else {
      VPrintf(1,
              "ThreadSanitizer: starting new threads after multi-threaded "
              "fork is not supported (pid %lu). Continuing because of "
              "die_after_fork=0, but you are on your own\n",
              internal_getpid());
    }
  }
  // Use a local attr when the caller passed none, so AdjustStackSize and
  // the detach-state query below always have something to work with.
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(attr: &myattr);
    attr = &myattr;
  }
  int detached = 0;
  REAL(pthread_attr_getdetachstate)(attr, &detached);
  AdjustStackSize(attr);

  ThreadParam p;
  p.callback = callback;
  p.param = param;
  p.tid = kMainTid;
  int res = -1;
  {
    // Otherwise we see false positives in pthread stack manipulation.
    ScopedIgnoreInterceptors ignore;
    ThreadIgnoreBegin(thr, pc);
    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
    ThreadIgnoreEnd(thr);
  }
  if (res == 0) {
    p.tid = ThreadCreate(thr, pc, uid: *(uptr *)th, detached: IsStateDetached(state: detached));
    CHECK_NE(p.tid, kMainTid);
    // Synchronization on p.tid serves two purposes:
    // 1. ThreadCreate must finish before the new thread starts.
    //    Otherwise the new thread can call pthread_detach, but the pthread_t
    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
    // 2. ThreadStart must finish before this thread continues.
    //    Otherwise, this thread can call pthread_detach and reset thr->sync
    //    before the new thread got a chance to acquire from it in ThreadStart.
    p.created.Post();
    p.started.Wait();
  }
  if (attr == &myattr)
    pthread_attr_destroy(attr: &myattr);
  AdaptiveDelay::AfterThreadCreation();
  return res;
}
1138
// pthread_join interceptor: consumes the thread's registry entry up front
// (the uid may be reused right after the real join returns) and records the
// join edge on success.
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
#if SANITIZER_ANDROID
  {
    // In Bionic, if the target thread has already exited when pthread_detach is
    // called, pthread_detach will call pthread_join internally to clean it up.
    // In that case, the thread has already been consumed by the pthread_detach
    // interceptor.
    Tid tid = ctx->thread_registry.FindThread(
        [](ThreadContextBase* tctx, void* arg) {
          return tctx->user_id == (uptr)arg;
        },
        th);
    if (tid == kInvalidTid) {
      return REAL(pthread_join)(th, ret);
    }
  }
#endif
  Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  // BLOCK_REAL: join may block for a long time; allow signal delivery.
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr);
  if (res == 0) {
    ThreadJoin(thr, pc, tid);
  }
  return res;
}

// DEFINE_INTERNAL_PTHREAD_FUNCTIONS
// Raw pthread wrappers used by the runtime itself; interceptors are
// bypassed so internal threads are invisible to the race detector.
namespace __sanitizer {
int internal_pthread_create(void *th, void *attr, void *(*callback)(void *),
                            void *param) {
  ScopedIgnoreInterceptors ignore;
  return REAL(pthread_create)(th, attr, callback, param);
}
int internal_pthread_join(void *th, void **ret) {
  ScopedIgnoreInterceptors ignore;
  return REAL(pthread_join)(th, ret);
}
}  // namespace __sanitizer

// pthread_detach interceptor: consume the registry entry first (same
// uid-reuse hazard as pthread_join), then record the detach on success.
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
  SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
  Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
  }
  return res;
}

// pthread_exit interceptor: only sanity-checks the caller's ThreadState
// (real teardown happens via the TLS destructor / DestroyThreadState).
TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
  {
    SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
#if !SANITIZER_APPLE && !SANITIZER_ANDROID
    CHECK_EQ(thr, &cur_thread_placeholder);
#endif
  }
  REAL(pthread_exit)(retval);
}
1199
#if SANITIZER_LINUX
// Non-blocking join (glibc extension). The tid is consumed eagerly, so on
// failure ThreadNotJoined must put the registry entry back.
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(pthread_tryjoin_np)(th, ret);
  ThreadIgnoreEnd(thr);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, uid: (uptr)th);
  return res;
}

// Join with a deadline (glibc extension); same consume/restore protocol as
// pthread_tryjoin_np, but the call may block so BLOCK_REAL is used.
TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
                 const struct timespec *abstime) {
  SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
  Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
  ThreadIgnoreEnd(thr);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, uid: (uptr)th);
  return res;
}
#endif
1228
// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has different size in the different versions.
// If call new REAL functions for old pthread_cond_t, they will corrupt memory
// after pthread_cond_t (old cond is smaller).
// If we call old REAL functions for new pthread_cond_t, we will lose some
// functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require to have 2 versions of interceptors as well.
// But this is messy, in particular requires linker scripts when sanitizer
// runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against old
// pthread (2.2.5 is dated by 2002). And provide legacy_pthread_cond flag
// that allows to work with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
//
// Returns the condvar object to operate on: the user's object in the normal
// case, or (in legacy mode) a lazily allocated side object whose address is
// stashed in the first word of the user's pthread_cond_t. 'force' (used by
// pthread_cond_init) reinitializes the side object unconditionally.
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. Always call new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // first word of pthread_cond_t to zero.
  // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(a: p, mo: memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(size: pthread_cond_t_sz);
  internal_memset(s: newcond, c: 0, n: pthread_cond_t_sz);
  // CAS publishes the side object; on a lost race free ours and use the
  // winner's.
  if (atomic_compare_exchange_strong(a: p, cmp: &cond, xchg: (uptr)newcond,
      mo: memory_order_acq_rel))
    return newcond;
  WRAP(free)(p: newcond);
  return (void*)cond;
}

namespace {

// Bundle of state needed by the pthread_cancel cleanup handler installed
// around a condvar wait (see cond_wait below).
template <class Fn>
struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
  void *c;
  const Fn &fn;

  int Cancel() const { return fn(); }
  void Unlock() const;
};

// Cleanup handler run when the waiting thread is cancelled: re-establish
// TSan's view that the mutex is held and manually undo the scoped state
// whose destructors will not run.
template <class Fn>
void CondMutexUnlockCtx<Fn>::Unlock() const {
  // pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
  // since the thread is cancelled, so we have to manually execute them
  // (the thread still can run some user code due to pthread_cleanup_push).
  CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
  MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagDoPreLockOnPostLock);
  // Undo BlockingCall ctor effects.
  thr->ignore_interceptors--;
  si->~ScopedInterceptor();
}
}  // namespace
1297
// pthread_cond_init: force-reinitialize the (possibly legacy side) object
// and report the write to the user's memory.
INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, force: true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: true);
  return REAL(pthread_cond_init)(cond, a);
}

// Common implementation for all pthread_cond_*wait interceptors.
// Models the wait as: unlock m, run fn() (the real wait) under a
// cancellation cleanup handler, then re-lock m. 'fn' performs the actual
// blocking call; 'c' is the (possibly legacy side) condvar object.
template <class Fn>
int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
              void *c, void *m) {
  MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
  MutexUnlock(thr, pc, addr: (uptr)m);
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cpp.
  {
    // Enable signal delivery while the thread is blocked.
    BlockingCall bc(thr);
    CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
    res = call_pthread_cancel_with_cleanup(
        [](void *arg) -> int {
          return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
        },
        [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
        &arg);
  }
  // EOWNERDEAD: robust-mutex owner died; mark the mutex as repaired.
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, addr: (uptr)m);
  MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagDoPreLockOnPostLock);
  return res;
}

INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  return cond_wait(
      thr, pc, si: &si, fn: [=]() { return REAL(pthread_cond_wait)(cond, m); }, c: cond,
      m);
}

INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  return cond_wait(
      thr, pc, si: &si,
      fn: [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, c: cond,
      m);
}

#if SANITIZER_LINUX
INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
            __sanitizer_clockid_t clock, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
  return cond_wait(
      thr, pc, si: &si,
      fn: [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
      c: cond, m);
}
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
#else
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
#endif

#if SANITIZER_APPLE
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
            void *reltime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
  return cond_wait(
      thr, pc, &si,
      [=]() {
        return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
      },
      cond, m);
}
#endif

INTERCEPTOR(int, pthread_cond_signal, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
  MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
  return REAL(pthread_cond_signal)(cond);
}

INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
  MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
  return REAL(pthread_cond_broadcast)(cond);
}

INTERCEPTOR(int, pthread_cond_destroy, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
  MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: true);
  int res = REAL(pthread_cond_destroy)(cond);
  if (common_flags()->legacy_pthread_cond) {
    // Free our aux cond and zero the pointer to not leave dangling pointers.
    WRAP(free)(p: cond);
    atomic_store(a: (atomic_uintptr_t*)c, v: 0, mo: memory_order_relaxed);
  }
  return res;
}
1401
// pthread_mutex_init: register the mutex with TSan; recursive mutexes get
// the write-reentrant flag so repeated locking by the owner is not reported.
TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    u32 flagz = 0;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        if (type == PTHREAD_MUTEX_RECURSIVE ||
            type == PTHREAD_MUTEX_RECURSIVE_NP)
          flagz |= MutexFlagWriteReentrant;
    }
    MutexCreate(thr, pc, addr: (uptr)m, flagz);
  }
  return res;
}

// pthread_mutex_destroy: also unregister on EBUSY so TSan's state does not
// leak when the program destroys a locked mutex.
TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
  int res = REAL(pthread_mutex_destroy)(m);
  if (res == 0 || res == errno_EBUSY) {
    MutexDestroy(thr, pc, addr: (uptr)m);
  }
  return res;
}

// pthread_mutex_lock: PreLock before the (possibly blocking) real call,
// PostLock after success; EOWNERDEAD means a robust mutex whose owner died.
TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
  MutexPreLock(thr, pc, addr: (uptr)m);
  AdaptiveDelay::SyncOp();
  int res = BLOCK_REAL(pthread_mutex_lock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, addr: (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, addr: (uptr)m);
  if (res == errno_EINVAL)
    MutexInvalidAccess(thr, pc, addr: (uptr)m);
  return res;
}

// Trylock never blocks, so there is no PreLock and no BLOCK_REAL.
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_mutex_trylock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, addr: (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
  return res;
}

#if !SANITIZER_APPLE
// Timed lock is modeled as a trylock (it may legitimately fail with
// ETIMEDOUT, so no PreLock deadlock-detection edge is recorded).
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
  }
  return res;
}
#endif

// Unlock is modeled before the real call so the release happens-before any
// subsequent acquire by another thread.
TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
  MutexUnlock(thr, pc, addr: (uptr)m);
  int res = REAL(pthread_mutex_unlock)(m);
  AdaptiveDelay::SyncOp();
  if (res == errno_EINVAL)
    MutexInvalidAccess(thr, pc, addr: (uptr)m);
  return res;
}

#if SANITIZER_LINUX
// Clock-based timed lock (glibc extension); modeled like pthread_mutex_lock.
TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
                 __sanitizer_clockid_t clock, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
  MutexPreLock(thr, pc, addr: (uptr)m);
  AdaptiveDelay::SyncOp();
  int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, addr: (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, addr: (uptr)m);
  if (res == errno_EINVAL)
    MutexInvalidAccess(thr, pc, addr: (uptr)m);
  return res;
}
#endif

#if SANITIZER_GLIBC
#  if !__GLIBC_PREREQ(2, 34)
// glibc 2.34 applies a non-default version for the two functions. They are no
// longer expected to be intercepted by programs.
TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
  MutexPreLock(thr, pc, (uptr)m);
  AdaptiveDelay::SyncOp();
  int res = BLOCK_REAL(__pthread_mutex_lock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, (uptr)m);
  if (res == errno_EINVAL)
    MutexInvalidAccess(thr, pc, (uptr)m);
  return res;
}

TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
  MutexUnlock(thr, pc, (uptr)m);
  int res = REAL(__pthread_mutex_unlock)(m);
  AdaptiveDelay::SyncOp();
  if (res == errno_EINVAL)
    MutexInvalidAccess(thr, pc, (uptr)m);
  return res;
}
#  endif
#endif
1521
#if !SANITIZER_APPLE
// Spinlocks are modeled as plain mutexes (create/pre/post-lock/unlock
// events); spinning vs. sleeping is irrelevant to the race detector.
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
  int res = REAL(pthread_spin_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  MutexPreLock(thr, pc, addr: (uptr)m);
  AdaptiveDelay::SyncOp();
  int res = BLOCK_REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
  MutexUnlock(thr, pc, addr: (uptr)m);
  int res = REAL(pthread_spin_unlock)(m);
  AdaptiveDelay::SyncOp();
  return res;
}
#endif
1570
// Reader-writer lock interceptors. Read acquisitions are modeled with the
// MutexPre/PostReadLock events, write acquisitions with MutexPre/PostLock;
// unlock uses MutexReadOrWriteUnlock since pthread has a single unlock call.
TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
  int res = REAL(pthread_rwlock_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  MutexPreReadLock(thr, pc, addr: (uptr)m);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexPostReadLock(thr, pc, addr: (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  MutexPreLock(thr, pc, addr: (uptr)m);
  AdaptiveDelay::SyncOp();
  int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, addr: (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  AdaptiveDelay::SyncOp();
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
  MutexReadOrWriteUnlock(thr, pc, addr: (uptr)m);
  int res = REAL(pthread_rwlock_unlock)(m);
  AdaptiveDelay::SyncOp();
  return res;
}
1662
#if !SANITIZER_APPLE
// Barrier interceptors: the barrier object itself is accessed (1 byte) so
// that racing init/destroy/wait calls are detected.
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
  MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessWrite);
  int res = REAL(pthread_barrier_init)(b, a, count);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
  MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessWrite);
  int res = REAL(pthread_barrier_destroy)(b);
  return res;
}

// Each waiter Releases before the real wait and Acquires after it, so every
// thread leaving the barrier happens-after every thread that entered it.
TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
  Release(thr, pc, addr: (uptr)b);
  MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessRead);
  int res = REAL(pthread_barrier_wait)(b);
  MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessRead);
  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
    Acquire(thr, pc, addr: (uptr)b);
  }
  return res;
}
#endif
1690
// pthread_once: implemented on top of guard_acquire/guard_release rather
// than the real pthread_once, using the platform-specific control word
// inside pthread_once_t.
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
#if SANITIZER_APPLE && !SANITIZER_GO
  if (flags()->lock_during_write != kLockDuringAllWrites &&
      cur_thread_init()->in_internal_write_call) {
    // This is needed to make it through process launch without hanging
    f();
    return 0;
  }
#endif
  if (o == 0 || f == 0)
    return errno_EINVAL;
  atomic_uint32_t *a;

  // Locate the u32 state word within pthread_once_t; its offset differs per
  // platform ABI.
  if (SANITIZER_APPLE)
    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
  else if (SANITIZER_NETBSD)
    a = static_cast<atomic_uint32_t*>
          ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
  else
    a = static_cast<atomic_uint32_t*>(o);

  // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
  // result in crashes due to too little stack space.
  if (guard_acquire(thr, pc, g: a, blocking_hooks: !SANITIZER_APPLE)) {
    (*f)();
    guard_release(thr, pc, g: a, v: kGuardDone);
  }
  return 0;
}
1721
#if SANITIZER_GLIBC
// Legacy glibc stat entry points: record the fd access so synchronization
// through file descriptors is tracked.
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(version, fd, buf);
}

TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif

// fstat is a real symbol (not a __fxstat wrapper) on non-glibc systems and
// on glibc >= 2.33.
#if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat)(fd, buf);
}
#  define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
#else
#  define TSAN_MAYBE_INTERCEPT_FSTAT
#endif

#if __GLIBC_PREREQ(2, 33)
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat64)(fd, buf);
}
#  define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
#  define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif
1764
// open(2): the mode argument exists only when oflag requires it
// (OpenReadsVaArgs), so va_arg must not be read otherwise.
TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
  mode_t mode = 0;
  if (OpenReadsVaArgs(oflag)) {
    va_list ap;
    va_start(ap, oflag);
    mode = va_arg(ap, int);
    va_end(ap);
  }

  SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
  READ_STRING(thr, pc, name, 0);

  int fd;
  if (OpenReadsVaArgs(oflag))
    fd = REAL(open)(name, oflag, mode);
  else
    fd = REAL(open)(name, oflag);

  // Register the new descriptor with the fd tracker.
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
1787
#if SANITIZER_LINUX
// open64(2): large-file variant of open. Mirrors the open() interceptor
// above: the variadic mode argument is present only when oflag requires it
// (O_CREAT/O_TMPFILE per OpenReadsVaArgs), and reading va_arg when the
// caller passed no mode argument is undefined behavior.
TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
  mode_t mode = 0;
  if (OpenReadsVaArgs(oflag)) {
    va_list ap;
    va_start(ap, oflag);
    mode = va_arg(ap, int);
    va_end(ap);
  }

  SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
  READ_STRING(thr, pc, name, 0);

  int fd;
  if (OpenReadsVaArgs(oflag))
    fd = REAL(open64)(name, oflag, mode);
  else
    fd = REAL(open64)(name, oflag);

  // Register the new descriptor with the fd tracker.
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
#else
#define TSAN_MAYBE_INTERCEPT_OPEN64
#endif
1805
// creat(2): equivalent to open(name, O_CREAT|O_WRONLY|O_TRUNC, mode);
// register the resulting descriptor with the fd tracker.
TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
// Large-file variant of creat.
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat64)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
#else
#define TSAN_MAYBE_INTERCEPT_CREAT64
#endif
1828
// dup family: propagate fd tracker state from oldfd to the new descriptor.
// The 'write' flag differs: plain dup creates a synchronizing link, while
// dup2/dup3 do not.
TSAN_INTERCEPTOR(int, dup, int oldfd) {
  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
  int newfd = REAL(dup)(oldfd);
  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
    FdDup(thr, pc, oldfd, newfd, write: true);
  return newfd;
}

TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd: newfd2, write: false);
  return newfd2;
}

#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd: newfd2, write: false);
  return newfd2;
}
#endif
1854
1855#if SANITIZER_LINUX
1856TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1857 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1858 int fd = REAL(eventfd)(initval, flags);
1859 if (fd >= 0)
1860 FdEventCreate(thr, pc, fd);
1861 return fd;
1862}
1863#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1864#else
1865#define TSAN_MAYBE_INTERCEPT_EVENTFD
1866#endif
1867
1868#if SANITIZER_LINUX
// signalfd(2): the kernel may (re)configure the descriptor passed in, so
// drop any tsan state tied to the old fd before the real call, then record
// the (possibly reused) fd as a signal descriptor.
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
  SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
  FdClose(thr, pc, fd);
  fd = REAL(signalfd)(fd, mask, flags);
  if (!MustIgnoreInterceptor(thr))
    FdSignalCreate(thr, pc, fd);
  return fd;
}
1877#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1878#else
1879#define TSAN_MAYBE_INTERCEPT_SIGNALFD
1880#endif
1881
1882#if SANITIZER_LINUX
1883TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1884 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1885 int fd = REAL(inotify_init)(fake);
1886 if (fd >= 0)
1887 FdInotifyCreate(thr, pc, fd);
1888 return fd;
1889}
1890#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1891#else
1892#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1893#endif
1894
1895#if SANITIZER_LINUX
1896TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1897 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1898 int fd = REAL(inotify_init1)(flags);
1899 if (fd >= 0)
1900 FdInotifyCreate(thr, pc, fd);
1901 return fd;
1902}
1903#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1904#else
1905#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1906#endif
1907
1908TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1909 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1910 int fd = REAL(socket)(domain, type, protocol);
1911 if (fd >= 0)
1912 FdSocketCreate(thr, pc, fd);
1913 return fd;
1914}
1915
1916TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1917 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1918 int res = REAL(socketpair)(domain, type, protocol, fd);
1919 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1920 FdPipeCreate(thr, pc, rfd: fd[0], wfd: fd[1]);
1921 return res;
1922}
1923
1924TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1925 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1926 FdSocketConnecting(thr, pc, fd);
1927 int res = REAL(connect)(fd, addr, addrlen);
1928 if (res == 0 && fd >= 0)
1929 FdSocketConnect(thr, pc, fd);
1930 return res;
1931}
1932
1933TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1934 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1935 int res = REAL(bind)(fd, addr, addrlen);
1936 if (fd > 0 && res == 0)
1937 FdAccess(thr, pc, fd);
1938 return res;
1939}
1940
1941TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1942 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1943 int res = REAL(listen)(fd, backlog);
1944 if (fd > 0 && res == 0)
1945 FdAccess(thr, pc, fd);
1946 return res;
1947}
1948
// close(2): drop tsan's fd state before the real close. The internal
// symbolizer manages its own descriptors, so those are not tracked.
TSAN_INTERCEPTOR(int, close, int fd) {
  SCOPED_INTERCEPTOR_RAW(close, fd);
  if (!in_symbolizer())
    FdClose(thr, pc, fd);
  return REAL(close)(fd);
}
1955
1956#if SANITIZER_LINUX
// glibc-internal __close: tracked like close(), but with no symbolizer
// special case.
TSAN_INTERCEPTOR(int, __close, int fd) {
  SCOPED_INTERCEPTOR_RAW(__close, fd);
  FdClose(thr, pc, fd);
  return REAL(__close)(fd);
}
1962#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1963#else
1964#define TSAN_MAYBE_INTERCEPT___CLOSE
1965#endif
1966
1967// glibc guts
1968#if SANITIZER_LINUX && !SANITIZER_ANDROID
1969TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1970 SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1971 int fds[64];
1972 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1973 for (int i = 0; i < cnt; i++) FdClose(thr, pc, fd: fds[i]);
1974 REAL(__res_iclose)(state, free_addr);
1975}
1976#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1977#else
1978#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1979#endif
1980
1981TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1982 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1983 int res = REAL(pipe)(pipefd);
1984 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1985 FdPipeCreate(thr, pc, rfd: pipefd[0], wfd: pipefd[1]);
1986 return res;
1987}
1988
1989#if !SANITIZER_APPLE
1990TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1991 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1992 int res = REAL(pipe2)(pipefd, flags);
1993 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1994 FdPipeCreate(thr, pc, rfd: pipefd[0], wfd: pipefd[1]);
1995 return res;
1996}
1997#endif
1998
1999TSAN_INTERCEPTOR(int, unlink, char *path) {
2000 SCOPED_TSAN_INTERCEPTOR(unlink, path);
2001 Release(thr, pc, addr: File2addr(path));
2002 int res = REAL(unlink)(path);
2003 return res;
2004}
2005
2006TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
2007 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
2008 void *res = REAL(tmpfile)(fake);
2009 if (res) {
2010 int fd = fileno_unlocked(stream: res);
2011 if (fd >= 0)
2012 FdFileCreate(thr, pc, fd);
2013 }
2014 return res;
2015}
2016
2017#if SANITIZER_LINUX
2018TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
2019 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
2020 void *res = REAL(tmpfile64)(fake);
2021 if (res) {
2022 int fd = fileno_unlocked(stream: res);
2023 if (fd >= 0)
2024 FdFileCreate(thr, pc, fd);
2025 }
2026 return res;
2027}
2028#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
2029#else
2030#define TSAN_MAYBE_INTERCEPT_TMPFILE64
2031#endif
2032
// Best-effort flush of stdout/stderr before termination paths (abort/exit).
static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time, so only the two
  // standard output streams are flushed.
  REAL(fflush)(stdout);
  REAL(fflush)(stderr);
}
2039
TSAN_INTERCEPTOR(void, abort, int fake) {
  SCOPED_TSAN_INTERCEPTOR(abort, fake);
  // Flush buffered diagnostics before abort() terminates the process.
  FlushStreams();
  REAL(abort)(fake);
}
2045
2046TSAN_INTERCEPTOR(int, rmdir, char *path) {
2047 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
2048 Release(thr, pc, addr: Dir2addr(path));
2049 int res = REAL(rmdir)(path);
2050 return res;
2051}
2052
2053TSAN_INTERCEPTOR(int, closedir, void *dirp) {
2054 SCOPED_INTERCEPTOR_RAW(closedir, dirp);
2055 if (dirp) {
2056 int fd = dirfd(dirp);
2057 FdClose(thr, pc, fd);
2058 }
2059 return REAL(closedir)(dirp);
2060}
2061
2062#if SANITIZER_LINUX
2063TSAN_INTERCEPTOR(int, epoll_create, int size) {
2064 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
2065 int fd = REAL(epoll_create)(size);
2066 if (fd >= 0)
2067 FdPollCreate(thr, pc, fd);
2068 return fd;
2069}
2070
2071TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
2072 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
2073 int fd = REAL(epoll_create1)(flags);
2074 if (fd >= 0)
2075 FdPollCreate(thr, pc, fd);
2076 return fd;
2077}
2078
2079TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
2080 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
2081 if (epfd >= 0)
2082 FdAccess(thr, pc, fd: epfd);
2083 if (epfd >= 0 && fd >= 0)
2084 FdAccess(thr, pc, fd);
2085 if (op == EPOLL_CTL_ADD && epfd >= 0) {
2086 FdPollAdd(thr, pc, epfd, fd);
2087 FdRelease(thr, pc, fd: epfd);
2088 }
2089 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
2090 return res;
2091}
2092
2093TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
2094 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
2095 if (epfd >= 0)
2096 FdAccess(thr, pc, fd: epfd);
2097 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
2098 if (res > 0 && epfd >= 0)
2099 FdAcquire(thr, pc, fd: epfd);
2100 return res;
2101}
2102
2103TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
2104 void *sigmask) {
2105 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
2106 if (epfd >= 0)
2107 FdAccess(thr, pc, fd: epfd);
2108 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
2109 if (res > 0 && epfd >= 0)
2110 FdAcquire(thr, pc, fd: epfd);
2111 return res;
2112}
2113
2114TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
2115 void *sigmask) {
2116 SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
2117 // This function is new and may not be present in libc and/or kernel.
2118 // Since we effectively add it to libc (as will be probed by the program
2119 // using dlsym or a weak function pointer) we need to handle the case
2120 // when it's not present in the actual libc.
2121 if (!REAL(epoll_pwait2)) {
2122 errno = errno_ENOSYS;
2123 return -1;
2124 }
2125 if (MustIgnoreInterceptor(thr))
2126 REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2127 if (epfd >= 0)
2128 FdAccess(thr, pc, fd: epfd);
2129 int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2130 if (res > 0 && epfd >= 0)
2131 FdAcquire(thr, pc, fd: epfd);
2132 return res;
2133}
2134
2135# define TSAN_MAYBE_INTERCEPT_EPOLL \
2136 TSAN_INTERCEPT(epoll_create); \
2137 TSAN_INTERCEPT(epoll_create1); \
2138 TSAN_INTERCEPT(epoll_ctl); \
2139 TSAN_INTERCEPT(epoll_wait); \
2140 TSAN_INTERCEPT(epoll_pwait); \
2141 TSAN_INTERCEPT(epoll_pwait2)
2142#else
2143#define TSAN_MAYBE_INTERCEPT_EPOLL
2144#endif
2145
2146// The following functions are intercepted merely to process pending signals.
2147// If program blocks signal X, we must deliver the signal before the function
2148// returns. Similarly, if program unblocks a signal (or returns from sigsuspend)
2149// it's better to deliver the signal straight away.
TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
  // Intercepted only so pending signals are delivered around the call.
  SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
  return REAL(sigsuspend)(mask);
}
2154
TSAN_INTERCEPTOR(int, sigblock, int mask) {
  // Intercepted only so pending signals are delivered around the call.
  SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
  return REAL(sigblock)(mask);
}
2159
TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
  // Intercepted only so pending signals are delivered around the call.
  SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
  return REAL(sigsetmask)(mask);
}
2164
TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
                 __sanitizer_sigset_t *oldset) {
  // Intercepted only so pending signals are delivered around the call.
  SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
  return REAL(pthread_sigmask)(how, set, oldset);
}
2170
2171namespace __tsan {
2172
2173static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2174 VarSizeStackTrace stack;
2175 // StackTrace::GetNestInstructionPc(pc) is used because return address is
2176 // expected, OutputReport() will undo this.
2177 ObtainCurrentStack(thr, toppc: StackTrace::GetNextInstructionPc(pc), stack: &stack);
2178 // Use alloca, because malloc during signal handling deadlocks
2179 ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
2180 bool suppressed;
2181 // Take a new scope as Apple platforms require the below locks released
2182 // before symbolizing in order to avoid a deadlock
2183 {
2184 ThreadRegistryLock l(&ctx->thread_registry);
2185 new (rep) ScopedReport(ReportTypeErrnoInSignal);
2186 rep->SetSigNum(sig);
2187 suppressed = IsFiredSuppression(ctx, type: ReportTypeErrnoInSignal, trace: stack);
2188 if (!suppressed)
2189 rep->AddStack(stack, suppressable: true);
2190#if SANITIZER_APPLE
2191 } // Close this scope to release the locks before writing report
2192#endif
2193 if (!suppressed)
2194 OutputReport(thr, srep&: *rep);
2195
2196 // Need to manually destroy this because we used placement new to allocate
2197 rep->~ScopedReport();
2198#if !SANITIZER_APPLE
2199 }
2200#endif
2201}
2202
2203static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2204 int sig, __sanitizer_siginfo *info,
2205 void *uctx) {
2206 CHECK(thr->slot);
2207 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2208 if (acquire)
2209 Acquire(thr, pc: 0, addr: (uptr)&sigactions[sig]);
2210 // Signals are generally asynchronous, so if we receive a signals when
2211 // ignores are enabled we should disable ignores. This is critical for sync
2212 // and interceptors, because otherwise we can miss synchronization and report
2213 // false races.
2214 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2215 int ignore_interceptors = thr->ignore_interceptors;
2216 int ignore_sync = thr->ignore_sync;
2217 // For symbolizer we only process SIGSEGVs synchronously
2218 // (bug in symbolizer or in tsan). But we want to reset
2219 // in_symbolizer to fail gracefully. Symbolizer and user code
2220 // use different memory allocators, so if we don't reset
2221 // in_symbolizer we can get memory allocated with one being
2222 // feed with another, which can cause more crashes.
2223 int in_symbolizer = thr->in_symbolizer;
2224 if (!ctx->after_multithreaded_fork) {
2225 thr->ignore_reads_and_writes = 0;
2226 thr->fast_state.ClearIgnoreBit();
2227 thr->ignore_interceptors = 0;
2228 thr->ignore_sync = 0;
2229 thr->in_symbolizer = 0;
2230 }
2231 // Ensure that the handler does not spoil errno.
2232 const int saved_errno = errno;
2233 errno = 99;
2234 // This code races with sigaction. Be careful to not read sa_sigaction twice.
2235 // Also need to remember pc for reporting before the call,
2236 // because the handler can reset it.
2237 volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2238 ? (uptr)sigactions[sig].sigaction
2239 : (uptr)sigactions[sig].handler;
2240 if (pc != sig_dfl && pc != sig_ign) {
2241 // The callback can be either sa_handler or sa_sigaction.
2242 // They have different signatures, but we assume that passing
2243 // additional arguments to sa_handler works and is harmless.
2244 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2245 }
2246 if (!ctx->after_multithreaded_fork) {
2247 thr->ignore_reads_and_writes = ignore_reads_and_writes;
2248 if (ignore_reads_and_writes)
2249 thr->fast_state.SetIgnoreBit();
2250 thr->ignore_interceptors = ignore_interceptors;
2251 thr->ignore_sync = ignore_sync;
2252 thr->in_symbolizer = in_symbolizer;
2253 }
2254 // We do not detect errno spoiling for SIGTERM,
2255 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2256 // tsan reports false positive in such case.
2257 // It's difficult to properly detect this situation (reraise),
2258 // because in async signal processing case (when handler is called directly
2259 // from rtl_generic_sighandler) we have not yet received the reraised
2260 // signal; and it looks too fragile to intercept all ways to reraise a signal.
2261 if (ShouldReport(thr, typ: ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2262 errno != 99)
2263 ReportErrnoSpoiling(thr, pc, sig);
2264 errno = saved_errno;
2265}
2266
2267void ProcessPendingSignalsImpl(ThreadState *thr) {
2268 atomic_store(a: &thr->pending_signals, v: 0, mo: memory_order_relaxed);
2269 ThreadSignalContext *sctx = SigCtx(thr);
2270 if (sctx == 0)
2271 return;
2272 atomic_fetch_add(a: &thr->in_signal_handler, v: 1, mo: memory_order_relaxed);
2273 internal_sigfillset(set: &sctx->emptyset);
2274 __sanitizer_sigset_t *oldset = sctx->oldset.PushBack();
2275 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, oldset);
2276 CHECK_EQ(res, 0);
2277 for (int sig = 0; sig < kSigCount; sig++) {
2278 SignalDesc *signal = &sctx->pending_signals[sig];
2279 if (signal->armed) {
2280 signal->armed = false;
2281 CallUserSignalHandler(thr, sync: false, acquire: true, sig, info: &signal->siginfo,
2282 uctx: &signal->ctx);
2283 }
2284 }
2285 res = REAL(pthread_sigmask)(SIG_SETMASK, oldset, 0);
2286 CHECK_EQ(res, 0);
2287 sctx->oldset.PopBack();
2288 atomic_fetch_add(a: &thr->in_signal_handler, v: -1, mo: memory_order_relaxed);
2289}
2290
2291} // namespace __tsan
2292
2293static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2294 __sanitizer_siginfo *info) {
2295 // If we are sending signal to ourselves, we must process it now.
2296 if (sctx && sig == sctx->int_signal_send)
2297 return true;
2298#if SANITIZER_HAS_SIGINFO
2299 // POSIX timers can be configured to send any kind of signal; however, it
2300 // doesn't make any sense to consider a timer signal as synchronous!
2301 if (info->si_code == SI_TIMER)
2302 return false;
2303#endif
2304 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2305 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2306}
2307
2308void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2309 ThreadState *thr = cur_thread_init();
2310 ThreadSignalContext *sctx = SigCtx(thr);
2311 if (sig < 0 || sig >= kSigCount) {
2312 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2313 return;
2314 }
2315 // Don't mess with synchronous signals.
2316 const bool sync = is_sync_signal(sctx, sig, info);
2317 if (sync ||
2318 // If we are in blocking function, we can safely process it now
2319 // (but check if we are in a recursive interceptor,
2320 // i.e. pthread_join()->munmap()).
2321 atomic_load(a: &thr->in_blocking_func, mo: memory_order_relaxed)) {
2322 atomic_fetch_add(a: &thr->in_signal_handler, v: 1, mo: memory_order_relaxed);
2323 if (atomic_load(a: &thr->in_blocking_func, mo: memory_order_relaxed)) {
2324 atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
2325 CallUserSignalHandler(thr, sync, acquire: true, sig, info, uctx: ctx);
2326 atomic_store(a: &thr->in_blocking_func, v: 1, mo: memory_order_relaxed);
2327 } else {
2328 // Be very conservative with when we do acquire in this case.
2329 // It's unsafe to do acquire in async handlers, because ThreadState
2330 // can be in inconsistent state.
2331 // SIGSYS looks relatively safe -- it's synchronous and can actually
2332 // need some global state.
2333 bool acq = (sig == SIGSYS);
2334 CallUserSignalHandler(thr, sync, acquire: acq, sig, info, uctx: ctx);
2335 }
2336 atomic_fetch_add(a: &thr->in_signal_handler, v: -1, mo: memory_order_relaxed);
2337 return;
2338 }
2339
2340 if (sctx == 0)
2341 return;
2342 SignalDesc *signal = &sctx->pending_signals[sig];
2343 if (signal->armed == false) {
2344 signal->armed = true;
2345 internal_memcpy(dest: &signal->siginfo, src: info, n: sizeof(*info));
2346 internal_memcpy(dest: &signal->ctx, src: ctx, n: sizeof(signal->ctx));
2347 atomic_store(a: &thr->pending_signals, v: 1, mo: memory_order_relaxed);
2348 }
2349}
2350
2351TSAN_INTERCEPTOR(int, raise, int sig) {
2352 SCOPED_TSAN_INTERCEPTOR(raise, sig);
2353 ThreadSignalContext *sctx = SigCtx(thr);
2354 CHECK_NE(sctx, 0);
2355 int prev = sctx->int_signal_send;
2356 sctx->int_signal_send = sig;
2357 int res = REAL(raise)(sig);
2358 CHECK_EQ(sctx->int_signal_send, sig);
2359 sctx->int_signal_send = prev;
2360 return res;
2361}
2362
2363TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2364 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2365 ThreadSignalContext *sctx = SigCtx(thr);
2366 CHECK_NE(sctx, 0);
2367 int prev = sctx->int_signal_send;
2368 if (pid == (int)internal_getpid()) {
2369 sctx->int_signal_send = sig;
2370 }
2371 int res = REAL(kill)(pid, sig);
2372 if (pid == (int)internal_getpid()) {
2373 CHECK_EQ(sctx->int_signal_send, sig);
2374 sctx->int_signal_send = prev;
2375 }
2376 return res;
2377}
2378
2379TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2380 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2381 ThreadSignalContext *sctx = SigCtx(thr);
2382 CHECK_NE(sctx, 0);
2383 int prev = sctx->int_signal_send;
2384 bool self = pthread_equal(t1: tid, t2: pthread_self());
2385 if (self)
2386 sctx->int_signal_send = sig;
2387 int res = REAL(pthread_kill)(tid, sig);
2388 if (self) {
2389 CHECK_EQ(sctx->int_signal_send, sig);
2390 sctx->int_signal_send = prev;
2391 }
2392 return res;
2393}
2394
TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
  SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
  // It's intercepted merely to process pending signals.
  return REAL(gettimeofday)(tv, tz);
}
2400
// getaddrinfo(3): run with memory-access ignores enabled (see below).
TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
    void *hints, void *rv) {
  SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
  // We miss atomic synchronization in getaddrinfo,
  // and can report false race between malloc and free
  // inside of getaddrinfo. So ignore memory accesses.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(getaddrinfo)(node, service, hints, rv);
  ThreadIgnoreEnd(thr);
  return res;
}
2412
TSAN_INTERCEPTOR(int, fork, int fake) {
  // Inside the symbolizer, just fork without any tsan bookkeeping.
  if (in_symbolizer())
    return REAL(fork)(fake);
  // Fork state transitions are handled by the atfork_* hooks below
  // (presumably registered via pthread_atfork elsewhere — see atfork_child).
  SCOPED_INTERCEPTOR_RAW(fork, fake);
  return REAL(fork)(fake);
}
2419
// "prepare" fork hook: quiesce tsan runtime state before the fork happens.
void atfork_prepare() {
  if (in_symbolizer())
    return;
  ThreadState *thr = cur_thread();
  const uptr pc = StackTrace::GetCurrentPc();
  ForkBefore(thr, pc);
}
2427
// "parent" fork hook: restore tsan runtime state in the parent process.
void atfork_parent() {
  if (in_symbolizer())
    return;
  ThreadState *thr = cur_thread();
  const uptr pc = StackTrace::GetCurrentPc();
  ForkParentAfter(thr, pc);
}
2435
2436void atfork_child() {
2437 if (in_symbolizer())
2438 return;
2439 ThreadState *thr = cur_thread();
2440 const uptr pc = StackTrace::GetCurrentPc();
2441 ForkChildAfter(thr, pc, start_thread: true);
2442 FdOnFork(thr, pc);
2443}
2444
2445#if !SANITIZER_IOS
TSAN_INTERCEPTOR(int, vfork, int fake) {
  // Some programs (e.g. openjdk) call close for all file descriptors
  // in the child process. Under tsan it leads to false positives, because
  // address space is shared, so the parent process also thinks that
  // the descriptors are closed (while they are actually not).
  // This leads to false positives due to missed synchronization.
  // Strictly speaking this is undefined behavior, because a vfork child is not
  // allowed to call any functions other than exec/exit. But this is what
  // openjdk does, so we want to handle it.
  // We could disable interceptors in the child process. But it's not possible
  // to simply intercept and wrap vfork, because the vfork child is not allowed
  // to return from the function that calls vfork, and that's exactly what
  // we would do. So this would require some assembly trickery as well.
  // Instead we simply turn vfork into fork.
  return WRAP(fork)(fake);
}
2462#endif
2463
2464#if SANITIZER_LINUX && !SANITIZER_ANDROID
2465// Bionic's pthread_create internally calls clone. When the CLONE_THREAD flag is
2466// set, clone does not create a new process but a new thread. This is a
2467// workaround for Android. Disabling the interception of clone solves the
2468// problem in most scenarios.
2469TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2470 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2471 SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2472 child_tid);
2473 struct Arg {
2474 int (*fn)(void *);
2475 void *arg;
2476 };
2477 auto wrapper = +[](void *p) -> int {
2478 auto *thr = cur_thread();
2479 uptr pc = GET_CURRENT_PC();
2480 // Start the background thread for fork, but not for clone.
2481 // For fork we did this always and it's known to work (or user code has
2482 // adopted). But if we do this for the new clone interceptor some code
2483 // (sandbox2) fails. So model we used to do for years and don't start the
2484 // background thread after clone.
2485 ForkChildAfter(thr, pc, start_thread: false);
2486 FdOnFork(thr, pc);
2487 auto *arg = static_cast<Arg *>(p);
2488 return arg->fn(arg->arg);
2489 };
2490 ForkBefore(thr, pc);
2491 Arg arg_wrapper = {.fn: fn, .arg: arg};
2492 int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2493 child_tid);
2494 ForkParentAfter(thr, pc);
2495 return pid;
2496}
2497#endif
2498
2499#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2500typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2501 void *data);
// Bundles the user's dl_iterate_phdr callback and tsan context so that
// dl_iterate_phdr_cb can forward to the user callback.
struct dl_iterate_phdr_data {
  ThreadState *thr;         // calling thread state
  uptr pc;                  // interceptor pc, for memory reset attribution
  dl_iterate_phdr_cb_t cb;  // user-supplied callback
  void *data;               // user-supplied opaque argument
};
2508
2509static bool IsAppNotRodata(uptr addr) {
2510 return IsAppMem(mem: addr) && *MemToShadow(x: addr) != Shadow::kRodata;
2511}
2512
2513static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2514 void *data) {
2515 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2516 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2517 // accessible in dl_iterate_phdr callback. But we don't see synchronization
2518 // inside of dynamic linker, so we "unpoison" it here in order to not
2519 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2520 // because some libc functions call __libc_dlopen.
2521 if (info && IsAppNotRodata(addr: (uptr)info->dlpi_name))
2522 MemoryResetRange(thr: cbdata->thr, pc: cbdata->pc, addr: (uptr)info->dlpi_name,
2523 size: internal_strlen(s: info->dlpi_name));
2524 int res = cbdata->cb(info, size, cbdata->data);
2525 // Perform the check one more time in case info->dlpi_name was overwritten
2526 // by user callback.
2527 if (info && IsAppNotRodata(addr: (uptr)info->dlpi_name))
2528 MemoryResetRange(thr: cbdata->thr, pc: cbdata->pc, addr: (uptr)info->dlpi_name,
2529 size: internal_strlen(s: info->dlpi_name));
2530 return res;
2531}
2532
2533TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2534 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2535 dl_iterate_phdr_data cbdata;
2536 cbdata.thr = thr;
2537 cbdata.pc = pc;
2538 cbdata.cb = cb;
2539 cbdata.data = data;
2540 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2541 return res;
2542}
2543#endif
2544
// Finalizes tsan (emits reports, computes the exit status) and flushes the
// standard streams before the process exits.
static int OnExit(ThreadState *thr) {
  int status = Finalize(thr);
  FlushStreams();
  return status;
}
2550
2551#if !SANITIZER_APPLE
2552static void HandleRecvmsg(ThreadState *thr, uptr pc,
2553 __sanitizer_msghdr *msg) {
2554 int fds[64];
2555 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2556 for (int i = 0; i < cnt; i++)
2557 FdEventCreate(thr, pc, fd: fds[i]);
2558}
2559#endif
2560
2561#include "sanitizer_common/sanitizer_platform_interceptors.h"
2562// Causes interceptor recursion (getaddrinfo() and fopen())
2563#undef SANITIZER_INTERCEPT_GETADDRINFO
2564// We define our own.
2565#if SANITIZER_INTERCEPT_TLS_GET_ADDR
2566#define NEED_TLS_GET_ADDR
2567#endif
2568#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2569#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2570#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2571
2572#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2573 INTERCEPT_FUNCTION_VER(name, ver)
2574#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2575 (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2576
2577#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2578 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2579 TsanInterceptorContext _ctx = {thr, pc}; \
2580 ctx = (void *)&_ctx; \
2581 (void)ctx;
2582
2583#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2584 if (path) \
2585 Acquire(thr, pc, File2addr(path)); \
2586 if (file) { \
2587 int fd = fileno_unlocked(file); \
2588 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2589 }
2590
2591#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2592 if (file) { \
2593 int fd = fileno_unlocked(file); \
2594 FdClose(thr, pc, fd); \
2595 }
2596
2597#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2598 ({ \
2599 CheckNoDeepBind(filename, flag); \
2600 ThreadIgnoreBegin(thr, 0); \
2601 void *res = REAL(dlopen)(filename, flag); \
2602 ThreadIgnoreEnd(thr); \
2603 res; \
2604 })
2605
2606// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
2607// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that make
2608// intercepted calls, which can cause deadlockes with ReportRace() which also
2609// uses this code.
2610#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2611 ({ \
2612 ScopedIgnoreInterceptors ignore_interceptors; \
2613 libignore()->OnLibraryLoaded(filename); \
2614 })
2615
2616#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2617 ({ \
2618 ScopedIgnoreInterceptors ignore_interceptors; \
2619 libignore()->OnLibraryUnloaded(); \
2620 })
2621
2622#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2623 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2624
2625#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2626 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2627
2628#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2629 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2630
2631#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2632 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2633
2634#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2635 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2636
2637#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2638 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2639
2640#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2641 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2642
2643#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2644 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2645
2646#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2647 if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2648 COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
2649 else \
2650 __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2651
2652#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2653
2654#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2655 OnExit(((TsanInterceptorContext *) ctx)->thr)
2656
2657#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
2658 off) \
2659 do { \
2660 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2661 off); \
2662 } while (false)
2663
2664#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
2665 do { \
2666 return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2667 } while (false)
2668
2669#if !SANITIZER_APPLE
2670#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2671 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2672 ((TsanInterceptorContext *)ctx)->pc, msg)
2673#endif
2674
2675#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2676 if (TsanThread *t = GetCurrentThread()) { \
2677 *begin = t->tls_begin(); \
2678 *end = t->tls_end(); \
2679 } else { \
2680 *begin = *end = 0; \
2681 }
2682
2683#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2684 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2685
2686#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2687 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2688
2689#include "sanitizer_common/sanitizer_common_interceptors.inc"
2690
2691static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2692 __sanitizer_sigaction *old);
2693static __sanitizer_sighandler_ptr signal_impl(int sig,
2694 __sanitizer_sighandler_ptr h);
2695
2696#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2697 { return sigaction_impl(signo, act, oldact); }
2698
2699#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2700 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2701
2702#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2703
2704#include "sanitizer_common/sanitizer_signal_interceptors.inc"
2705
2706int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2707 __sanitizer_sigaction *old) {
2708 // Note: if we call REAL(sigaction) directly for any reason without proxying
2709 // the signal handler through sighandler, very bad things will happen.
2710 // The handler will run synchronously and corrupt tsan per-thread state.
2711 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2712 if (sig <= 0 || sig >= kSigCount) {
2713 errno = errno_EINVAL;
2714 return -1;
2715 }
2716 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2717 __sanitizer_sigaction old_stored;
2718 if (old) internal_memcpy(dest: &old_stored, src: &sigactions[sig], n: sizeof(old_stored));
2719 __sanitizer_sigaction newact;
2720 if (act) {
2721 // Copy act into sigactions[sig].
2722 // Can't use struct copy, because compiler can emit call to memcpy.
2723 // Can't use internal_memcpy, because it copies byte-by-byte,
2724 // and signal handler reads the handler concurrently. It can read
2725 // some bytes from old value and some bytes from new value.
2726 // Use volatile to prevent insertion of memcpy.
2727 sigactions[sig].handler =
2728 *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2729 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2730 internal_memcpy(dest: &sigactions[sig].sa_mask, src: &act->sa_mask,
2731 n: sizeof(sigactions[sig].sa_mask));
2732#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2733 sigactions[sig].sa_restorer = act->sa_restorer;
2734#endif
2735 internal_memcpy(dest: &newact, src: act, n: sizeof(newact));
2736 internal_sigfillset(set: &newact.sa_mask);
2737 if ((act->sa_flags & SA_SIGINFO) ||
2738 ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2739 newact.sa_flags |= SA_SIGINFO;
2740 newact.sigaction = sighandler;
2741 }
2742 ReleaseStore(thr, pc, addr: (uptr)&sigactions[sig]);
2743 act = &newact;
2744 }
2745 int res = REAL(sigaction)(sig, act, old);
2746 if (res == 0 && old && old->sigaction == sighandler)
2747 internal_memcpy(dest: old, src: &old_stored, n: sizeof(*old));
2748 return res;
2749}
2750
2751static __sanitizer_sighandler_ptr signal_impl(int sig,
2752 __sanitizer_sighandler_ptr h) {
2753 __sanitizer_sigaction act;
2754 act.handler = h;
2755 internal_memset(s: &act.sa_mask, c: -1, n: sizeof(act.sa_mask));
2756 act.sa_flags = 0;
2757 __sanitizer_sigaction old;
2758 int res = sigaction_symname(signum: sig, act: &act, oldact: &old);
2759 if (res) return (__sanitizer_sighandler_ptr)sig_err;
2760 return old.handler;
2761}
2762
// Prologue for syscall hook functions: bails out early when interceptors are
// ignored for this thread, otherwise declares a ScopedSyscall that lazily
// initializes the runtime and processes pending signals on scope exit.
// Only usable in functions returning void (it expands to a bare `return`).
#define TSAN_SYSCALL() \
  ThreadState *thr = cur_thread(); \
  if (thr->ignore_interceptors) \
    return; \
  ScopedSyscall scoped_syscall(thr)
2768
2769struct ScopedSyscall {
2770 ThreadState *thr;
2771
2772 explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2773
2774 ~ScopedSyscall() {
2775 ProcessPendingSignals(thr);
2776 }
2777};
2778
2779#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2780static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2781 TSAN_SYSCALL();
2782 MemoryAccessRange(thr, pc, addr: p, size: s, is_write: write);
2783}
2784
2785static USED void syscall_acquire(uptr pc, uptr addr) {
2786 TSAN_SYSCALL();
2787 Acquire(thr, pc, addr);
2788 DPrintf("syscall_acquire(0x%zx))\n", addr);
2789}
2790
// Release counterpart of syscall_acquire: performs a release operation on
// addr for syscall-mediated happens-before.
static USED void syscall_release(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  DPrintf("syscall_release(0x%zx)\n", addr);
  Release(thr, pc, addr);
}
2796
// Records that fd was closed by a syscall. Unlike the other hooks here, this
// one deliberately skips TSAN_SYSCALL() and the thr->ignore_interceptors
// check — presumably so fd bookkeeping stays accurate even while
// interceptors are ignored (NOTE(review): confirm intent).
static void syscall_fd_close(uptr pc, int fd) {
  auto *thr = cur_thread();
  FdClose(thr, pc, fd);
}
2801
// Acquire synchronization on the fd's sync object (syscall reads from fd).
static USED void syscall_fd_acquire(uptr pc, int fd) {
  TSAN_SYSCALL();
  FdAcquire(thr, pc, fd);
  DPrintf("syscall_fd_acquire(%d)\n", fd);
}
2807
// Release synchronization on the fd's sync object (syscall writes to fd).
static USED void syscall_fd_release(uptr pc, int fd) {
  TSAN_SYSCALL();
  DPrintf("syscall_fd_release(%d)\n", fd);
  FdRelease(thr, pc, fd);
}
2813
// Marks entry into a blocking syscall. Note: "sycall" (sic) — the name must
// match the COMMON_SYSCALL_BLOCKING_START macro below.
static USED void sycall_blocking_start() {
  DPrintf("sycall_blocking_start()\n");
  ThreadState *thr = cur_thread();
  EnterBlockingFunc(thr);
  // When we are in a "blocking call", we process signals asynchronously
  // (right when they arrive). In this context we do not expect to be
  // executing any user/runtime code. The known interceptor sequence when
  // this is not true is: pthread_join -> munmap(stack). It's fine
  // to ignore munmap in this case -- we handle stack shadow separately.
  thr->ignore_interceptors++;
}
2825
2826static USED void sycall_blocking_end() {
2827 DPrintf("sycall_blocking_end()\n");
2828 ThreadState *thr = cur_thread();
2829 thr->ignore_interceptors--;
2830 atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
2831}
2832
2833static void syscall_pre_fork(uptr pc) { ForkBefore(thr: cur_thread(), pc); }
2834
2835static void syscall_post_fork(uptr pc, int pid) {
2836 ThreadState *thr = cur_thread();
2837 if (pid == 0) {
2838 // child
2839 ForkChildAfter(thr, pc, start_thread: true);
2840 FdOnFork(thr, pc);
2841 } else if (pid > 0) {
2842 // parent
2843 ForkParentAfter(thr, pc);
2844 } else {
2845 // error
2846 ForkParentAfter(thr, pc);
2847 }
2848}
2849#endif
2850
// Hooks consumed by the sanitizer_common syscall handlers included below.
// Pre-read/write ranges are checked for races; the post hooks are no-ops.
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)

#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)

#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do { \
    (void)(p); \
    (void)(s); \
  } while (false)

#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  do { \
    (void)(p); \
    (void)(s); \
  } while (false)

// Synchronization and fd lifetime hooks.
#define COMMON_SYSCALL_ACQUIRE(addr) \
  syscall_acquire(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_RELEASE(addr) \
  syscall_release(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)

// fork() bracketing and blocking-region hooks.
#define COMMON_SYSCALL_PRE_FORK() \
  syscall_pre_fork(GET_CALLER_PC())

#define COMMON_SYSCALL_POST_FORK(res) \
  syscall_post_fork(GET_CALLER_PC(), res)

#define COMMON_SYSCALL_BLOCKING_START() sycall_blocking_start()
#define COMMON_SYSCALL_BLOCKING_END() sycall_blocking_end()

#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2892
2893#ifdef NEED_TLS_GET_ADDR
2894
2895static void handle_tls_addr(void *arg, void *res) {
2896 ThreadState *thr = cur_thread();
2897 if (!thr)
2898 return;
2899 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, static_tls_begin: thr->tls_addr,
2900 static_tls_end: thr->tls_addr + thr->tls_size);
2901 if (!dtv)
2902 return;
2903 // New DTLS block has been allocated.
2904 MemoryResetRange(thr, pc: 0, addr: dtv->beg, size: dtv->size);
2905}
2906
#if !SANITIZER_S390
// Define own interceptor instead of sanitizer_common's for three reasons:
// 1. It must not process pending signals.
//    Signal handlers may contain MOVDQA instruction (see below).
// 2. It must be as simple as possible to not contain MOVDQA.
// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
//    is empty for tsan (meant only for msan).
// Note: __tls_get_addr can be called with mis-aligned stack due to:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// So the interceptor must work with mis-aligned stack, in particular, does not
// execute MOVDQA with stack addresses.
TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
  void *res = REAL(__tls_get_addr)(arg);
  handle_tls_addr(arg, res);
  return res;
}
#else // SANITIZER_S390
// On s390, the libc entry point returns an offset; add the thread pointer to
// form the absolute address passed to handle_tls_addr.
TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
  uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
  char *tp = static_cast<char *>(__builtin_thread_pointer());
  handle_tls_addr(arg, res + tp);
  return res;
}
#endif
2931#endif
2932
#if SANITIZER_NETBSD
// NetBSD thread-exit primitive: tear down TSan per-thread state before the
// LWP terminates. REAL(_lwp_exit) does not return.
TSAN_INTERCEPTOR(void, _lwp_exit) {
  SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
  DestroyThreadState();
  REAL(_lwp_exit)();
}
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
#else
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
#endif
2943
#if SANITIZER_FREEBSD
// FreeBSD thread-exit primitive: tear down TSan per-thread state before the
// thread terminates.
TSAN_INTERCEPTOR(void, thr_exit, ThreadID *state) {
  SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
  DestroyThreadState();
  // Was REAL(thr_exit(state)), which relied on token pasting inside the
  // REAL() macro; spell it as the conventional REAL(fn)(args) instead.
  REAL(thr_exit)(state);
}
# define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
#else
#define TSAN_MAYBE_INTERCEPT_THR_EXIT
#endif
2954
// Interceptors for FreeBSD's aliased pthread symbol names
// (see TSAN_INTERCEPTOR_FREEBSD_ALIAS in tsan_interceptors.h).
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)

// Same for NetBSD's aliased pthread symbol names.
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
                                   void *c)
2995
2996namespace __tsan {
2997
2998static void finalize(void *arg) {
2999 ThreadState *thr = cur_thread();
3000 int status = Finalize(thr);
3001 // Make sure the output is not lost.
3002 FlushStreams();
3003 if (status)
3004 Die();
3005}
3006
3007#if !SANITIZER_APPLE && !SANITIZER_ANDROID
3008static void unreachable() {
3009 Report(format: "FATAL: ThreadSanitizer: unreachable called\n");
3010 Die();
3011}
3012#endif
3013
// Define default implementation since interception of libdispatch is optional.
// A strong definition overrides this weak no-op when libdispatch support is
// compiled in.
SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
3016
3017void InitializeInterceptors() {
3018#if !SANITIZER_APPLE
3019 // We need to setup it early, because functions like dlsym() can call it.
3020 REAL(memset) = internal_memset;
3021 REAL(memcpy) = internal_memcpy;
3022#endif
3023
3024 __interception::DoesNotSupportStaticLinking();
3025
3026 new(interceptor_ctx()) InterceptorContext();
3027
3028 // Interpose __tls_get_addr before the common interposers. This is needed
3029 // because dlsym() may call malloc on failure which could result in other
3030 // interposed functions being called that could eventually make use of TLS.
3031#ifdef NEED_TLS_GET_ADDR
3032# if !SANITIZER_S390
3033 TSAN_INTERCEPT(__tls_get_addr);
3034# else
3035 TSAN_INTERCEPT(__tls_get_addr_internal);
3036 TSAN_INTERCEPT(__tls_get_offset);
3037# endif
3038#endif
3039 InitializeCommonInterceptors();
3040 InitializeSignalInterceptors();
3041 InitializeLibdispatchInterceptors();
3042
3043#if !SANITIZER_APPLE
3044 InitializeSetjmpInterceptors();
3045#endif
3046
3047 TSAN_INTERCEPT(longjmp_symname);
3048 TSAN_INTERCEPT(siglongjmp_symname);
3049#if SANITIZER_NETBSD
3050 TSAN_INTERCEPT(_longjmp);
3051#endif
3052
3053 TSAN_INTERCEPT(malloc);
3054 TSAN_INTERCEPT(__libc_memalign);
3055 TSAN_INTERCEPT(calloc);
3056 TSAN_INTERCEPT(realloc);
3057 TSAN_INTERCEPT(reallocarray);
3058 TSAN_INTERCEPT(free);
3059 TSAN_MAYBE_INTERCEPT_FREE_SIZED;
3060 TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED;
3061 TSAN_INTERCEPT(cfree);
3062 TSAN_INTERCEPT(munmap);
3063 TSAN_MAYBE_INTERCEPT_MEMALIGN;
3064 TSAN_INTERCEPT(valloc);
3065 TSAN_MAYBE_INTERCEPT_PVALLOC;
3066 TSAN_INTERCEPT(posix_memalign);
3067
3068 TSAN_INTERCEPT(strcpy);
3069 TSAN_INTERCEPT(strncpy);
3070 TSAN_INTERCEPT(strdup);
3071
3072 TSAN_INTERCEPT(pthread_create);
3073 TSAN_INTERCEPT(pthread_join);
3074 TSAN_INTERCEPT(pthread_detach);
3075 TSAN_INTERCEPT(pthread_exit);
3076 #if SANITIZER_LINUX
3077 TSAN_INTERCEPT(pthread_tryjoin_np);
3078 TSAN_INTERCEPT(pthread_timedjoin_np);
3079 #endif
3080
3081 // In glibc versions older than 2.36, dlsym(RTLD_NEXT, "pthread_cond_init")
3082 // may return an outdated symbol (max(2.2,base_version)) if the port was
3083 // introduced before 2.3.2 (when the new pthread_cond_t was introduced).
3084#if SANITIZER_GLIBC && !__GLIBC_PREREQ(2, 36) && \
3085 (defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
3086 defined(__s390x__))
3087 INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2");
3088 INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2");
3089 INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2");
3090 INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2");
3091 INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
3092 INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
3093#else
3094 INTERCEPT_FUNCTION(pthread_cond_init);
3095 INTERCEPT_FUNCTION(pthread_cond_signal);
3096 INTERCEPT_FUNCTION(pthread_cond_broadcast);
3097 INTERCEPT_FUNCTION(pthread_cond_wait);
3098 INTERCEPT_FUNCTION(pthread_cond_timedwait);
3099 INTERCEPT_FUNCTION(pthread_cond_destroy);
3100#endif
3101
3102 TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
3103
3104 TSAN_INTERCEPT(pthread_mutex_init);
3105 TSAN_INTERCEPT(pthread_mutex_destroy);
3106 TSAN_INTERCEPT(pthread_mutex_lock);
3107 TSAN_INTERCEPT(pthread_mutex_trylock);
3108 TSAN_INTERCEPT(pthread_mutex_timedlock);
3109 TSAN_INTERCEPT(pthread_mutex_unlock);
3110#if SANITIZER_LINUX
3111 TSAN_INTERCEPT(pthread_mutex_clocklock);
3112#endif
3113#if SANITIZER_GLIBC
3114# if !__GLIBC_PREREQ(2, 34)
3115 TSAN_INTERCEPT(__pthread_mutex_lock);
3116 TSAN_INTERCEPT(__pthread_mutex_unlock);
3117# endif
3118#endif
3119
3120 TSAN_INTERCEPT(pthread_spin_init);
3121 TSAN_INTERCEPT(pthread_spin_destroy);
3122 TSAN_INTERCEPT(pthread_spin_lock);
3123 TSAN_INTERCEPT(pthread_spin_trylock);
3124 TSAN_INTERCEPT(pthread_spin_unlock);
3125
3126 TSAN_INTERCEPT(pthread_rwlock_init);
3127 TSAN_INTERCEPT(pthread_rwlock_destroy);
3128 TSAN_INTERCEPT(pthread_rwlock_rdlock);
3129 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
3130 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
3131 TSAN_INTERCEPT(pthread_rwlock_wrlock);
3132 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
3133 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
3134 TSAN_INTERCEPT(pthread_rwlock_unlock);
3135
3136 TSAN_INTERCEPT(pthread_barrier_init);
3137 TSAN_INTERCEPT(pthread_barrier_destroy);
3138 TSAN_INTERCEPT(pthread_barrier_wait);
3139
3140 TSAN_INTERCEPT(pthread_once);
3141
3142 TSAN_MAYBE_INTERCEPT___FXSTAT;
3143 TSAN_MAYBE_INTERCEPT_FSTAT;
3144 TSAN_MAYBE_INTERCEPT_FSTAT64;
3145 TSAN_INTERCEPT(open);
3146 TSAN_MAYBE_INTERCEPT_OPEN64;
3147 TSAN_INTERCEPT(creat);
3148 TSAN_MAYBE_INTERCEPT_CREAT64;
3149 TSAN_INTERCEPT(dup);
3150 TSAN_INTERCEPT(dup2);
3151 TSAN_INTERCEPT(dup3);
3152 TSAN_MAYBE_INTERCEPT_EVENTFD;
3153 TSAN_MAYBE_INTERCEPT_SIGNALFD;
3154 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
3155 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
3156 TSAN_INTERCEPT(socket);
3157 TSAN_INTERCEPT(socketpair);
3158 TSAN_INTERCEPT(connect);
3159 TSAN_INTERCEPT(bind);
3160 TSAN_INTERCEPT(listen);
3161 TSAN_MAYBE_INTERCEPT_EPOLL;
3162 TSAN_INTERCEPT(close);
3163 TSAN_MAYBE_INTERCEPT___CLOSE;
3164 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
3165 TSAN_INTERCEPT(pipe);
3166 TSAN_INTERCEPT(pipe2);
3167
3168 TSAN_INTERCEPT(unlink);
3169 TSAN_INTERCEPT(tmpfile);
3170 TSAN_MAYBE_INTERCEPT_TMPFILE64;
3171 TSAN_INTERCEPT(abort);
3172 TSAN_INTERCEPT(rmdir);
3173 TSAN_INTERCEPT(closedir);
3174
3175 TSAN_INTERCEPT(sigsuspend);
3176 TSAN_INTERCEPT(sigblock);
3177 TSAN_INTERCEPT(sigsetmask);
3178 TSAN_INTERCEPT(pthread_sigmask);
3179 TSAN_INTERCEPT(raise);
3180 TSAN_INTERCEPT(kill);
3181 TSAN_INTERCEPT(pthread_kill);
3182 TSAN_INTERCEPT(sleep);
3183 TSAN_INTERCEPT(usleep);
3184 TSAN_INTERCEPT(nanosleep);
3185 TSAN_INTERCEPT(pause);
3186 TSAN_INTERCEPT(gettimeofday);
3187 TSAN_INTERCEPT(getaddrinfo);
3188
3189 TSAN_INTERCEPT(fork);
3190 TSAN_INTERCEPT(vfork);
3191#if SANITIZER_LINUX && !SANITIZER_ANDROID
3192 TSAN_INTERCEPT(clone);
3193#endif
3194#if !SANITIZER_ANDROID
3195 TSAN_INTERCEPT(dl_iterate_phdr);
3196#endif
3197
3198 // Symbolization indirectly calls dl_iterate_phdr
3199 ready_to_symbolize = true;
3200
3201 TSAN_MAYBE_INTERCEPT_ON_EXIT;
3202 TSAN_INTERCEPT(__cxa_atexit);
3203 TSAN_INTERCEPT(_exit);
3204
3205 TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3206 TSAN_MAYBE_INTERCEPT_THR_EXIT;
3207
3208#if !SANITIZER_APPLE && !SANITIZER_ANDROID
3209 // Need to setup it, because interceptors check that the function is resolved.
3210 // But atexit is emitted directly into the module, so can't be resolved.
3211 REAL(atexit) = (int(*)(void(*)()))unreachable;
3212#endif
3213
3214 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
3215 Printf(format: "ThreadSanitizer: failed to setup atexit callback\n");
3216 Die();
3217 }
3218 if (pthread_atfork(prepare: atfork_prepare, parent: atfork_parent, child: atfork_child)) {
3219 Printf(format: "ThreadSanitizer: failed to setup atfork callbacks\n");
3220 Die();
3221 }
3222
3223#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
3224 if (pthread_key_create(key: &interceptor_ctx()->finalize_key, destructor: &thread_finalize)) {
3225 Printf(format: "ThreadSanitizer: failed to create thread key\n");
3226 Die();
3227 }
3228#endif
3229
3230 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
3231 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
3232 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
3233 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
3234 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
3235 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
3236 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
3237 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
3238 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
3239 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
3240 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
3241 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
3242 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
3243 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3244 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3245 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3246 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3247 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3248 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3249
3250 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3251 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3252 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3253 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3254 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3255 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3256 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3257 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
3258 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3259 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
3260 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3261 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3262 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3263 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3264 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3265 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3266 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3267 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3268 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3269
3270 FdInit();
3271}
3272
3273} // namespace __tsan
3274
3275// Invisible barrier for tests.
3276// There were several unsuccessful iterations for this functionality:
3277// 1. Initially it was implemented in user code using
3278// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
3279// MacOS. Futexes are linux-specific for this matter.
3280// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3281// "as-if synchronized via sleep" messages in reports which failed some
3282// output tests.
3283// 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
// visible events, which led to "failed to restore stack trace" failures.
3285// Note that no_sanitize_thread attribute does not turn off atomic interception
3286// so attaching it to the function defined in user code does not help.
3287// That's why we now have what we have.
// Barrier value layout: the low kBarrierThreadBits bits hold the thread
// count; the remaining high bits count threads that have entered the barrier
// (see __tsan_testonly_barrier_init/wait below).
constexpr u32 kBarrierThreadBits = 10;
constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3290
3291extern "C" {
3292
3293SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3294 atomic_uint32_t *barrier, u32 num_threads) {
3295 if (num_threads >= kBarrierThreads) {
3296 Printf(format: "barrier_init: count is too large (%d)\n", num_threads);
3297 Die();
3298 }
3299 // kBarrierThreadBits lsb is thread count,
3300 // the remaining are count of entered threads.
3301 atomic_store(a: barrier, v: num_threads, mo: memory_order_relaxed);
3302}
3303
3304static u32 barrier_epoch(u32 value) {
3305 return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3306}
3307
3308SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3309 atomic_uint32_t *barrier) {
3310 u32 old = atomic_fetch_add(a: barrier, v: kBarrierThreads, mo: memory_order_relaxed);
3311 u32 old_epoch = barrier_epoch(value: old);
3312 if (barrier_epoch(value: old + kBarrierThreads) != old_epoch) {
3313 FutexWake(p: barrier, count: (1 << 30));
3314 return;
3315 }
3316 for (;;) {
3317 u32 cur = atomic_load(a: barrier, mo: memory_order_relaxed);
3318 if (barrier_epoch(value: cur) != old_epoch)
3319 return;
3320 FutexWait(p: barrier, cmp: cur);
3321 }
3322}
3323
3324} // extern "C"
3325