1//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// FIXME: move as many interceptors as possible into
12// sanitizer_common/sanitizer_common_interceptors.inc
13//===----------------------------------------------------------------------===//
14
15#include <stdarg.h>
16
17#include "interception/interception.h"
18#include "sanitizer_common/sanitizer_allocator_dlsym.h"
19#include "sanitizer_common/sanitizer_atomic.h"
20#include "sanitizer_common/sanitizer_errno.h"
21#include "sanitizer_common/sanitizer_glibc_version.h"
22#include "sanitizer_common/sanitizer_internal_defs.h"
23#include "sanitizer_common/sanitizer_libc.h"
24#include "sanitizer_common/sanitizer_linux.h"
25#include "sanitizer_common/sanitizer_platform_interceptors.h"
26#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
27#include "sanitizer_common/sanitizer_platform_limits_posix.h"
28#include "sanitizer_common/sanitizer_posix.h"
29#include "sanitizer_common/sanitizer_stacktrace.h"
30#include "sanitizer_common/sanitizer_tls_get_addr.h"
31#include "sanitizer_common/sanitizer_vector.h"
32#include "tsan_fd.h"
33#include "tsan_interceptors.h"
34#include "tsan_interface.h"
35#include "tsan_mman.h"
36#include "tsan_platform.h"
37#include "tsan_rtl.h"
38#include "tsan_suppressions.h"
39
40using namespace __tsan;
41
42DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
43DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
44
45#if SANITIZER_FREEBSD || SANITIZER_APPLE
46#define stdout __stdoutp
47#define stderr __stderrp
48#endif
49
50#if SANITIZER_NETBSD
51#define dirfd(dirp) (*(int *)(dirp))
52#define fileno_unlocked(fp) \
53 (((__sanitizer_FILE *)fp)->_file == -1 \
54 ? -1 \
55 : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
56
57#define stdout ((__sanitizer_FILE*)&__sF[1])
58#define stderr ((__sanitizer_FILE*)&__sF[2])
59
60#define nanosleep __nanosleep50
61#define vfork __vfork14
62#endif
63
64#ifdef __mips__
65const int kSigCount = 129;
66#else
67const int kSigCount = 65;
68#endif
69
70#ifdef __mips__
71struct ucontext_t {
72 u64 opaque[768 / sizeof(u64) + 1];
73};
74#else
75struct ucontext_t {
  // The size is determined by looking at the size of the real ucontext_t on Linux.
77 u64 opaque[936 / sizeof(u64) + 1];
78};
79#endif
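// Note: the opaque buffers above are a conservative stand-in for the real
// ucontext_t (this file does not include <ucontext.h>); all that is needed is
// enough storage for SignalDesc below to copy the context of a pending signal
// by value.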
80
81#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
82 defined(__s390x__)
83#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
84#elif defined(__aarch64__) || SANITIZER_PPC64V2
85#define PTHREAD_ABI_BASE "GLIBC_2.17"
86#elif SANITIZER_LOONGARCH64
87#define PTHREAD_ABI_BASE "GLIBC_2.36"
88#elif SANITIZER_RISCV64
89# define PTHREAD_ABI_BASE "GLIBC_2.27"
90#endif
91
92extern "C" int pthread_attr_init(void *attr);
93extern "C" int pthread_attr_destroy(void *attr);
94DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
95extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
96extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
97 void (*child)(void));
98extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
99extern "C" int pthread_setspecific(unsigned key, const void *v);
100DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
101DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
102DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, usize size)
103DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
104extern "C" int pthread_equal(void *t1, void *t2);
105extern "C" void *pthread_self();
106extern "C" void _exit(int status);
107#if !SANITIZER_NETBSD
108extern "C" int fileno_unlocked(void *stream);
109extern "C" int dirfd(void *dirp);
110#endif
111#if SANITIZER_NETBSD
112extern __sanitizer_FILE __sF[];
113#else
114extern __sanitizer_FILE *stdout, *stderr;
115#endif
116#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
117const int PTHREAD_MUTEX_RECURSIVE = 1;
118const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
119#else
120const int PTHREAD_MUTEX_RECURSIVE = 2;
121const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
122#endif
123#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
124const int EPOLL_CTL_ADD = 1;
125#endif
126const int SIGILL = 4;
127const int SIGTRAP = 5;
128const int SIGABRT = 6;
129const int SIGFPE = 8;
130const int SIGSEGV = 11;
131const int SIGPIPE = 13;
132const int SIGTERM = 15;
133#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
134const int SIGBUS = 10;
135const int SIGSYS = 12;
136#else
137const int SIGBUS = 7;
138const int SIGSYS = 31;
139#endif
140#if SANITIZER_HAS_SIGINFO
141const int SI_TIMER = -2;
142#endif
143void *const MAP_FAILED = (void*)-1;
144#if SANITIZER_NETBSD
145const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
146#elif !SANITIZER_APPLE
147const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
148#endif
149const int MAP_FIXED = 0x10;
150typedef long long_t;
151typedef __sanitizer::u16 mode_t;
152
153// From /usr/include/unistd.h
154# define F_ULOCK 0 /* Unlock a previously locked region. */
155# define F_LOCK 1 /* Lock a region for exclusive use. */
156# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
157# define F_TEST 3 /* Test a region for other processes locks. */
158
159#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
160const int SA_SIGINFO = 0x40;
161const int SIG_SETMASK = 3;
162#elif defined(__mips__)
163const int SA_SIGINFO = 8;
164const int SIG_SETMASK = 3;
165#else
166const int SA_SIGINFO = 4;
167const int SIG_SETMASK = 2;
168#endif
169
170namespace __tsan {
171struct SignalDesc {
172 bool armed;
173 __sanitizer_siginfo siginfo;
174 ucontext_t ctx;
175};
176
177struct ThreadSignalContext {
178 int int_signal_send;
179 SignalDesc pending_signals[kSigCount];
180 // emptyset and oldset are too big for stack.
181 __sanitizer_sigset_t emptyset;
182 __sanitizer::Vector<__sanitizer_sigset_t> oldset;
183};
184
void EnterBlockingFunc(ThreadState *thr) {
  for (;;) {
    // The order is important to not delay a signal infinitely if it's
    // delivered right before we set in_blocking_func. Note: we can't call
    // ProcessPendingSignals when in_blocking_func is set, or we could end up
    // handling a signal synchronously while we are already handling a signal.
    atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
    if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
      break;
    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
    ProcessPendingSignals(thr);
  }
}
198
199// The sole reason tsan wraps atexit callbacks is to establish synchronization
200// between callback setup and callback execution.
201struct AtExitCtx {
202 void (*f)();
203 void *arg;
204 uptr pc;
205};
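// An AtExitCtx is heap-allocated when the callback is registered (see
// setup_at_exit_wrapper below); registration performs a Release on the ctx
// address and the wrapper performs the matching Acquire before invoking the
// callback, which provides the synchronization described above.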
206
207// InterceptorContext holds all global data required for interceptors.
208// It's explicitly constructed in InitializeInterceptors with placement new
209// and is never destroyed. This allows usage of members with non-trivial
210// constructors and destructors.
211struct InterceptorContext {
212 // The object is 64-byte aligned, because we want hot data to be located
213 // in a single cache line if possible (it's accessed in every interceptor).
214 alignas(64) LibIgnore libignore;
215 __sanitizer_sigaction sigactions[kSigCount];
216#if !SANITIZER_APPLE && !SANITIZER_NETBSD
217 unsigned finalize_key;
218#endif
219
220 Mutex atexit_mu;
221 Vector<struct AtExitCtx *> AtExitStack;
222
223 InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
224};
225
226alignas(64) static char interceptor_placeholder[sizeof(InterceptorContext)];
227InterceptorContext *interceptor_ctx() {
228 return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
229}
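// InitializeInterceptors constructs the context into the placeholder above
// with placement new, i.e. roughly:
//   new (interceptor_ctx()) InterceptorContext();
// so interceptor_ctx() is valid from that point on and is never destroyed.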
230
231LibIgnore *libignore() {
232 return &interceptor_ctx()->libignore;
233}
234
void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s->type, kSuppressionLib))
      libignore()->AddIgnoredLibrary(s->templ);
  }
  if (flags()->ignore_noninstrumented_modules)
    libignore()->IgnoreNoninstrumentedModules(true);
  libignore()->OnLibraryLoaded(0);
}
247
// The following two hooks can be used for cooperative scheduling when
// locking.
250#ifdef TSAN_EXTERNAL_HOOKS
251void OnPotentiallyBlockingRegionBegin();
252void OnPotentiallyBlockingRegionEnd();
253#else
254SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
255SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
256#endif
257
258// FIXME: Use for `in_symbolizer()` as well. As-is we can't use
259// `DlSymAllocator`, because it uses the primary allocator only. Symbolizer
260// requires support of the secondary allocator for larger blocks.
261struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
262 static bool UseImpl() { return (ctx && !ctx->initialized); }
263};
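// DlsymAlloc handles allocations that are requested through the interceptors
// before the runtime is fully initialized (ctx && !ctx->initialized above),
// most notably allocations made while dlsym() resolves the REAL() function
// pointers; see its use in the malloc/calloc/free interceptors below.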
264
265} // namespace __tsan
266
static ThreadSignalContext *SigCtx(ThreadState *thr) {
  // This function may be called reentrantly if it is interrupted by a signal
  // handler. Use CAS to handle the race.
  uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
  if (ctx == 0 && !thr->is_dead) {
    uptr pctx =
        (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
    MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
    if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
                                       memory_order_relaxed)) {
      ctx = pctx;
    } else {
      UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
    }
  }
  return (ThreadSignalContext *)ctx;
}
284
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr) {
  LazyInitialize(thr);
  if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
    // pthread_join is marked as blocking, but it's also known to call other
    // intercepted functions (mmap, free). If we don't reset in_blocking_func
    // we can get deadlocks and memory corruptions if we deliver a synchronous
    // signal inside an mmap/free interceptor.
    // So reset it and restore it back in the destructor.
    // See https://github.com/google/sanitizers/issues/1540
    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
    in_blocking_func_ = true;
  }
  if (!thr_->is_inited) return;
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  ignoring_ =
      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
                                libignore()->IsIgnored(pc, &in_ignored_lib_));
  EnableIgnores();
}
307
ScopedInterceptor::~ScopedInterceptor() {
  if (!thr_->is_inited) return;
  DisableIgnores();
  if (UNLIKELY(in_blocking_func_))
    EnterBlockingFunc(thr_);
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
    CheckedMutex::CheckNoLocks();
  }
}
319
320NOINLINE
321void ScopedInterceptor::EnableIgnoresImpl() {
  ThreadIgnoreBegin(thr_, 0);
323 if (flags()->ignore_noninstrumented_modules)
324 thr_->suppress_reports++;
325 if (in_ignored_lib_) {
326 DCHECK(!thr_->in_ignored_lib);
327 thr_->in_ignored_lib = true;
328 }
329}
330
331NOINLINE
332void ScopedInterceptor::DisableIgnoresImpl() {
  ThreadIgnoreEnd(thr_);
334 if (flags()->ignore_noninstrumented_modules)
335 thr_->suppress_reports--;
336 if (in_ignored_lib_) {
337 DCHECK(thr_->in_ignored_lib);
338 thr_->in_ignored_lib = false;
339 }
340}
341
342#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
343#if SANITIZER_FREEBSD || SANITIZER_NETBSD
344# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
345#else
346# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
347#endif
348#if SANITIZER_FREEBSD
349# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
350 INTERCEPT_FUNCTION(_pthread_##func)
351#else
352# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
353#endif
354#if SANITIZER_NETBSD
355# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
356 INTERCEPT_FUNCTION(__libc_##func)
357# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
358 INTERCEPT_FUNCTION(__libc_thr_##func)
359#else
360# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
361# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
362#endif
363
364#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
365 MemoryAccessRange((thr), (pc), (uptr)(s), \
366 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
367
368#define READ_STRING(thr, pc, s, n) \
369 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
370
371#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
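// BLOCK_REAL(name)(args...) expands to (BlockingCall(thr), REAL(name))(args...):
// the comma operator creates a temporary BlockingCall that marks the thread as
// being in a blocking call for the duration of the real function, so pending
// signals are processed as they arrive instead of being delayed (see below).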
372
373struct BlockingCall {
374 explicit BlockingCall(ThreadState *thr)
375 : thr(thr) {
376 EnterBlockingFunc(thr);
377 // When we are in a "blocking call", we process signals asynchronously
378 // (right when they arrive). In this context we do not expect to be
379 // executing any user/runtime code. The known interceptor sequence when
380 // this is not true is: pthread_join -> munmap(stack). It's fine
381 // to ignore munmap in this case -- we handle stack shadow separately.
382 thr->ignore_interceptors++;
383 }
384
385 ~BlockingCall() {
386 thr->ignore_interceptors--;
    atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
388 }
389
390 ThreadState *thr;
391};
392
393TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
394 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
395 unsigned res = BLOCK_REAL(sleep)(sec);
396 AfterSleep(thr, pc);
397 return res;
398}
399
400TSAN_INTERCEPTOR(int, usleep, long_t usec) {
401 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
402 int res = BLOCK_REAL(usleep)(usec);
403 AfterSleep(thr, pc);
404 return res;
405}
406
407TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
408 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
409 int res = BLOCK_REAL(nanosleep)(req, rem);
410 AfterSleep(thr, pc);
411 return res;
412}
413
414TSAN_INTERCEPTOR(int, pause, int fake) {
415 SCOPED_TSAN_INTERCEPTOR(pause, fake);
416 return BLOCK_REAL(pause)(fake);
417}
418
// Note: we deliberately give the function the strange "installed_at" name
// because in reports it will appear between the callback frames and the frame
// that installed the callback.
static void at_exit_callback_installed_at() {
  AtExitCtx *ctx;
  {
    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);

    // Pop AtExitCtx from the top of the stack of callback functions
    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
    ctx = interceptor_ctx()->AtExitStack[element];
    interceptor_ctx()->AtExitStack.PopBack();
  }

  ThreadState *thr = cur_thread();
  Acquire(thr, ctx->pc, (uptr)ctx);
  FuncEntry(thr, ctx->pc);
  ((void(*)())ctx->f)();
  FuncExit(thr);
  Free(ctx);
}
441
static void cxa_at_exit_callback_installed_at(void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  Acquire(thr, ctx->pc, (uptr)arg);
  FuncEntry(thr, ctx->pc);
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  FuncExit(thr);
  Free(ctx);
}
451
452static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
453 void *arg, void *dso);
454
455#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (in_symbolizer())
    return 0;
  // We want to set up the atexit callback even if we are in an ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
}
464#endif
465
466TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
467 if (in_symbolizer())
468 return 0;
469 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
471}
472
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
                                 void *arg, void *dso) {
  auto *ctx = New<AtExitCtx>();
  ctx->f = f;
  ctx->arg = arg;
  ctx->pc = pc;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around the atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res;
  if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0.
    // Store ctx in a local stack-like structure instead.

    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);
    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
    // because atexit_mu is still held on exit from the calloc interceptor.
    ScopedIgnoreInterceptors ignore;

    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
                             0, 0);
    // Push AtExitCtx on the top of the stack of callback functions
    if (!res) {
      interceptor_ctx()->AtExitStack.PushBack(ctx);
    }
  } else {
    res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
  }
  ThreadIgnoreEnd(thr);
  return res;
}
506
507#if !SANITIZER_APPLE && !SANITIZER_NETBSD
static void on_exit_callback_installed_at(int status, void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  Acquire(thr, ctx->pc, (uptr)arg);
  FuncEntry(thr, ctx->pc);
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  FuncExit(thr);
  Free(ctx);
}

TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  auto *ctx = New<AtExitCtx>();
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  ctx->pc = GET_CALLER_PC();
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around the atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
  ThreadIgnoreEnd(thr);
  return res;
}
534#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
535#else
536#define TSAN_MAYBE_INTERCEPT_ON_EXIT
537#endif
538
539// Cleanup old bufs.
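// Since the stack grows down, a buf with buf->sp <= sp belongs to a frame that
// has already returned by the time the current frame is running, so it can no
// longer be a valid longjmp target; such bufs are removed by swapping them
// with the last element.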
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      uptr sz = thr->jmp_bufs.Size();
      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}
551
552static void SetJmp(ThreadState *thr, uptr sp) {
553 if (!thr->is_inited) // called from libc guts during bootstrap
554 return;
555 // Cleanup old bufs.
556 JmpBufGarbageCollect(thr, sp);
557 // Remember the buf.
558 JmpBuf *buf = thr->jmp_bufs.PushBack();
559 buf->sp = sp;
560 buf->shadow_stack_pos = thr->shadow_stack_pos;
561 ThreadSignalContext *sctx = SigCtx(thr);
562 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
563 buf->oldset_stack_size = sctx ? sctx->oldset.Size() : 0;
  buf->in_blocking_func =
      atomic_load(&thr->in_blocking_func, memory_order_relaxed);
  buf->in_signal_handler =
      atomic_load(&thr->in_signal_handler, memory_order_relaxed);
567}
568
569static void LongJmp(ThreadState *thr, uptr *env) {
570 uptr sp = ExtractLongJmpSp(env);
571 // Find the saved buf with matching sp.
572 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
573 JmpBuf *buf = &thr->jmp_bufs[i];
574 if (buf->sp == sp) {
575 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
576 // Unwind the stack.
577 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
578 FuncExit(thr);
579 ThreadSignalContext *sctx = SigCtx(thr);
580 if (sctx) {
581 sctx->int_signal_send = buf->int_signal_send;
582 while (sctx->oldset.Size() > buf->oldset_stack_size)
583 sctx->oldset.PopBack();
584 }
      atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
                   memory_order_relaxed);
      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
                   memory_order_relaxed);
      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf("ThreadSanitizer: can't find longjmp buf\n");
594 CHECK(0);
595}
596
597// FIXME: put everything below into a common extern "C" block?
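// __tsan_setjmp is called from the assembly setjmp interceptors (see
// InitializeSetjmpInterceptors below) right before control reaches the real
// setjmp, so that the current stack pointer and shadow stack position are
// recorded for a later longjmp.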
extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
599
600#if SANITIZER_APPLE
601TSAN_INTERCEPTOR(int, setjmp, void *env);
602TSAN_INTERCEPTOR(int, _setjmp, void *env);
603TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
604#else // SANITIZER_APPLE
605
606#if SANITIZER_NETBSD
607#define setjmp_symname __setjmp14
608#define sigsetjmp_symname __sigsetjmp14
609#else
610#define setjmp_symname setjmp
611#define sigsetjmp_symname sigsetjmp
612#endif
613
614DEFINE_REAL(int, setjmp_symname, void *env)
615DEFINE_REAL(int, _setjmp, void *env)
616DEFINE_REAL(int, sigsetjmp_symname, void *env)
617#if !SANITIZER_NETBSD
618DEFINE_REAL(int, __sigsetjmp, void *env)
619#endif
620
621// The real interceptor for setjmp is special, and implemented in pure asm. We
622// just need to initialize the REAL functions so that they can be used in asm.
623static void InitializeSetjmpInterceptors() {
  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it does
  // &setjmp and setjmp is not present in some versions of libc.
  using __interception::InterceptFunction;
  InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname),
                    (uptr*)&REAL(setjmp_symname), 0, 0);
  InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
  InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname),
                    (uptr*)&REAL(sigsetjmp_symname), 0, 0);
#if !SANITIZER_NETBSD
  InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
633#endif
634}
635#endif // SANITIZER_APPLE
636
637#if SANITIZER_NETBSD
638#define longjmp_symname __longjmp14
639#define siglongjmp_symname __siglongjmp14
640#else
641#define longjmp_symname longjmp
642#define siglongjmp_symname siglongjmp
643#endif
644
645TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
646 // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
647 // bad things will happen. We will jump over ScopedInterceptor dtor and can
648 // leave thr->in_ignored_lib set.
649 {
650 SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
651 }
  LongJmp(cur_thread(), env);
653 REAL(longjmp_symname)(env, val);
654}
655
656TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
657 {
658 SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
659 }
  LongJmp(cur_thread(), env);
661 REAL(siglongjmp_symname)(env, val);
662}
663
664#if SANITIZER_NETBSD
665TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
666 {
667 SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
668 }
669 LongJmp(cur_thread(), env);
670 REAL(_longjmp)(env, val);
671}
672#endif
673
674#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
  if (in_symbolizer())
    return InternalAlloc(size);
  if (DlsymAlloc::Use())
    return DlsymAlloc::Allocate(size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(malloc, size);
    p = user_alloc(thr, pc, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}
688
// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
// __libc_memalign so that (1) we can detect races on them and (2) free will
// not be called on blocks allocated internally by libc.
692TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
693 SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
694 return user_memalign(thr, pc, align, sz);
695}
696
TSAN_INTERCEPTOR(void *, calloc, uptr n, uptr size) {
  if (in_symbolizer())
    return InternalCalloc(n, size);
  if (DlsymAlloc::Use())
    return DlsymAlloc::Callocate(n, size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(calloc, n, size);
    p = user_calloc(thr, pc, size, n);
  }
  invoke_malloc_hook(p, n * size);
  return p;
}
710
TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
  if (in_symbolizer())
    return InternalRealloc(p, size);
  if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(p))
    return DlsymAlloc::Realloc(p, size);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
    p = user_realloc(thr, pc, p, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}
725
TSAN_INTERCEPTOR(void *, reallocarray, void *p, uptr n, uptr size) {
  if (in_symbolizer())
    return InternalReallocArray(p, n, size);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(reallocarray, p, n, size);
    p = user_reallocarray(thr, pc, p, size, n);
  }
  invoke_malloc_hook(p, size);
  return p;
}
738
TSAN_INTERCEPTOR(void, free, void *p) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(p))
    return DlsymAlloc::Free(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free, p);
  user_free(thr, pc, p);
}
750
751# if SANITIZER_INTERCEPT_FREE_SIZED
TSAN_INTERCEPTOR(void, free_sized, void *p, uptr size) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(p))
    return DlsymAlloc::Free(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free_sized, p, size);
  user_free(thr, pc, p);
}
763# define TSAN_MAYBE_INTERCEPT_FREE_SIZED INTERCEPT_FUNCTION(free_sized)
764# else
765# define TSAN_MAYBE_INTERCEPT_FREE_SIZED
766# endif
767
768# if SANITIZER_INTERCEPT_FREE_ALIGNED_SIZED
TSAN_INTERCEPTOR(void, free_aligned_sized, void *p, uptr alignment, uptr size) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(p))
    return DlsymAlloc::Free(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free_aligned_sized, p, alignment, size);
  user_free(thr, pc, p);
}
780# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED \
781 INTERCEPT_FUNCTION(free_aligned_sized)
782# else
783# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
784# endif
785
TSAN_INTERCEPTOR(void, cfree, void *p) {
  if (UNLIKELY(!p))
    return;
  if (in_symbolizer())
    return InternalFree(p);
  if (DlsymAlloc::PointerIsMine(p))
    return DlsymAlloc::Free(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(cfree, p);
  user_free(thr, pc, p);
}
797
798TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
799 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
800 return user_alloc_usable_size(p);
801}
802#else
803# define TSAN_MAYBE_INTERCEPT_FREE_SIZED
804# define TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED
805#endif
806
TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
  SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
  uptr srclen = internal_strlen(src);
  MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
  MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
  return REAL(strcpy)(dst, src);
}

TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, usize n) {
  SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
  uptr srclen = internal_strnlen(src, n);
  MemoryAccessRange(thr, pc, (uptr)dst, n, true);
  MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
  return REAL(strncpy)(dst, src, n);
}
822
823TSAN_INTERCEPTOR(char*, strdup, const char *str) {
824 SCOPED_TSAN_INTERCEPTOR(strdup, str);
825 // strdup will call malloc, so no instrumentation is required here.
826 return REAL(strdup)(str);
827}
828
829// Zero out addr if it points into shadow memory and was provided as a hint
830// only, i.e., MAP_FIXED is not set.
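// (The mapping has to lie entirely within the application address range for
// tsan to be able to maintain shadow for it; hint addresses outside that range
// are therefore dropped so the kernel picks another spot, while MAP_FIXED
// requests for such addresses fail with EINVAL.)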
static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
  if (*addr) {
    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
      if (flags & MAP_FIXED) {
        errno = errno_EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}
844
template <class Mmap>
static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
                              void *addr, SIZE_T sz, int prot, int flags,
                              int fd, OFF64_T off) {
  if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
  void *res = real_mmap(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
      Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
             addr, (void*)sz, res);
      Die();
    }
    if (fd > 0) FdAccess(thr, pc, fd);
    MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
  }
  return res;
}
862
863template <class Munmap>
864static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
865 void *addr, SIZE_T sz) {
  UnmapShadow(thr, (uptr)addr, sz);
867 int res = real_munmap(addr, sz);
868 return res;
869}
870
871#if SANITIZER_LINUX
872TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
873 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
874 return user_memalign(thr, pc, align, sz);
875}
876#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
877#else
878#define TSAN_MAYBE_INTERCEPT_MEMALIGN
879#endif
880
881#if !SANITIZER_APPLE
882TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
883 if (in_symbolizer())
    return InternalAlloc(sz, nullptr, align);
885 SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
886 return user_aligned_alloc(thr, pc, align, sz);
887}
888
889TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
890 if (in_symbolizer())
    return InternalAlloc(sz, nullptr, GetPageSizeCached());
892 SCOPED_INTERCEPTOR_RAW(valloc, sz);
893 return user_valloc(thr, pc, sz);
894}
895#endif
896
897#if SANITIZER_LINUX
898TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
899 if (in_symbolizer()) {
900 uptr PageSize = GetPageSizeCached();
    sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
    return InternalAlloc(sz, nullptr, PageSize);
903 }
904 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
905 return user_pvalloc(thr, pc, sz);
906}
907#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
908#else
909#define TSAN_MAYBE_INTERCEPT_PVALLOC
910#endif
911
912#if !SANITIZER_APPLE
913TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
914 if (in_symbolizer()) {
    void *p = InternalAlloc(sz, nullptr, align);
916 if (!p)
917 return errno_ENOMEM;
918 *memptr = p;
919 return 0;
920 }
921 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
922 return user_posix_memalign(thr, pc, memptr, align, sz);
923}
924#endif
925
926// Both __cxa_guard_acquire and pthread_once 0-initialize
927// the object initially. pthread_once does not have any
928// other ABI requirements. __cxa_guard_acquire assumes
929// that any non-0 value in the first byte means that
930// initialization is completed. Contents of the remaining
931// bytes are up to us.
932constexpr u32 kGuardInit = 0;
933constexpr u32 kGuardDone = 1;
934constexpr u32 kGuardRunning = 1 << 16;
935constexpr u32 kGuardWaiter = 1 << 17;
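// Together these form a small state machine used by guard_acquire and
// guard_release below:
//   kGuardInit --(first caller)--> kGuardRunning (+ kGuardWaiter if contended)
//   kGuardRunning --(release)--> kGuardDone, or --(abort)--> kGuardInit
// The kGuardWaiter bit tells the releasing thread that it must FutexWake the
// waiters.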
936
static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                         bool blocking_hooks = true) {
  bool in_potentially_blocking_region = false;
  auto on_exit = at_scope_exit([&] {
    if (in_potentially_blocking_region)
      OnPotentiallyBlockingRegionEnd();
  });

  for (;;) {
    u32 cmp = atomic_load(g, memory_order_acquire);
    if (cmp == kGuardInit) {
      if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
                                         memory_order_relaxed))
        return 1;
    } else if (cmp == kGuardDone) {
      if (!thr->in_ignored_lib)
        Acquire(thr, pc, (uptr)g);
      return 0;
    } else {
      if ((cmp & kGuardWaiter) ||
          atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
                                         memory_order_relaxed)) {
        if (blocking_hooks && !in_potentially_blocking_region) {
          in_potentially_blocking_region = true;
          OnPotentiallyBlockingRegionBegin();
        }
        FutexWait(g, cmp | kGuardWaiter);
      }
    }
  }
}
968
static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                          u32 v) {
  if (!thr->in_ignored_lib)
    Release(thr, pc, (uptr)g);
  u32 old = atomic_exchange(g, v, memory_order_release);
  if (old & kGuardWaiter)
    FutexWake(g, 1 << 30);
}
977
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are deliberately defined as weak functions (so that they don't
// cause link errors when the user defines them as well), so they silently
// auto-disable themselves when such a symbol is already present in the binary.
// If we link libstdc++ statically, it will bring its own __cxa_guard_acquire,
// which will silently replace our interceptor. That's why on Linux we simply
// export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
988#if SANITIZER_APPLE
989#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
990#else
991#define STDCXX_INTERCEPTOR(rettype, name, ...) \
992 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
993#endif
994
995// Used in thread-safe function static initialization.
996STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
997 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
998 return guard_acquire(thr, pc, g);
999}
1000
1001STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
1002 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  guard_release(thr, pc, g, kGuardDone);
1004}
1005
1006STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
1007 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  guard_release(thr, pc, g, kGuardInit);
1009}
1010
1011namespace __tsan {
1012void DestroyThreadState() {
1013 ThreadState *thr = cur_thread();
1014 Processor *proc = thr->proc();
1015 ThreadFinish(thr);
1016 ProcUnwire(proc, thr);
1017 ProcDestroy(proc);
1018 DTLS_Destroy();
1019 cur_thread_finalize();
1020}
1021
void PlatformCleanUpThreadState(ThreadState *thr) {
  ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
      &thr->signal_ctx, memory_order_relaxed);
  if (sctx) {
    atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
    sctx->oldset.Reset();
    UnmapOrDie(sctx, sizeof(*sctx));
  }
}
1031} // namespace __tsan
1032
1033#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
1034static void thread_finalize(void *v) {
1035 uptr iter = (uptr)v;
1036 if (iter > 1) {
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
                            (void*)(iter - 1))) {
      Printf("ThreadSanitizer: failed to set thread key\n");
1040 Die();
1041 }
1042 return;
1043 }
1044 DestroyThreadState();
1045}
1046#endif
1047
1048
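// Handshake between the pthread_create interceptor and __tsan_thread_start_func:
// the creator posts 'created' only after ThreadCreate has registered the new
// thread, and the child posts 'started' only after ThreadStart has run; see the
// comment in the pthread_create interceptor for why both directions matter.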
1049struct ThreadParam {
1050 void* (*callback)(void *arg);
1051 void *param;
1052 Tid tid;
1053 Semaphore created;
1054 Semaphore started;
1055};
1056
1057extern "C" void *__tsan_thread_start_func(void *arg) {
1058 ThreadParam *p = (ThreadParam*)arg;
1059 void* (*callback)(void *arg) = p->callback;
1060 void *param = p->param;
1061 {
1062 ThreadState *thr = cur_thread_init();
1063 // Thread-local state is not initialized yet.
1064 ScopedIgnoreInterceptors ignore;
1065#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
    ThreadIgnoreBegin(thr, 0);
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
                            (void *)GetPthreadDestructorIterations())) {
      Printf("ThreadSanitizer: failed to set thread key\n");
1070 Die();
1071 }
1072 ThreadIgnoreEnd(thr);
1073#endif
1074 p->created.Wait();
1075 Processor *proc = ProcCreate();
1076 ProcWire(proc, thr);
    ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1078 p->started.Post();
1079 }
1080 void *res = callback(param);
1081 // Prevent the callback from being tail called,
1082 // it mixes up stack traces.
1083 volatile int foo = 42;
1084 foo++;
1085 return res;
1086}
1087
1088TSAN_INTERCEPTOR(int, pthread_create,
1089 void *th, void *attr, void *(*callback)(void*), void * param) {
1090 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1091
1092 MaybeSpawnBackgroundThread();
1093
1094 if (ctx->after_multithreaded_fork) {
1095 if (flags()->die_after_fork) {
      Report("ThreadSanitizer: starting new threads after multi-threaded "
1097 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1098 Die();
1099 } else {
1100 VPrintf(1,
1101 "ThreadSanitizer: starting new threads after multi-threaded "
1102 "fork is not supported (pid %lu). Continuing because of "
1103 "die_after_fork=0, but you are on your own\n",
1104 internal_getpid());
1105 }
1106 }
1107 __sanitizer_pthread_attr_t myattr;
1108 if (attr == 0) {
    pthread_attr_init(&myattr);
1110 attr = &myattr;
1111 }
1112 int detached = 0;
1113 REAL(pthread_attr_getdetachstate)(attr, &detached);
1114 AdjustStackSize(attr);
1115
1116 ThreadParam p;
1117 p.callback = callback;
1118 p.param = param;
1119 p.tid = kMainTid;
1120 int res = -1;
1121 {
1122 // Otherwise we see false positives in pthread stack manipulation.
1123 ScopedIgnoreInterceptors ignore;
1124 ThreadIgnoreBegin(thr, pc);
1125 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1126 ThreadIgnoreEnd(thr);
1127 }
1128 if (res == 0) {
    p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1130 CHECK_NE(p.tid, kMainTid);
1131 // Synchronization on p.tid serves two purposes:
1132 // 1. ThreadCreate must finish before the new thread starts.
1133 // Otherwise the new thread can call pthread_detach, but the pthread_t
1134 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
1135 // 2. ThreadStart must finish before this thread continues.
1136 // Otherwise, this thread can call pthread_detach and reset thr->sync
1137 // before the new thread got a chance to acquire from it in ThreadStart.
1138 p.created.Post();
1139 p.started.Wait();
1140 }
1141 if (attr == &myattr)
    pthread_attr_destroy(&myattr);
1143 return res;
1144}
1145
1146TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1147 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1149 ThreadIgnoreBegin(thr, pc);
1150 int res = BLOCK_REAL(pthread_join)(th, ret);
1151 ThreadIgnoreEnd(thr);
1152 if (res == 0) {
1153 ThreadJoin(thr, pc, tid);
1154 }
1155 return res;
1156}
1157
1158// DEFINE_INTERNAL_PTHREAD_FUNCTIONS
1159namespace __sanitizer {
1160int internal_pthread_create(void *th, void *attr, void *(*callback)(void *),
1161 void *param) {
1162 ScopedIgnoreInterceptors ignore;
1163 return REAL(pthread_create)(th, attr, callback, param);
1164}
1165int internal_pthread_join(void *th, void **ret) {
1166 ScopedIgnoreInterceptors ignore;
1167 return REAL(pthread_join)(th, ret);
1168}
1169} // namespace __sanitizer
1170
1171TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1172 SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1174 int res = REAL(pthread_detach)(th);
1175 if (res == 0) {
1176 ThreadDetach(thr, pc, tid);
1177 }
1178 return res;
1179}
1180
1181TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1182 {
1183 SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1184#if !SANITIZER_APPLE && !SANITIZER_ANDROID
1185 CHECK_EQ(thr, &cur_thread_placeholder);
1186#endif
1187 }
1188 REAL(pthread_exit)(retval);
1189}
1190
1191#if SANITIZER_LINUX
1192TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1193 SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1195 ThreadIgnoreBegin(thr, pc);
1196 int res = REAL(pthread_tryjoin_np)(th, ret);
1197 ThreadIgnoreEnd(thr);
1198 if (res == 0)
1199 ThreadJoin(thr, pc, tid);
1200 else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
1202 return res;
1203}
1204
1205TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1206 const struct timespec *abstime) {
1207 SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1209 ThreadIgnoreBegin(thr, pc);
1210 int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1211 ThreadIgnoreEnd(thr);
1212 if (res == 0)
1213 ThreadJoin(thr, pc, tid);
1214 else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
1216 return res;
1217}
1218#endif
1219
// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has different size in the different versions.
// If we call the new REAL functions for an old pthread_cond_t, they will
// corrupt memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions for a new pthread_cond_t, we will lose
// some functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require 2 versions of the interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
// that allows TSan to work with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
1235static void *init_cond(void *c, bool force = false) {
1236 // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1237 // So we allocate additional memory on the side large enough to hold
1238 // any pthread_cond_t object. Always call new REAL functions, but pass
1239 // the aux object to them.
1240 // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1241 // first word of pthread_cond_t to zero.
1242 // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(p, memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
  internal_memset(newcond, 0, pthread_cond_t_sz);
  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
                                     memory_order_acq_rel))
    return newcond;
  WRAP(free)(newcond);
  return (void*)cond;
1256}
1257
1258namespace {
1259
1260template <class Fn>
1261struct CondMutexUnlockCtx {
1262 ScopedInterceptor *si;
1263 ThreadState *thr;
1264 uptr pc;
1265 void *m;
1266 void *c;
1267 const Fn &fn;
1268
1269 int Cancel() const { return fn(); }
1270 void Unlock() const;
1271};
1272
1273template <class Fn>
1274void CondMutexUnlockCtx<Fn>::Unlock() const {
1275 // pthread_cond_wait interceptor has enabled async signal delivery
1276 // (see BlockingCall below). Disable async signals since we are running
1277 // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1278 // since the thread is cancelled, so we have to manually execute them
1279 // (the thread still can run some user code due to pthread_cleanup_push).
1280 CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1283 // Undo BlockingCall ctor effects.
1284 thr->ignore_interceptors--;
1285 si->~ScopedInterceptor();
1286}
1287} // namespace
1288
INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  return REAL(pthread_cond_init)(cond, a);
}
1295
1296template <class Fn>
1297int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1298 void *c, void *m) {
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
1301 int res = 0;
1302 // This ensures that we handle mutex lock even in case of pthread_cancel.
1303 // See test/tsan/cond_cancel.cpp.
1304 {
1305 // Enable signal delivery while the thread is blocked.
1306 BlockingCall bc(thr);
1307 CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1308 res = call_pthread_cancel_with_cleanup(
1309 [](void *arg) -> int {
1310 return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1311 },
1312 [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1313 &arg);
1314 }
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1317 return res;
1318}
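// In essence, cond_wait above models the wait for race detection purposes as:
// release the mutex, block with asynchronous signal delivery enabled (the
// BlockingCall), then re-acquire the mutex. The Unlock lambda serves as the
// cleanup handler run by call_pthread_cancel_with_cleanup if the thread is
// cancelled while waiting, so the same re-lock bookkeeping still happens.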
1319
1320INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1321 void *cond = init_cond(c);
1322 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  return cond_wait(
      thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
      m);
1326}
1327
1328INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1329 void *cond = init_cond(c);
1330 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  return cond_wait(
      thr, pc, &si,
      [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
      m);
1335}
1336
1337#if SANITIZER_LINUX
1338INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1339 __sanitizer_clockid_t clock, void *abstime) {
1340 void *cond = init_cond(c);
1341 SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1342 return cond_wait(
1343 thr, pc, si: &si,
1344 fn: [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1345 c: cond, m);
1346}
1347#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1348#else
1349#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1350#endif
1351
1352#if SANITIZER_APPLE
1353INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1354 void *reltime) {
1355 void *cond = init_cond(c);
1356 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1357 return cond_wait(
1358 thr, pc, &si,
1359 [=]() {
1360 return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1361 },
1362 cond, m);
1363}
1364#endif
1365
1366INTERCEPTOR(int, pthread_cond_signal, void *c) {
1367 void *cond = init_cond(c);
1368 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1369 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
1370 return REAL(pthread_cond_signal)(cond);
1371}
1372
1373INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1374 void *cond = init_cond(c);
1375 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1376 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
1377 return REAL(pthread_cond_broadcast)(cond);
1378}
1379
1380INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1381 void *cond = init_cond(c);
1382 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1383 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: true);
1384 int res = REAL(pthread_cond_destroy)(cond);
1385 if (common_flags()->legacy_pthread_cond) {
1386 // Free our aux cond and zero the pointer to not leave dangling pointers.
1387 WRAP(free)(p: cond);
1388 atomic_store(a: (atomic_uintptr_t*)c, v: 0, mo: memory_order_relaxed);
1389 }
1390 return res;
1391}
1392
1393TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1394 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1395 int res = REAL(pthread_mutex_init)(m, a);
1396 if (res == 0) {
1397 u32 flagz = 0;
1398 if (a) {
1399 int type = 0;
1400 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1401 if (type == PTHREAD_MUTEX_RECURSIVE ||
1402 type == PTHREAD_MUTEX_RECURSIVE_NP)
1403 flagz |= MutexFlagWriteReentrant;
1404 }
1405 MutexCreate(thr, pc, addr: (uptr)m, flagz);
1406 }
1407 return res;
1408}
1409
1410TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1411 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1412 int res = REAL(pthread_mutex_destroy)(m);
1413 if (res == 0 || res == errno_EBUSY) {
1414 MutexDestroy(thr, pc, addr: (uptr)m);
1415 }
1416 return res;
1417}
1418
1419TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1420 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1421 MutexPreLock(thr, pc, addr: (uptr)m);
1422 int res = BLOCK_REAL(pthread_mutex_lock)(m);
1423 if (res == errno_EOWNERDEAD)
1424 MutexRepair(thr, pc, addr: (uptr)m);
1425 if (res == 0 || res == errno_EOWNERDEAD)
1426 MutexPostLock(thr, pc, addr: (uptr)m);
1427 if (res == errno_EINVAL)
1428 MutexInvalidAccess(thr, pc, addr: (uptr)m);
1429 return res;
1430}
1431
1432TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1433 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1434 int res = REAL(pthread_mutex_trylock)(m);
1435 if (res == errno_EOWNERDEAD)
1436 MutexRepair(thr, pc, addr: (uptr)m);
1437 if (res == 0 || res == errno_EOWNERDEAD)
1438 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1439 return res;
1440}
1441
1442#if !SANITIZER_APPLE
1443TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1444 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1445 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1446 if (res == 0) {
1447 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1448 }
1449 return res;
1450}
1451#endif
1452
1453TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1454 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1455 MutexUnlock(thr, pc, addr: (uptr)m);
1456 int res = REAL(pthread_mutex_unlock)(m);
1457 if (res == errno_EINVAL)
1458 MutexInvalidAccess(thr, pc, addr: (uptr)m);
1459 return res;
1460}
1461
1462#if SANITIZER_LINUX
1463TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
1464 __sanitizer_clockid_t clock, void *abstime) {
1465 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
1466 MutexPreLock(thr, pc, addr: (uptr)m);
1467 int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
1468 if (res == errno_EOWNERDEAD)
1469 MutexRepair(thr, pc, addr: (uptr)m);
1470 if (res == 0 || res == errno_EOWNERDEAD)
1471 MutexPostLock(thr, pc, addr: (uptr)m);
1472 if (res == errno_EINVAL)
1473 MutexInvalidAccess(thr, pc, addr: (uptr)m);
1474 return res;
1475}
1476#endif
1477
1478#if SANITIZER_GLIBC
1479# if !__GLIBC_PREREQ(2, 34)
1480// glibc 2.34 applies a non-default version for the two functions. They are no
1481// longer expected to be intercepted by programs.
1482TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1483 SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1484 MutexPreLock(thr, pc, (uptr)m);
1485 int res = BLOCK_REAL(__pthread_mutex_lock)(m);
1486 if (res == errno_EOWNERDEAD)
1487 MutexRepair(thr, pc, (uptr)m);
1488 if (res == 0 || res == errno_EOWNERDEAD)
1489 MutexPostLock(thr, pc, (uptr)m);
1490 if (res == errno_EINVAL)
1491 MutexInvalidAccess(thr, pc, (uptr)m);
1492 return res;
1493}
1494
1495TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1496 SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1497 MutexUnlock(thr, pc, (uptr)m);
1498 int res = REAL(__pthread_mutex_unlock)(m);
1499 if (res == errno_EINVAL)
1500 MutexInvalidAccess(thr, pc, (uptr)m);
1501 return res;
1502}
1503# endif
1504#endif
1505
1506#if !SANITIZER_APPLE
1507TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1508 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1509 int res = REAL(pthread_spin_init)(m, pshared);
1510 if (res == 0) {
1511 MutexCreate(thr, pc, addr: (uptr)m);
1512 }
1513 return res;
1514}
1515
1516TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1517 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1518 int res = REAL(pthread_spin_destroy)(m);
1519 if (res == 0) {
1520 MutexDestroy(thr, pc, addr: (uptr)m);
1521 }
1522 return res;
1523}
1524
1525TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1526 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1527 MutexPreLock(thr, pc, addr: (uptr)m);
1528 int res = BLOCK_REAL(pthread_spin_lock)(m);
1529 if (res == 0) {
1530 MutexPostLock(thr, pc, addr: (uptr)m);
1531 }
1532 return res;
1533}
1534
1535TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1536 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1537 int res = REAL(pthread_spin_trylock)(m);
1538 if (res == 0) {
1539 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1540 }
1541 return res;
1542}
1543
1544TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1545 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1546 MutexUnlock(thr, pc, addr: (uptr)m);
1547 int res = REAL(pthread_spin_unlock)(m);
1548 return res;
1549}
1550#endif
1551
1552TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1553 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1554 int res = REAL(pthread_rwlock_init)(m, a);
1555 if (res == 0) {
1556 MutexCreate(thr, pc, addr: (uptr)m);
1557 }
1558 return res;
1559}
1560
1561TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1562 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1563 int res = REAL(pthread_rwlock_destroy)(m);
1564 if (res == 0) {
1565 MutexDestroy(thr, pc, addr: (uptr)m);
1566 }
1567 return res;
1568}
1569
1570TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1571 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1572 MutexPreReadLock(thr, pc, addr: (uptr)m);
1573 int res = REAL(pthread_rwlock_rdlock)(m);
1574 if (res == 0) {
1575 MutexPostReadLock(thr, pc, addr: (uptr)m);
1576 }
1577 return res;
1578}
1579
1580TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1581 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1582 int res = REAL(pthread_rwlock_tryrdlock)(m);
1583 if (res == 0) {
1584 MutexPostReadLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1585 }
1586 return res;
1587}
1588
1589#if !SANITIZER_APPLE
1590TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1591 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1592 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1593 if (res == 0) {
1594 MutexPostReadLock(thr, pc, addr: (uptr)m);
1595 }
1596 return res;
1597}
1598#endif
1599
1600TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1601 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1602 MutexPreLock(thr, pc, addr: (uptr)m);
1603 int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
1604 if (res == 0) {
1605 MutexPostLock(thr, pc, addr: (uptr)m);
1606 }
1607 return res;
1608}
1609
1610TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1611 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1612 int res = REAL(pthread_rwlock_trywrlock)(m);
1613 if (res == 0) {
1614 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1615 }
1616 return res;
1617}
1618
1619#if !SANITIZER_APPLE
1620TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1621 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1622 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1623 if (res == 0) {
1624 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1625 }
1626 return res;
1627}
1628#endif
1629
1630TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1631 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1632 MutexReadOrWriteUnlock(thr, pc, addr: (uptr)m);
1633 int res = REAL(pthread_rwlock_unlock)(m);
1634 return res;
1635}
1636
1637#if !SANITIZER_APPLE
1638TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1639 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1640  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1641 int res = REAL(pthread_barrier_init)(b, a, count);
1642 return res;
1643}
1644
1645TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1646 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1647  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1648 int res = REAL(pthread_barrier_destroy)(b);
1649 return res;
1650}
1651
1652TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1653 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1654  Release(thr, pc, (uptr)b);
1655  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1656  int res = REAL(pthread_barrier_wait)(b);
1657  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1658  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1659    Acquire(thr, pc, (uptr)b);
1660 }
1661 return res;
1662}
1663#endif
1664
1665TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1666 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1667 if (o == 0 || f == 0)
1668 return errno_EINVAL;
1669 atomic_uint32_t *a;
1670
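  // The 32-bit state word we use as the once-guard is not at offset 0 of
  // pthread_once_t on every platform: on Apple and NetBSD it follows a
  // platform-specific leading field, which is skipped below via sizeof(long_t)
  // and pthread_mutex_t_sz respectively.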
1671 if (SANITIZER_APPLE)
1672 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1673 else if (SANITIZER_NETBSD)
1674 a = static_cast<atomic_uint32_t*>
1675 ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1676 else
1677 a = static_cast<atomic_uint32_t*>(o);
1678
1679  // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
1680  // results in crashes due to too little stack space.
1681  if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1682    (*f)();
1683    guard_release(thr, pc, a, kGuardDone);
1684 }
1685 return 0;
1686}
1687
1688#if SANITIZER_GLIBC
1689TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1690 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1691 if (fd > 0)
1692 FdAccess(thr, pc, fd);
1693 return REAL(__fxstat)(version, fd, buf);
1694}
1695
1696TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1697 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1698 if (fd > 0)
1699 FdAccess(thr, pc, fd);
1700 return REAL(__fxstat64)(version, fd, buf);
1701}
1702#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
1703#else
1704#define TSAN_MAYBE_INTERCEPT___FXSTAT
1705#endif
1706
1707#if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
1708TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1709 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1710 if (fd > 0)
1711 FdAccess(thr, pc, fd);
1712 return REAL(fstat)(fd, buf);
1713}
1714# define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
1715#else
1716# define TSAN_MAYBE_INTERCEPT_FSTAT
1717#endif
1718
1719#if __GLIBC_PREREQ(2, 33)
1720TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1721 SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
1722 if (fd > 0)
1723 FdAccess(thr, pc, fd);
1724 return REAL(fstat64)(fd, buf);
1725}
1726# define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1727#else
1728# define TSAN_MAYBE_INTERCEPT_FSTAT64
1729#endif
1730
1731TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1732 mode_t mode = 0;
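  // The variadic mode argument is only present when oflag asks for it
  // (OpenReadsVaArgs checks for flags such as O_CREAT); reading it
  // unconditionally would read a nonexistent argument.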
1733 if (OpenReadsVaArgs(oflag)) {
1734 va_list ap;
1735 va_start(ap, oflag);
1736 mode = va_arg(ap, int);
1737 va_end(ap);
1738 }
1739
1740 SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1741 READ_STRING(thr, pc, name, 0);
1742
1743 int fd;
1744 if (OpenReadsVaArgs(oflag))
1745 fd = REAL(open)(name, oflag, mode);
1746 else
1747 fd = REAL(open)(name, oflag);
1748
1749 if (fd >= 0)
1750 FdFileCreate(thr, pc, fd);
1751 return fd;
1752}
1753
1754#if SANITIZER_LINUX
1755TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1756 va_list ap;
1757 va_start(ap, oflag);
1758 mode_t mode = va_arg(ap, int);
1759 va_end(ap);
1760 SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1761 READ_STRING(thr, pc, name, 0);
1762 int fd = REAL(open64)(name, oflag, mode);
1763 if (fd >= 0)
1764 FdFileCreate(thr, pc, fd);
1765 return fd;
1766}
1767#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1768#else
1769#define TSAN_MAYBE_INTERCEPT_OPEN64
1770#endif
1771
1772TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1773 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1774 READ_STRING(thr, pc, name, 0);
1775 int fd = REAL(creat)(name, mode);
1776 if (fd >= 0)
1777 FdFileCreate(thr, pc, fd);
1778 return fd;
1779}
1780
1781#if SANITIZER_LINUX
1782TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1783 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1784 READ_STRING(thr, pc, name, 0);
1785 int fd = REAL(creat64)(name, mode);
1786 if (fd >= 0)
1787 FdFileCreate(thr, pc, fd);
1788 return fd;
1789}
1790#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1791#else
1792#define TSAN_MAYBE_INTERCEPT_CREAT64
1793#endif
1794
1795TSAN_INTERCEPTOR(int, dup, int oldfd) {
1796 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1797 int newfd = REAL(dup)(oldfd);
1798 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1799    FdDup(thr, pc, oldfd, newfd, /*write=*/true);
1800 return newfd;
1801}
1802
1803TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1804 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1805 int newfd2 = REAL(dup2)(oldfd, newfd);
1806 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1807    FdDup(thr, pc, oldfd, newfd2, /*write=*/false);
1808 return newfd2;
1809}
1810
1811#if !SANITIZER_APPLE
1812TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1813 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1814 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1815 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1816    FdDup(thr, pc, oldfd, newfd2, /*write=*/false);
1817 return newfd2;
1818}
1819#endif
1820
1821#if SANITIZER_LINUX
1822TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1823 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1824 int fd = REAL(eventfd)(initval, flags);
1825 if (fd >= 0)
1826 FdEventCreate(thr, pc, fd);
1827 return fd;
1828}
1829#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1830#else
1831#define TSAN_MAYBE_INTERCEPT_EVENTFD
1832#endif
1833
1834#if SANITIZER_LINUX
1835TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1836 SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1837 FdClose(thr, pc, fd);
1838 fd = REAL(signalfd)(fd, mask, flags);
1839 if (!MustIgnoreInterceptor(thr))
1840 FdSignalCreate(thr, pc, fd);
1841 return fd;
1842}
1843#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1844#else
1845#define TSAN_MAYBE_INTERCEPT_SIGNALFD
1846#endif
1847
1848#if SANITIZER_LINUX
1849TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1850 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1851 int fd = REAL(inotify_init)(fake);
1852 if (fd >= 0)
1853 FdInotifyCreate(thr, pc, fd);
1854 return fd;
1855}
1856#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1857#else
1858#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1859#endif
1860
1861#if SANITIZER_LINUX
1862TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1863 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1864 int fd = REAL(inotify_init1)(flags);
1865 if (fd >= 0)
1866 FdInotifyCreate(thr, pc, fd);
1867 return fd;
1868}
1869#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1870#else
1871#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1872#endif
1873
1874TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1875 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1876 int fd = REAL(socket)(domain, type, protocol);
1877 if (fd >= 0)
1878 FdSocketCreate(thr, pc, fd);
1879 return fd;
1880}
1881
1882TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1883 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1884 int res = REAL(socketpair)(domain, type, protocol, fd);
1885 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1886    FdPipeCreate(thr, pc, fd[0], fd[1]);
1887 return res;
1888}
1889
1890TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1891 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1892 FdSocketConnecting(thr, pc, fd);
1893 int res = REAL(connect)(fd, addr, addrlen);
1894 if (res == 0 && fd >= 0)
1895 FdSocketConnect(thr, pc, fd);
1896 return res;
1897}
1898
1899TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1900 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1901 int res = REAL(bind)(fd, addr, addrlen);
1902 if (fd > 0 && res == 0)
1903 FdAccess(thr, pc, fd);
1904 return res;
1905}
1906
1907TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1908 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1909 int res = REAL(listen)(fd, backlog);
1910 if (fd > 0 && res == 0)
1911 FdAccess(thr, pc, fd);
1912 return res;
1913}
1914
1915TSAN_INTERCEPTOR(int, close, int fd) {
1916 SCOPED_INTERCEPTOR_RAW(close, fd);
1917 if (!in_symbolizer())
1918 FdClose(thr, pc, fd);
1919 return REAL(close)(fd);
1920}
1921
1922#if SANITIZER_LINUX
1923TSAN_INTERCEPTOR(int, __close, int fd) {
1924 SCOPED_INTERCEPTOR_RAW(__close, fd);
1925 FdClose(thr, pc, fd);
1926 return REAL(__close)(fd);
1927}
1928#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1929#else
1930#define TSAN_MAYBE_INTERCEPT___CLOSE
1931#endif
1932
1933// glibc guts
1934#if SANITIZER_LINUX && !SANITIZER_ANDROID
1935TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1936 SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1937 int fds[64];
1938 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1939  for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1940 REAL(__res_iclose)(state, free_addr);
1941}
1942#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1943#else
1944#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1945#endif
1946
1947TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1948 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1949 int res = REAL(pipe)(pipefd);
1950 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1951    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1952 return res;
1953}
1954
1955#if !SANITIZER_APPLE
1956TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1957 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1958 int res = REAL(pipe2)(pipefd, flags);
1959 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1960    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1961 return res;
1962}
1963#endif
1964
1965TSAN_INTERCEPTOR(int, unlink, char *path) {
1966 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1967  Release(thr, pc, File2addr(path));
1968 int res = REAL(unlink)(path);
1969 return res;
1970}
1971
1972TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1973 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1974 void *res = REAL(tmpfile)(fake);
1975 if (res) {
1976    int fd = fileno_unlocked(res);
1977 if (fd >= 0)
1978 FdFileCreate(thr, pc, fd);
1979 }
1980 return res;
1981}
1982
1983#if SANITIZER_LINUX
1984TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1985 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1986 void *res = REAL(tmpfile64)(fake);
1987 if (res) {
1988    int fd = fileno_unlocked(res);
1989 if (fd >= 0)
1990 FdFileCreate(thr, pc, fd);
1991 }
1992 return res;
1993}
1994#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1995#else
1996#define TSAN_MAYBE_INTERCEPT_TMPFILE64
1997#endif
1998
1999static void FlushStreams() {
2000 // Flushing all the streams here may freeze the process if a child thread is
2001 // performing file stream operations at the same time.
2002 REAL(fflush)(stdout);
2003 REAL(fflush)(stderr);
2004}
2005
2006TSAN_INTERCEPTOR(void, abort, int fake) {
2007 SCOPED_TSAN_INTERCEPTOR(abort, fake);
2008 FlushStreams();
2009 REAL(abort)(fake);
2010}
2011
2012TSAN_INTERCEPTOR(int, rmdir, char *path) {
2013 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
2014  Release(thr, pc, Dir2addr(path));
2015 int res = REAL(rmdir)(path);
2016 return res;
2017}
2018
2019TSAN_INTERCEPTOR(int, closedir, void *dirp) {
2020 SCOPED_INTERCEPTOR_RAW(closedir, dirp);
2021 if (dirp) {
2022 int fd = dirfd(dirp);
2023 FdClose(thr, pc, fd);
2024 }
2025 return REAL(closedir)(dirp);
2026}
2027
2028#if SANITIZER_LINUX
2029TSAN_INTERCEPTOR(int, epoll_create, int size) {
2030 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
2031 int fd = REAL(epoll_create)(size);
2032 if (fd >= 0)
2033 FdPollCreate(thr, pc, fd);
2034 return fd;
2035}
2036
2037TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
2038 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
2039 int fd = REAL(epoll_create1)(flags);
2040 if (fd >= 0)
2041 FdPollCreate(thr, pc, fd);
2042 return fd;
2043}
2044
2045TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
2046 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
2047 if (epfd >= 0)
2048    FdAccess(thr, pc, epfd);
2049 if (epfd >= 0 && fd >= 0)
2050 FdAccess(thr, pc, fd);
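  // Registering an fd acts as a release on the epoll fd; a later successful
  // epoll_wait/epoll_pwait performs FdAcquire on it, which establishes
  // happens-before between the registration and the delivery of its events.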
2051 if (op == EPOLL_CTL_ADD && epfd >= 0) {
2052 FdPollAdd(thr, pc, epfd, fd);
2053    FdRelease(thr, pc, epfd);
2054 }
2055 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
2056 return res;
2057}
2058
2059TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
2060 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
2061 if (epfd >= 0)
2062    FdAccess(thr, pc, epfd);
2063  int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
2064  if (res > 0 && epfd >= 0)
2065    FdAcquire(thr, pc, epfd);
2066 return res;
2067}
2068
2069TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
2070 void *sigmask) {
2071 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
2072 if (epfd >= 0)
2073    FdAccess(thr, pc, epfd);
2074  int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
2075  if (res > 0 && epfd >= 0)
2076    FdAcquire(thr, pc, epfd);
2077 return res;
2078}
2079
2080TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
2081 void *sigmask) {
2082 SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
2083 // This function is new and may not be present in libc and/or kernel.
2084 // Since we effectively add it to libc (as will be probed by the program
2085 // using dlsym or a weak function pointer) we need to handle the case
2086 // when it's not present in the actual libc.
2087 if (!REAL(epoll_pwait2)) {
2088 errno = errno_ENOSYS;
2089 return -1;
2090 }
2091 if (MustIgnoreInterceptor(thr))
2092    return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2093  if (epfd >= 0)
2094    FdAccess(thr, pc, epfd);
2095  int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2096  if (res > 0 && epfd >= 0)
2097    FdAcquire(thr, pc, epfd);
2098 return res;
2099}
2100
2101# define TSAN_MAYBE_INTERCEPT_EPOLL \
2102 TSAN_INTERCEPT(epoll_create); \
2103 TSAN_INTERCEPT(epoll_create1); \
2104 TSAN_INTERCEPT(epoll_ctl); \
2105 TSAN_INTERCEPT(epoll_wait); \
2106 TSAN_INTERCEPT(epoll_pwait); \
2107 TSAN_INTERCEPT(epoll_pwait2)
2108#else
2109#define TSAN_MAYBE_INTERCEPT_EPOLL
2110#endif
2111
2112// The following functions are intercepted merely to process pending signals.
2113// If program blocks signal X, we must deliver the signal before the function
2114// returns. Similarly, if program unblocks a signal (or returns from sigsuspend)
2115// it's better to deliver the signal straight away.
2116TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2117 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2118 return REAL(sigsuspend)(mask);
2119}
2120
2121TSAN_INTERCEPTOR(int, sigblock, int mask) {
2122 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2123 return REAL(sigblock)(mask);
2124}
2125
2126TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2127 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2128 return REAL(sigsetmask)(mask);
2129}
2130
2131TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2132 __sanitizer_sigset_t *oldset) {
2133 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2134 return REAL(pthread_sigmask)(how, set, oldset);
2135}
2136
2137namespace __tsan {
2138
2139static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2140 VarSizeStackTrace stack;
2141  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
2142  // expected; OutputReport() will undo this.
2143  ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2144  ThreadRegistryLock l(&ctx->thread_registry);
2145  ScopedReport rep(ReportTypeErrnoInSignal);
2146  rep.SetSigNum(sig);
2147  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2148    rep.AddStack(stack, /*suppressable=*/true);
2149    OutputReport(thr, rep);
2150 }
2151}
2152
2153static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2154 int sig, __sanitizer_siginfo *info,
2155 void *uctx) {
2156 CHECK(thr->slot);
2157 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2158 if (acquire)
2159    Acquire(thr, 0, (uptr)&sigactions[sig]);
2160  // Signals are generally asynchronous, so if we receive a signal when
2161 // ignores are enabled we should disable ignores. This is critical for sync
2162 // and interceptors, because otherwise we can miss synchronization and report
2163 // false races.
2164 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2165 int ignore_interceptors = thr->ignore_interceptors;
2166 int ignore_sync = thr->ignore_sync;
2167 // For symbolizer we only process SIGSEGVs synchronously
2168 // (bug in symbolizer or in tsan). But we want to reset
2169 // in_symbolizer to fail gracefully. Symbolizer and user code
2170 // use different memory allocators, so if we don't reset
2171 // in_symbolizer we can get memory allocated with one being
2172  // freed with another, which can cause more crashes.
2173 int in_symbolizer = thr->in_symbolizer;
2174 if (!ctx->after_multithreaded_fork) {
2175 thr->ignore_reads_and_writes = 0;
2176 thr->fast_state.ClearIgnoreBit();
2177 thr->ignore_interceptors = 0;
2178 thr->ignore_sync = 0;
2179 thr->in_symbolizer = 0;
2180 }
2181 // Ensure that the handler does not spoil errno.
2182 const int saved_errno = errno;
2183 errno = 99;
2184 // This code races with sigaction. Be careful to not read sa_sigaction twice.
2185 // Also need to remember pc for reporting before the call,
2186 // because the handler can reset it.
2187 volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2188 ? (uptr)sigactions[sig].sigaction
2189 : (uptr)sigactions[sig].handler;
2190 if (pc != sig_dfl && pc != sig_ign) {
2191 // The callback can be either sa_handler or sa_sigaction.
2192 // They have different signatures, but we assume that passing
2193 // additional arguments to sa_handler works and is harmless.
2194 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2195 }
2196 if (!ctx->after_multithreaded_fork) {
2197 thr->ignore_reads_and_writes = ignore_reads_and_writes;
2198 if (ignore_reads_and_writes)
2199 thr->fast_state.SetIgnoreBit();
2200 thr->ignore_interceptors = ignore_interceptors;
2201 thr->ignore_sync = ignore_sync;
2202 thr->in_symbolizer = in_symbolizer;
2203 }
2204 // We do not detect errno spoiling for SIGTERM,
2205 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2206  // and tsan would report a false positive in such a case.
2207  // It's difficult to properly detect this situation (reraise),
2208  // because in the async signal processing case (when the handler is called
2209  // directly from rtl_generic_sighandler) we have not yet received the reraised
2210  // signal, and it looks too fragile to intercept all ways to reraise a signal.
2211  if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2212 errno != 99)
2213 ReportErrnoSpoiling(thr, pc, sig);
2214 errno = saved_errno;
2215}
2216
2217void ProcessPendingSignalsImpl(ThreadState *thr) {
2218  atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2219 ThreadSignalContext *sctx = SigCtx(thr);
2220 if (sctx == 0)
2221 return;
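  // Block all signals while delivering the pending ones below, so that the
  // handlers cannot re-enter this function; the previous mask is restored
  // afterwards. (Despite its name, emptyset is filled, i.e. it blocks
  // everything.)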
2222  atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2223  internal_sigfillset(&sctx->emptyset);
2224 __sanitizer_sigset_t *oldset = sctx->oldset.PushBack();
2225 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, oldset);
2226 CHECK_EQ(res, 0);
2227 for (int sig = 0; sig < kSigCount; sig++) {
2228 SignalDesc *signal = &sctx->pending_signals[sig];
2229 if (signal->armed) {
2230 signal->armed = false;
2231      CallUserSignalHandler(thr, /*sync=*/false, /*acquire=*/true, sig,
2232                            &signal->siginfo, &signal->ctx);
2233 }
2234 }
2235 res = REAL(pthread_sigmask)(SIG_SETMASK, oldset, 0);
2236 CHECK_EQ(res, 0);
2237 sctx->oldset.PopBack();
2238  atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2239}
2240
2241} // namespace __tsan
2242
2243static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2244 __sanitizer_siginfo *info) {
2245 // If we are sending signal to ourselves, we must process it now.
2246 if (sctx && sig == sctx->int_signal_send)
2247 return true;
2248#if SANITIZER_HAS_SIGINFO
2249 // POSIX timers can be configured to send any kind of signal; however, it
2250 // doesn't make any sense to consider a timer signal as synchronous!
2251 if (info->si_code == SI_TIMER)
2252 return false;
2253#endif
2254 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2255 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2256}
2257
2258void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2259 ThreadState *thr = cur_thread_init();
2260 ThreadSignalContext *sctx = SigCtx(thr);
2261 if (sig < 0 || sig >= kSigCount) {
2262 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2263 return;
2264 }
2265 // Don't mess with synchronous signals.
2266 const bool sync = is_sync_signal(sctx, sig, info);
2267 if (sync ||
2268 // If we are in blocking function, we can safely process it now
2269 // (but check if we are in a recursive interceptor,
2270 // i.e. pthread_join()->munmap()).
2271      atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2272    atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2273    if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2274      atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2275      CallUserSignalHandler(thr, sync, /*acquire=*/true, sig, info, ctx);
2276      atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2277 } else {
2278 // Be very conservative with when we do acquire in this case.
2279 // It's unsafe to do acquire in async handlers, because ThreadState
2280 // can be in inconsistent state.
2281 // SIGSYS looks relatively safe -- it's synchronous and can actually
2282 // need some global state.
2283 bool acq = (sig == SIGSYS);
2284      CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2285 }
2286    atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2287 return;
2288 }
2289
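  // An asynchronous signal that arrives outside of a blocking call is only
  // recorded here; ProcessPendingSignals() delivers it later, at a point where
  // the runtime state is known to be consistent (e.g. on interceptor exit).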
2290 if (sctx == 0)
2291 return;
2292 SignalDesc *signal = &sctx->pending_signals[sig];
2293 if (signal->armed == false) {
2294 signal->armed = true;
2295    internal_memcpy(&signal->siginfo, info, sizeof(*info));
2296    internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2297    atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2298 }
2299}
2300
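// raise/kill/pthread_kill targeted at the current thread/process record the
// signal number in int_signal_send, so that is_sync_signal() treats the
// expected signal as synchronous and the user handler runs immediately.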
2301TSAN_INTERCEPTOR(int, raise, int sig) {
2302 SCOPED_TSAN_INTERCEPTOR(raise, sig);
2303 ThreadSignalContext *sctx = SigCtx(thr);
2304 CHECK_NE(sctx, 0);
2305 int prev = sctx->int_signal_send;
2306 sctx->int_signal_send = sig;
2307 int res = REAL(raise)(sig);
2308 CHECK_EQ(sctx->int_signal_send, sig);
2309 sctx->int_signal_send = prev;
2310 return res;
2311}
2312
2313TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2314 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2315 ThreadSignalContext *sctx = SigCtx(thr);
2316 CHECK_NE(sctx, 0);
2317 int prev = sctx->int_signal_send;
2318 if (pid == (int)internal_getpid()) {
2319 sctx->int_signal_send = sig;
2320 }
2321 int res = REAL(kill)(pid, sig);
2322 if (pid == (int)internal_getpid()) {
2323 CHECK_EQ(sctx->int_signal_send, sig);
2324 sctx->int_signal_send = prev;
2325 }
2326 return res;
2327}
2328
2329TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2330 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2331 ThreadSignalContext *sctx = SigCtx(thr);
2332 CHECK_NE(sctx, 0);
2333 int prev = sctx->int_signal_send;
2334  bool self = pthread_equal(tid, pthread_self());
2335 if (self)
2336 sctx->int_signal_send = sig;
2337 int res = REAL(pthread_kill)(tid, sig);
2338 if (self) {
2339 CHECK_EQ(sctx->int_signal_send, sig);
2340 sctx->int_signal_send = prev;
2341 }
2342 return res;
2343}
2344
2345TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2346 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2347 // It's intercepted merely to process pending signals.
2348 return REAL(gettimeofday)(tv, tz);
2349}
2350
2351TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2352 void *hints, void *rv) {
2353 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2354 // We miss atomic synchronization in getaddrinfo,
2355 // and can report false race between malloc and free
2356 // inside of getaddrinfo. So ignore memory accesses.
2357 ThreadIgnoreBegin(thr, pc);
2358 int res = REAL(getaddrinfo)(node, service, hints, rv);
2359 ThreadIgnoreEnd(thr);
2360 return res;
2361}
2362
2363TSAN_INTERCEPTOR(int, fork, int fake) {
2364 if (in_symbolizer())
2365 return REAL(fork)(fake);
2366 SCOPED_INTERCEPTOR_RAW(fork, fake);
2367 return REAL(fork)(fake);
2368}
2369
2370void atfork_prepare() {
2371 if (in_symbolizer())
2372 return;
2373 ThreadState *thr = cur_thread();
2374 const uptr pc = StackTrace::GetCurrentPc();
2375 ForkBefore(thr, pc);
2376}
2377
2378void atfork_parent() {
2379 if (in_symbolizer())
2380 return;
2381 ThreadState *thr = cur_thread();
2382 const uptr pc = StackTrace::GetCurrentPc();
2383 ForkParentAfter(thr, pc);
2384}
2385
2386void atfork_child() {
2387 if (in_symbolizer())
2388 return;
2389 ThreadState *thr = cur_thread();
2390 const uptr pc = StackTrace::GetCurrentPc();
2391  ForkChildAfter(thr, pc, /*start_thread=*/true);
2392 FdOnFork(thr, pc);
2393}
2394
2395#if !SANITIZER_IOS
2396TSAN_INTERCEPTOR(int, vfork, int fake) {
2397 // Some programs (e.g. openjdk) call close for all file descriptors
2398 // in the child process. Under tsan it leads to false positives, because
2399 // address space is shared, so the parent process also thinks that
2400 // the descriptors are closed (while they are actually not).
2401 // This leads to false positives due to missed synchronization.
2402  // Strictly speaking, this is undefined behavior, because the vfork child is not
2403 // allowed to call any functions other than exec/exit. But this is what
2404 // openjdk does, so we want to handle it.
2405 // We could disable interceptors in the child process. But it's not possible
2406 // to simply intercept and wrap vfork, because vfork child is not allowed
2407 // to return from the function that calls vfork, and that's exactly what
2408 // we would do. So this would require some assembly trickery as well.
2409 // Instead we simply turn vfork into fork.
2410 return WRAP(fork)(fake);
2411}
2412#endif
2413
2414#if SANITIZER_LINUX
2415TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2416 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2417 SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2418 child_tid);
2419 struct Arg {
2420 int (*fn)(void *);
2421 void *arg;
2422 };
2423 auto wrapper = +[](void *p) -> int {
2424 auto *thr = cur_thread();
2425 uptr pc = GET_CURRENT_PC();
2426 // Start the background thread for fork, but not for clone.
2427 // For fork we did this always and it's known to work (or user code has
2428 // adopted). But if we do this for the new clone interceptor some code
2429    // (sandbox2) fails. So keep the model we used for years and don't start the
2430    // background thread after clone.
2431    ForkChildAfter(thr, pc, /*start_thread=*/false);
2432 FdOnFork(thr, pc);
2433 auto *arg = static_cast<Arg *>(p);
2434 return arg->fn(arg->arg);
2435 };
2436 ForkBefore(thr, pc);
2437  Arg arg_wrapper = {fn, arg};
2438 int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2439 child_tid);
2440 ForkParentAfter(thr, pc);
2441 return pid;
2442}
2443#endif
2444
2445#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2446typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2447 void *data);
2448struct dl_iterate_phdr_data {
2449 ThreadState *thr;
2450 uptr pc;
2451 dl_iterate_phdr_cb_t cb;
2452 void *data;
2453};
2454
2455static bool IsAppNotRodata(uptr addr) {
2456  return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2457}
2458
2459static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2460 void *data) {
2461 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2462 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2463 // accessible in dl_iterate_phdr callback. But we don't see synchronization
2464 // inside of dynamic linker, so we "unpoison" it here in order to not
2465 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2466 // because some libc functions call __libc_dlopen.
2467  if (info && IsAppNotRodata((uptr)info->dlpi_name))
2468    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2469                     internal_strlen(info->dlpi_name));
2470 int res = cbdata->cb(info, size, cbdata->data);
2471 // Perform the check one more time in case info->dlpi_name was overwritten
2472 // by user callback.
2473  if (info && IsAppNotRodata((uptr)info->dlpi_name))
2474    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2475                     internal_strlen(info->dlpi_name));
2476 return res;
2477}
2478
2479TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2480 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2481 dl_iterate_phdr_data cbdata;
2482 cbdata.thr = thr;
2483 cbdata.pc = pc;
2484 cbdata.cb = cb;
2485 cbdata.data = data;
2486 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2487 return res;
2488}
2489#endif
2490
2491static int OnExit(ThreadState *thr) {
2492 int status = Finalize(thr);
2493 FlushStreams();
2494 return status;
2495}
2496
2497#if !SANITIZER_APPLE
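// recvmsg() can transfer file descriptors in ancillary data (SCM_RIGHTS);
// register each received descriptor so that later operations on it are
// tracked. (ExtractRecvmsgFDs is assumed to parse the control messages.)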
2498static void HandleRecvmsg(ThreadState *thr, uptr pc,
2499 __sanitizer_msghdr *msg) {
2500 int fds[64];
2501 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2502 for (int i = 0; i < cnt; i++)
2503    FdEventCreate(thr, pc, fds[i]);
2504}
2505#endif
2506
2507#include "sanitizer_common/sanitizer_platform_interceptors.h"
2508// Causes interceptor recursion (getaddrinfo() and fopen())
2509#undef SANITIZER_INTERCEPT_GETADDRINFO
2510// We define our own.
2511#if SANITIZER_INTERCEPT_TLS_GET_ADDR
2512#define NEED_TLS_GET_ADDR
2513#endif
2514#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2515#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2516#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2517
2518#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2519 INTERCEPT_FUNCTION_VER(name, ver)
2520#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2521 (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2522
2523#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2524 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2525 TsanInterceptorContext _ctx = {thr, pc}; \
2526 ctx = (void *)&_ctx; \
2527 (void)ctx;
2528
2529#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2530 if (path) \
2531 Acquire(thr, pc, File2addr(path)); \
2532 if (file) { \
2533 int fd = fileno_unlocked(file); \
2534 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2535 }
2536
2537#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2538 if (file) { \
2539 int fd = fileno_unlocked(file); \
2540 FdClose(thr, pc, fd); \
2541 }
2542
2543#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2544 ({ \
2545 CheckNoDeepBind(filename, flag); \
2546 ThreadIgnoreBegin(thr, 0); \
2547 void *res = REAL(dlopen)(filename, flag); \
2548 ThreadIgnoreEnd(thr); \
2549 res; \
2550 })
2551
2552// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
2553// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
2554// intercepted calls, which can cause deadlocks with ReportRace(), which also
2555// uses this code.
2556#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2557 ({ \
2558 ScopedIgnoreInterceptors ignore_interceptors; \
2559 libignore()->OnLibraryLoaded(filename); \
2560 })
2561
2562#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2563 ({ \
2564 ScopedIgnoreInterceptors ignore_interceptors; \
2565 libignore()->OnLibraryUnloaded(); \
2566 })
2567
2568#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2569 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2570
2571#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2572 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2573
2574#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2575 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2576
2577#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2578 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2579
2580#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2581 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2582
2583#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2584 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2585
2586#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2587 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2588
2589#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2590 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2591
2592#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2593 if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2594 COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
2595 else \
2596 __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2597
2598#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2599
2600#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2601 OnExit(((TsanInterceptorContext *) ctx)->thr)
2602
2603#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
2604 off) \
2605 do { \
2606 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2607 off); \
2608 } while (false)
2609
2610#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
2611 do { \
2612 return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2613 } while (false)
2614
2615#if !SANITIZER_APPLE
2616#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2617 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2618 ((TsanInterceptorContext *)ctx)->pc, msg)
2619#endif
2620
2621#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2622 if (TsanThread *t = GetCurrentThread()) { \
2623 *begin = t->tls_begin(); \
2624 *end = t->tls_end(); \
2625 } else { \
2626 *begin = *end = 0; \
2627 }
2628
2629#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2630 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2631
2632#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2633 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2634
2635#include "sanitizer_common/sanitizer_common_interceptors.inc"
2636
2637static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2638 __sanitizer_sigaction *old);
2639static __sanitizer_sighandler_ptr signal_impl(int sig,
2640 __sanitizer_sighandler_ptr h);
2641
2642#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2643 { return sigaction_impl(signo, act, oldact); }
2644
2645#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2646 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2647
2648#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2649
2650#include "sanitizer_common/sanitizer_signal_interceptors.inc"
2651
2652int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2653 __sanitizer_sigaction *old) {
2654 // Note: if we call REAL(sigaction) directly for any reason without proxying
2655 // the signal handler through sighandler, very bad things will happen.
2656 // The handler will run synchronously and corrupt tsan per-thread state.
2657 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2658 if (sig <= 0 || sig >= kSigCount) {
2659 errno = errno_EINVAL;
2660 return -1;
2661 }
2662 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2663 __sanitizer_sigaction old_stored;
2664  if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2665 __sanitizer_sigaction newact;
2666 if (act) {
2667 // Copy act into sigactions[sig].
2668 // Can't use struct copy, because compiler can emit call to memcpy.
2669 // Can't use internal_memcpy, because it copies byte-by-byte,
2670 // and signal handler reads the handler concurrently. It can read
2671 // some bytes from old value and some bytes from new value.
2672 // Use volatile to prevent insertion of memcpy.
2673 sigactions[sig].handler =
2674 *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2675 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2676    internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2677                    sizeof(sigactions[sig].sa_mask));
2678#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2679 sigactions[sig].sa_restorer = act->sa_restorer;
2680#endif
2681    internal_memcpy(&newact, act, sizeof(newact));
2682    internal_sigfillset(&newact.sa_mask);
2683 if ((act->sa_flags & SA_SIGINFO) ||
2684 ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2685 newact.sa_flags |= SA_SIGINFO;
2686 newact.sigaction = sighandler;
2687 }
2688    ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2689 act = &newact;
2690 }
2691 int res = REAL(sigaction)(sig, act, old);
2692 if (res == 0 && old && old->sigaction == sighandler)
2693    internal_memcpy(old, &old_stored, sizeof(*old));
2694 return res;
2695}
2696
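// signal() is implemented on top of the sigaction machinery above: build a
// sigaction with the given handler and route it through sigaction_symname(),
// so the handler is proxied through sighandler like any other.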
2697static __sanitizer_sighandler_ptr signal_impl(int sig,
2698 __sanitizer_sighandler_ptr h) {
2699 __sanitizer_sigaction act;
2700 act.handler = h;
2701  internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2702 act.sa_flags = 0;
2703 __sanitizer_sigaction old;
2704  int res = sigaction_symname(sig, &act, &old);
2705 if (res) return (__sanitizer_sighandler_ptr)sig_err;
2706 return old.handler;
2707}
2708
2709#define TSAN_SYSCALL() \
2710 ThreadState *thr = cur_thread(); \
2711 if (thr->ignore_interceptors) \
2712 return; \
2713 ScopedSyscall scoped_syscall(thr)
2714
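// RAII helper for the syscall hooks: lazily initializes the thread state on
// entry and processes any pending signals when the hook returns.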
2715struct ScopedSyscall {
2716 ThreadState *thr;
2717
2718 explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2719
2720 ~ScopedSyscall() {
2721 ProcessPendingSignals(thr);
2722 }
2723};
2724
2725#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2726static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2727 TSAN_SYSCALL();
2728  MemoryAccessRange(thr, pc, p, s, write);
2729}
2730
2731static USED void syscall_acquire(uptr pc, uptr addr) {
2732 TSAN_SYSCALL();
2733 Acquire(thr, pc, addr);
2734  DPrintf("syscall_acquire(0x%zx)\n", addr);
2735}
2736
2737static USED void syscall_release(uptr pc, uptr addr) {
2738 TSAN_SYSCALL();
2739 DPrintf("syscall_release(0x%zx)\n", addr);
2740 Release(thr, pc, addr);
2741}
2742
2743static void syscall_fd_close(uptr pc, int fd) {
2744 auto *thr = cur_thread();
2745 FdClose(thr, pc, fd);
2746}
2747
2748static USED void syscall_fd_acquire(uptr pc, int fd) {
2749 TSAN_SYSCALL();
2750 FdAcquire(thr, pc, fd);
2751 DPrintf("syscall_fd_acquire(%d)\n", fd);
2752}
2753
2754static USED void syscall_fd_release(uptr pc, int fd) {
2755 TSAN_SYSCALL();
2756 DPrintf("syscall_fd_release(%d)\n", fd);
2757 FdRelease(thr, pc, fd);
2758}
2759
2760static USED void syscall_blocking_start() {
2761  DPrintf("syscall_blocking_start()\n");
2762 ThreadState *thr = cur_thread();
2763 EnterBlockingFunc(thr);
2764 // When we are in a "blocking call", we process signals asynchronously
2765 // (right when they arrive). In this context we do not expect to be
2766 // executing any user/runtime code. The known interceptor sequence when
2767 // this is not true is: pthread_join -> munmap(stack). It's fine
2768 // to ignore munmap in this case -- we handle stack shadow separately.
2769 thr->ignore_interceptors++;
2770}
2771
2772static USED void syscall_blocking_end() {
2773  DPrintf("syscall_blocking_end()\n");
2774  ThreadState *thr = cur_thread();
2775  thr->ignore_interceptors--;
2776  atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2777}
2778
2779static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2780
2781static void syscall_post_fork(uptr pc, int pid) {
2782 ThreadState *thr = cur_thread();
2783 if (pid == 0) {
2784 // child
2785    ForkChildAfter(thr, pc, /*start_thread=*/true);
2786 FdOnFork(thr, pc);
2787 } else if (pid > 0) {
2788 // parent
2789 ForkParentAfter(thr, pc);
2790 } else {
2791 // error
2792 ForkParentAfter(thr, pc);
2793 }
2794}
2795#endif
2796
2797#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2798 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2799
2800#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2801 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2802
2803#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2804 do { \
2805 (void)(p); \
2806 (void)(s); \
2807 } while (false)
2808
2809#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2810 do { \
2811 (void)(p); \
2812 (void)(s); \
2813 } while (false)
2814
2815#define COMMON_SYSCALL_ACQUIRE(addr) \
2816 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2817
2818#define COMMON_SYSCALL_RELEASE(addr) \
2819 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2820
2821#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2822
2823#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2824
2825#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2826
2827#define COMMON_SYSCALL_PRE_FORK() \
2828 syscall_pre_fork(GET_CALLER_PC())
2829
2830#define COMMON_SYSCALL_POST_FORK(res) \
2831 syscall_post_fork(GET_CALLER_PC(), res)
2832
2833#define COMMON_SYSCALL_BLOCKING_START() syscall_blocking_start()
2834#define COMMON_SYSCALL_BLOCKING_END() syscall_blocking_end()
2835
2836#include "sanitizer_common/sanitizer_common_syscalls.inc"
2837#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2838
2839#ifdef NEED_TLS_GET_ADDR
2840
2841static void handle_tls_addr(void *arg, void *res) {
2842 ThreadState *thr = cur_thread();
2843 if (!thr)
2844 return;
2845  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2846                                        thr->tls_addr + thr->tls_size);
2847 if (!dtv)
2848 return;
2849 // New DTLS block has been allocated.
2850  MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2851}
2852
2853#if !SANITIZER_S390
2854// Define own interceptor instead of sanitizer_common's for three reasons:
2855// 1. It must not process pending signals.
2856// Signal handlers may contain MOVDQA instruction (see below).
2857// 2. It must be as simple as possible to not contain MOVDQA.
2858// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2859// is empty for tsan (meant only for msan).
2860// Note: __tls_get_addr can be called with mis-aligned stack due to:
2861// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2862// So the interceptor must work with a mis-aligned stack; in particular, it must
2863// not execute MOVDQA with stack addresses.
2864TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2865 void *res = REAL(__tls_get_addr)(arg);
2866 handle_tls_addr(arg, res);
2867 return res;
2868}
2869#else // SANITIZER_S390
2870TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2871 uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2872 char *tp = static_cast<char *>(__builtin_thread_pointer());
2873 handle_tls_addr(arg, res + tp);
2874 return res;
2875}
2876#endif
2877#endif
2878
2879#if SANITIZER_NETBSD
2880TSAN_INTERCEPTOR(void, _lwp_exit) {
2881 SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2882 DestroyThreadState();
2883 REAL(_lwp_exit)();
2884}
2885#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2886#else
2887#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2888#endif
2889
2890#if SANITIZER_FREEBSD
2891TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2892 SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2893 DestroyThreadState();
2894  REAL(thr_exit)(state);
2895}
2896#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2897#else
2898#define TSAN_MAYBE_INTERCEPT_THR_EXIT
2899#endif
2900
2901TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2902TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2903TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2904TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2905TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2906TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2907TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2908TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2909TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2910TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2911TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2912TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2913TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2914TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2915TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2916TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2917TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2918TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2919TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2920
2921TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2922TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2923TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2924TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2925TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2926TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2927TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2928TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2929TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2930TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2931TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2932TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2933TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2934TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2935TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2936TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2937TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2938TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2939TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2940 void *c)
2941
2942namespace __tsan {
2943
2944static void finalize(void *arg) {
2945 ThreadState *thr = cur_thread();
2946 int status = Finalize(thr);
2947 // Make sure the output is not lost.
2948 FlushStreams();
2949 if (status)
2950 Die();
2951}
2952
2953#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2954static void unreachable() {
2955  Report("FATAL: ThreadSanitizer: unreachable called\n");
2956 Die();
2957}
2958#endif
2959
2960// Define default implementation since interception of libdispatch is optional.
2961SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2962
2963void InitializeInterceptors() {
2964#if !SANITIZER_APPLE
2965  // We need to set these up early, because functions like dlsym() can call them.
2966 REAL(memset) = internal_memset;
2967 REAL(memcpy) = internal_memcpy;
2968#endif
2969
2970 __interception::DoesNotSupportStaticLinking();
2971
2972 new(interceptor_ctx()) InterceptorContext();
2973
2974 // Interpose __tls_get_addr before the common interposers. This is needed
2975 // because dlsym() may call malloc on failure which could result in other
2976 // interposed functions being called that could eventually make use of TLS.
2977#ifdef NEED_TLS_GET_ADDR
2978# if !SANITIZER_S390
2979 TSAN_INTERCEPT(__tls_get_addr);
2980# else
2981 TSAN_INTERCEPT(__tls_get_addr_internal);
2982 TSAN_INTERCEPT(__tls_get_offset);
2983# endif
2984#endif
2985 InitializeCommonInterceptors();
2986 InitializeSignalInterceptors();
2987 InitializeLibdispatchInterceptors();
2988
2989#if !SANITIZER_APPLE
2990 InitializeSetjmpInterceptors();
2991#endif
2992
2993 TSAN_INTERCEPT(longjmp_symname);
2994 TSAN_INTERCEPT(siglongjmp_symname);
2995#if SANITIZER_NETBSD
2996 TSAN_INTERCEPT(_longjmp);
2997#endif
2998
2999 TSAN_INTERCEPT(malloc);
3000 TSAN_INTERCEPT(__libc_memalign);
3001 TSAN_INTERCEPT(calloc);
3002 TSAN_INTERCEPT(realloc);
3003 TSAN_INTERCEPT(reallocarray);
3004 TSAN_INTERCEPT(free);
3005 TSAN_MAYBE_INTERCEPT_FREE_SIZED;
3006 TSAN_MAYBE_INTERCEPT_FREE_ALIGNED_SIZED;
3007 TSAN_INTERCEPT(cfree);
3008 TSAN_INTERCEPT(munmap);
3009 TSAN_MAYBE_INTERCEPT_MEMALIGN;
3010 TSAN_INTERCEPT(valloc);
3011 TSAN_MAYBE_INTERCEPT_PVALLOC;
3012 TSAN_INTERCEPT(posix_memalign);
3013
3014 TSAN_INTERCEPT(strcpy);
3015 TSAN_INTERCEPT(strncpy);
3016 TSAN_INTERCEPT(strdup);
3017
3018 TSAN_INTERCEPT(pthread_create);
3019 TSAN_INTERCEPT(pthread_join);
3020 TSAN_INTERCEPT(pthread_detach);
3021 TSAN_INTERCEPT(pthread_exit);
3022 #if SANITIZER_LINUX
3023 TSAN_INTERCEPT(pthread_tryjoin_np);
3024 TSAN_INTERCEPT(pthread_timedjoin_np);
3025 #endif
3026
3027 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
3028 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
3029 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
3030 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
3031 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
3032 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
3033
3034 TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
3035
3036 TSAN_INTERCEPT(pthread_mutex_init);
3037 TSAN_INTERCEPT(pthread_mutex_destroy);
3038 TSAN_INTERCEPT(pthread_mutex_lock);
3039 TSAN_INTERCEPT(pthread_mutex_trylock);
3040 TSAN_INTERCEPT(pthread_mutex_timedlock);
3041 TSAN_INTERCEPT(pthread_mutex_unlock);
3042#if SANITIZER_LINUX
3043 TSAN_INTERCEPT(pthread_mutex_clocklock);
3044#endif
3045#if SANITIZER_GLIBC
3046# if !__GLIBC_PREREQ(2, 34)
3047 TSAN_INTERCEPT(__pthread_mutex_lock);
3048 TSAN_INTERCEPT(__pthread_mutex_unlock);
3049# endif
3050#endif
3051
3052 TSAN_INTERCEPT(pthread_spin_init);
3053 TSAN_INTERCEPT(pthread_spin_destroy);
3054 TSAN_INTERCEPT(pthread_spin_lock);
3055 TSAN_INTERCEPT(pthread_spin_trylock);
3056 TSAN_INTERCEPT(pthread_spin_unlock);
3057
3058 TSAN_INTERCEPT(pthread_rwlock_init);
3059 TSAN_INTERCEPT(pthread_rwlock_destroy);
3060 TSAN_INTERCEPT(pthread_rwlock_rdlock);
3061 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
3062 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
3063 TSAN_INTERCEPT(pthread_rwlock_wrlock);
3064 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
3065 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
3066 TSAN_INTERCEPT(pthread_rwlock_unlock);
3067
3068 TSAN_INTERCEPT(pthread_barrier_init);
3069 TSAN_INTERCEPT(pthread_barrier_destroy);
3070 TSAN_INTERCEPT(pthread_barrier_wait);
3071
3072 TSAN_INTERCEPT(pthread_once);
3073
3074 TSAN_MAYBE_INTERCEPT___FXSTAT;
3075 TSAN_MAYBE_INTERCEPT_FSTAT;
3076 TSAN_MAYBE_INTERCEPT_FSTAT64;
3077 TSAN_INTERCEPT(open);
3078 TSAN_MAYBE_INTERCEPT_OPEN64;
3079 TSAN_INTERCEPT(creat);
3080 TSAN_MAYBE_INTERCEPT_CREAT64;
3081 TSAN_INTERCEPT(dup);
3082 TSAN_INTERCEPT(dup2);
3083 TSAN_INTERCEPT(dup3);
3084 TSAN_MAYBE_INTERCEPT_EVENTFD;
3085 TSAN_MAYBE_INTERCEPT_SIGNALFD;
3086 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
3087 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
3088 TSAN_INTERCEPT(socket);
3089 TSAN_INTERCEPT(socketpair);
3090 TSAN_INTERCEPT(connect);
3091 TSAN_INTERCEPT(bind);
3092 TSAN_INTERCEPT(listen);
3093 TSAN_MAYBE_INTERCEPT_EPOLL;
3094 TSAN_INTERCEPT(close);
3095 TSAN_MAYBE_INTERCEPT___CLOSE;
3096 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
3097 TSAN_INTERCEPT(pipe);
3098 TSAN_INTERCEPT(pipe2);
3099
3100 TSAN_INTERCEPT(unlink);
3101 TSAN_INTERCEPT(tmpfile);
3102 TSAN_MAYBE_INTERCEPT_TMPFILE64;
3103 TSAN_INTERCEPT(abort);
3104 TSAN_INTERCEPT(rmdir);
3105 TSAN_INTERCEPT(closedir);
3106
3107 TSAN_INTERCEPT(sigsuspend);
3108 TSAN_INTERCEPT(sigblock);
3109 TSAN_INTERCEPT(sigsetmask);
3110 TSAN_INTERCEPT(pthread_sigmask);
3111 TSAN_INTERCEPT(raise);
3112 TSAN_INTERCEPT(kill);
3113 TSAN_INTERCEPT(pthread_kill);
3114 TSAN_INTERCEPT(sleep);
3115 TSAN_INTERCEPT(usleep);
3116 TSAN_INTERCEPT(nanosleep);
3117 TSAN_INTERCEPT(pause);
3118 TSAN_INTERCEPT(gettimeofday);
3119 TSAN_INTERCEPT(getaddrinfo);
3120
3121 TSAN_INTERCEPT(fork);
3122 TSAN_INTERCEPT(vfork);
3123#if SANITIZER_LINUX
3124 TSAN_INTERCEPT(clone);
3125#endif
3126#if !SANITIZER_ANDROID
3127 TSAN_INTERCEPT(dl_iterate_phdr);
3128#endif
3129
3130 // Symbolization indirectly calls dl_iterate_phdr
3131 ready_to_symbolize = true;
3132
3133 TSAN_MAYBE_INTERCEPT_ON_EXIT;
3134 TSAN_INTERCEPT(__cxa_atexit);
3135 TSAN_INTERCEPT(_exit);
3136
3137 TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3138 TSAN_MAYBE_INTERCEPT_THR_EXIT;
3139
3140#if !SANITIZER_APPLE && !SANITIZER_ANDROID
3141  // Need to set it up, because interceptors check that the function is resolved.
3142  // But atexit is emitted directly into the module, so it can't be resolved.
3143 REAL(atexit) = (int(*)(void(*)()))unreachable;
3144#endif
3145
3146 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
3147    Printf("ThreadSanitizer: failed to setup atexit callback\n");
3148 Die();
3149 }
3150  if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
3151    Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
3152 Die();
3153 }
3154
3155#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
3156  if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
3157    Printf("ThreadSanitizer: failed to create thread key\n");
3158 Die();
3159 }
3160#endif
3161
3162 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
3163 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
3164 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
3165 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
3166 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
3167 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
3168 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
3169 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
3170 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
3171 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
3172 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
3173 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
3174 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
3175 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3176 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3177 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3178 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3179 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3180 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3181
3182 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3183 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3184 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3185 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3186 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3187 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3188 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3189 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
3190 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3191 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
3192 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3193 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3194 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3195 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3196 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3197 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3198 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3199 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3200 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3201
3202 FdInit();
3203}
3204
3205} // namespace __tsan
3206
3207// Invisible barrier for tests.
3208// There were several unsuccessful iterations for this functionality:
3209// 1. Initially it was implemented in user code using
3210// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
3211// MacOS. Futexes are linux-specific for this matter.
3212// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3213// "as-if synchronized via sleep" messages in reports which failed some
3214// output tests.
3215// 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
3216//    visible events, which led to "failed to restore stack trace" failures.
3217// Note that no_sanitize_thread attribute does not turn off atomic interception
3218// so attaching it to the function defined in user code does not help.
3219// That's why we now have what we have.
3220constexpr u32 kBarrierThreadBits = 10;
3221constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3222
3223extern "C" {
3224
3225SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3226 atomic_uint32_t *barrier, u32 num_threads) {
3227 if (num_threads >= kBarrierThreads) {
3228    Printf("barrier_init: count is too large (%d)\n", num_threads);
3229 Die();
3230 }
3231  // The low kBarrierThreadBits bits hold the thread count,
3232  // the remaining bits count the threads that have entered the barrier.
3233  atomic_store(barrier, num_threads, memory_order_relaxed);
3234}
3235
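// The epoch (round number) is the count of threads that have entered divided
// by the thread count; it changes exactly when the last thread of a round
// arrives, which is what the waiters below poll for.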
3236static u32 barrier_epoch(u32 value) {
3237 return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3238}
3239
3240SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3241 atomic_uint32_t *barrier) {
3242  u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3243  u32 old_epoch = barrier_epoch(old);
3244  if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3245    FutexWake(barrier, (1 << 30));
3246 return;
3247 }
3248 for (;;) {
3249    u32 cur = atomic_load(barrier, memory_order_relaxed);
3250    if (barrier_epoch(cur) != old_epoch)
3251      return;
3252    FutexWait(barrier, cur);
3253 }
3254}
3255
3256} // extern "C"
3257