1//===-- tsan_interface_ann.cpp --------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11//===----------------------------------------------------------------------===//
12#include "tsan_interface_ann.h"
13
14#include "sanitizer_common/sanitizer_internal_defs.h"
15#include "sanitizer_common/sanitizer_libc.h"
16#include "sanitizer_common/sanitizer_placement_new.h"
17#include "sanitizer_common/sanitizer_stacktrace.h"
18#include "sanitizer_common/sanitizer_vector.h"
19#include "tsan_adaptive_delay.h"
20#include "tsan_flags.h"
21#include "tsan_mman.h"
22#include "tsan_platform.h"
23#include "tsan_report.h"
24#include "tsan_rtl.h"
25
26#define CALLERPC ((uptr)__builtin_return_address(0))
27
28using namespace __tsan;
29
30namespace __tsan {
31
32class ScopedAnnotation {
33 public:
34 ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
35 : thr_(thr) {
36 FuncEntry(thr: thr_, pc);
37 DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
38 }
39
40 ~ScopedAnnotation() {
41 FuncExit(thr: thr_);
42 CheckedMutex::CheckNoLocks();
43 }
44 private:
45 ThreadState *const thr_;
46};
47
// Common prologue for the annotation entry points below:
//  - returns |ret| immediately when annotations are disabled by flags;
//  - binds |thr| to the current thread state;
//  - opens a ScopedAnnotation (synthetic frame + no-locks check on exit);
//  - provides |pc| for annotation bodies ((void)pc silences the unused
//    warning in annotations that do not need it).
// Comments cannot go inside the macro: they would hide the trailing '\'.
#define SCOPED_ANNOTATION_RET(typ, ret) \
    if (!flags()->enable_annotations) \
      return ret; \
    ThreadState *thr = cur_thread(); \
    const uptr caller_pc = (uptr)__builtin_return_address(0); \
    ScopedAnnotation sa(thr, __func__, caller_pc); \
    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc;

// Variant for annotations that return void.
#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
58
// Maximum length (including the terminating NUL) of the user-supplied race
// description stored in ExpectRace::desc.
static const int kMaxDescLen = 128;

// Node of a circular doubly-linked list of expected/benign races, keyed by
// the byte range [addr, addr+size). hitcount counts how many times a report
// matched this entry; addcount counts how many times it was registered.
struct ExpectRace {
  ExpectRace *next;
  ExpectRace *prev;
  atomic_uintptr_t hitcount;
  atomic_uintptr_t addcount;
  uptr addr;
  uptr size;
  char *file;  // annotation call site: file name pointer as passed by caller
  int line;    // annotation call site: line number
  char desc[kMaxDescLen];
};

// Global state for dynamic annotations: a mutex-protected sentinel list of
// benign races registered via AnnotateBenignRace*.
struct DynamicAnnContext {
  Mutex mtx;
  ExpectRace benign;  // sentinel head of the benign-race list

  DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
};

static DynamicAnnContext *dyn_ann_ctx;
// Preallocated storage for dyn_ann_ctx; constructed with placement new in
// InitializeDynamicAnnotations (avoids heap allocation during startup).
alignas(64) static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)];
82
83static void AddExpectRace(ExpectRace *list,
84 char *f, int l, uptr addr, uptr size, char *desc) {
85 ExpectRace *race = list->next;
86 for (; race != list; race = race->next) {
87 if (race->addr == addr && race->size == size) {
88 atomic_store_relaxed(a: &race->addcount,
89 v: atomic_load_relaxed(a: &race->addcount) + 1);
90 return;
91 }
92 }
93 race = static_cast<ExpectRace *>(Alloc(sz: sizeof(ExpectRace)));
94 race->addr = addr;
95 race->size = size;
96 race->file = f;
97 race->line = l;
98 race->desc[0] = 0;
99 atomic_store_relaxed(a: &race->hitcount, v: 0);
100 atomic_store_relaxed(a: &race->addcount, v: 1);
101 if (desc) {
102 int i = 0;
103 for (; i < kMaxDescLen - 1 && desc[i]; i++)
104 race->desc[i] = desc[i];
105 race->desc[i] = 0;
106 }
107 race->prev = list;
108 race->next = list->next;
109 race->next->prev = race;
110 list->next = race;
111}
112
113static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
114 for (ExpectRace *race = list->next; race != list; race = race->next) {
115 uptr maxbegin = max(a: race->addr, b: addr);
116 uptr minend = min(a: race->addr + race->size, b: addr + size);
117 if (maxbegin < minend)
118 return race;
119 }
120 return 0;
121}
122
123static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
124 ExpectRace *race = FindRace(list, addr, size);
125 if (race == 0)
126 return false;
127 DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
128 race->desc, race->addr, (int)race->size, race->file, race->line);
129 atomic_fetch_add(a: &race->hitcount, v: 1, mo: memory_order_relaxed);
130 return true;
131}
132
133static void InitList(ExpectRace *list) {
134 list->next = list;
135 list->prev = list;
136}
137
138void InitializeDynamicAnnotations() {
139 dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
140 InitList(list: &dyn_ann_ctx->benign);
141}
142
143bool IsExpectedReport(uptr addr, uptr size) {
144 ReadLock lock(&dyn_ann_ctx->mtx);
145 return CheckContains(list: &dyn_ann_ctx->benign, addr, size);
146}
147} // namespace __tsan
148
149using namespace __tsan;
150
151extern "C" {
// Establishes a happens-before edge keyed on |addr|; pairs with a later
// AnnotateHappensAfter on the same address.
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

// Completes a happens-before edge started by AnnotateHappensBefore(addr).
void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}

// The condition-variable/mutex annotations below are no-ops in this
// implementation; they exist for source compatibility with the dynamic
// annotations interface.
void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
}

void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                             uptr lock) {
}
174
175void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
176 SCOPED_ANNOTATION(AnnotateRWLockCreate);
177 MutexCreate(thr, pc, addr: m, flagz: MutexFlagWriteReentrant);
178}
179
180void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
181 SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
182 MutexCreate(thr, pc, addr: m, flagz: MutexFlagWriteReentrant | MutexFlagLinkerInit);
183}
184
185void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
186 SCOPED_ANNOTATION(AnnotateRWLockDestroy);
187 MutexDestroy(thr, pc, addr: m);
188}
189
190void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
191 uptr is_w) {
192 SCOPED_ANNOTATION(AnnotateRWLockAcquired);
193 if (is_w)
194 MutexPostLock(thr, pc, addr: m, flagz: MutexFlagDoPreLockOnPostLock);
195 else
196 MutexPostReadLock(thr, pc, addr: m, flagz: MutexFlagDoPreLockOnPostLock);
197}
198
199void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
200 uptr is_w) {
201 SCOPED_ANNOTATION(AnnotateRWLockReleased);
202 if (is_w)
203 MutexUnlock(thr, pc, addr: m);
204 else
205 MutexReadUnlock(thr, pc, addr: m);
206}
207
// All annotations below are accepted for source compatibility with the
// dynamic annotations interface but are no-ops in this implementation.
void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
}

void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
}

void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                           uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
}

void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
}

void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
    char *f, int l, int enable) {
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
    char *f, int l, uptr mu) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQGet(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQPut(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
    char *f, int l, uptr pcq) {
}

// Expected-race annotation is also a no-op here (only benign races are
// recorded, below).
void INTERFACE_ATTRIBUTE AnnotateExpectRace(
    char *f, int l, uptr mem, char *desc) {
}
251
252static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
253 Lock lock(&dyn_ann_ctx->mtx);
254 AddExpectRace(list: &dyn_ann_ctx->benign,
255 f, l, addr: mem, size, desc);
256 DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
257}
258
259void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
260 char *f, int l, uptr mem, uptr size, char *desc) {
261 SCOPED_ANNOTATION(AnnotateBenignRaceSized);
262 BenignRaceImpl(f, l, mem, size, desc);
263}
264
265void INTERFACE_ATTRIBUTE AnnotateBenignRace(
266 char *f, int l, uptr mem, char *desc) {
267 SCOPED_ANNOTATION(AnnotateBenignRace);
268 BenignRaceImpl(f, l, mem, size: 1, desc);
269}
270
// Begin ignoring memory accesses on the current thread until the matching
// ...End annotation. Note: the Reads and Writes variants both call the same
// thread-wide ThreadIgnoreBegin/End — they share one ignore counter here.
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
  ThreadIgnoreEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
  ThreadIgnoreEnd(thr);
}

// Begin/end ignoring synchronization operations on the current thread.
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
  ThreadIgnoreSyncBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
  ThreadIgnoreSyncEnd(thr);
}

// Publish/unpublish annotations are no-ops in this implementation.
void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
}

// Sets the current thread's name as shown in reports.
void INTERFACE_ATTRIBUTE AnnotateThreadName(
    char *f, int l, char *name) {
  SCOPED_ANNOTATION(AnnotateThreadName);
  ThreadSetName(thr, name);
}
314
315// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
316// WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate
317// atomic operations, which should be handled by ThreadSanitizer correctly.
318void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
319}
320
321void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
322}
323
324void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
325 char *f, int l, uptr mem, uptr sz, char *desc) {
326 SCOPED_ANNOTATION(AnnotateBenignRaceSized);
327 BenignRaceImpl(f, l, mem, size: sz, desc);
328}
329
// Legacy Valgrind-compatibility entry point; value is controlled by the
// running_on_valgrind flag.
int INTERFACE_ATTRIBUTE RunningOnValgrind() {
  return flags()->running_on_valgrind;
}

// Declared weak so clients can override the reported slowdown factor.
double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
  return 10.0;
}
337
338const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
339 if (internal_strcmp(s1: query, s2: "pure_happens_before") == 0)
340 return "1";
341 else
342 return "0";
343}
344
// MemorySanitizer-style annotations: accepted for compatibility, no-ops in
// this implementation.
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
349
350// Note: the parameter is called flagz, because flags is already taken
351// by the global function that returns flags.
352INTERFACE_ATTRIBUTE
353void __tsan_mutex_create(void *m, unsigned flagz) {
354 SCOPED_ANNOTATION(__tsan_mutex_create);
355 MutexCreate(thr, pc, addr: (uptr)m, flagz: flagz & MutexCreationFlagMask);
356}
357
358INTERFACE_ATTRIBUTE
359void __tsan_mutex_destroy(void *m, unsigned flagz) {
360 SCOPED_ANNOTATION(__tsan_mutex_destroy);
361 MutexDestroy(thr, pc, addr: (uptr)m, flagz);
362}
363
364INTERFACE_ATTRIBUTE
365void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
366 SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
367 if (!(flagz & MutexFlagTryLock)) {
368 if (flagz & MutexFlagReadLock)
369 MutexPreReadLock(thr, pc, addr: (uptr)m);
370 else
371 MutexPreLock(thr, pc, addr: (uptr)m);
372 }
373 ThreadIgnoreBegin(thr, pc: 0);
374 ThreadIgnoreSyncBegin(thr, pc: 0);
375 AdaptiveDelay::SyncOp();
376}
377
378INTERFACE_ATTRIBUTE
379void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
380 SCOPED_ANNOTATION(__tsan_mutex_post_lock);
381 ThreadIgnoreSyncEnd(thr);
382 ThreadIgnoreEnd(thr);
383 if (!(flagz & MutexFlagTryLockFailed)) {
384 if (flagz & MutexFlagReadLock)
385 MutexPostReadLock(thr, pc, addr: (uptr)m, flagz);
386 else
387 MutexPostLock(thr, pc, addr: (uptr)m, flagz, rec);
388 }
389}
390
391INTERFACE_ATTRIBUTE
392int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
393 SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
394 int ret = 0;
395 if (flagz & MutexFlagReadLock) {
396 CHECK(!(flagz & MutexFlagRecursiveUnlock));
397 MutexReadUnlock(thr, pc, addr: (uptr)m);
398 } else {
399 ret = MutexUnlock(thr, pc, addr: (uptr)m, flagz);
400 }
401 ThreadIgnoreBegin(thr, pc: 0);
402 ThreadIgnoreSyncBegin(thr, pc: 0);
403 return ret;
404}
405
// Called after the unlock implementation finishes: closes the ignore region
// opened in __tsan_mutex_pre_unlock.
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  // NOTE(review): SyncOp runs before SCOPED_ANNOTATION's
  // enable_annotations check (unlike pre_lock, where it runs inside) —
  // presumably so delay injection happens even with annotations disabled;
  // confirm this ordering is intentional.
  AdaptiveDelay::SyncOp();
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}
413
414INTERFACE_ATTRIBUTE
415void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
416 SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
417 ThreadIgnoreBegin(thr, pc: 0);
418 ThreadIgnoreSyncBegin(thr, pc: 0);
419}
420
421INTERFACE_ATTRIBUTE
422void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
423 SCOPED_ANNOTATION(__tsan_mutex_post_signal);
424 ThreadIgnoreSyncEnd(thr);
425 ThreadIgnoreEnd(thr);
426}
427
428INTERFACE_ATTRIBUTE
429void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
430 SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
431 // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
432 ThreadIgnoreSyncEnd(thr);
433 ThreadIgnoreEnd(thr);
434}
435
436INTERFACE_ATTRIBUTE
437void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
438 SCOPED_ANNOTATION(__tsan_mutex_post_divert);
439 ThreadIgnoreBegin(thr, pc: 0);
440 ThreadIgnoreSyncBegin(thr, pc: 0);
441}
442
443static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
444 // Use alloca, because malloc during signal handling deadlocks
445 ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
446 // Take a new scope as Apple platforms require the below locks released
447 // before symbolizing in order to avoid a deadlock
448 {
449 ThreadRegistryLock l(&ctx->thread_registry);
450 new (rep) ScopedReport(ReportTypeMutexHeldWrongContext);
451 for (uptr i = 0; i < thr->mset.Size(); ++i) {
452 MutexSet::Desc desc = thr->mset.Get(i);
453 rep->AddMutex(addr: desc.addr, creation_stack_id: desc.stack_id);
454 }
455 VarSizeStackTrace trace;
456 ObtainCurrentStack(thr, toppc: pc, stack: &trace);
457 rep->AddStack(stack: trace, suppressable: true);
458#if SANITIZER_APPLE
459 } // Close this scope to release the locks
460#endif
461 OutputReport(thr, srep&: *rep);
462
463 // Need to manually destroy this because we used placement new to allocate
464 rep->~ScopedReport();
465#if !SANITIZER_APPLE
466 }
467#endif
468}
469
// Reports an error if the calling thread currently holds any mutexes
// (as tracked in thr->mset).
INTERFACE_ATTRIBUTE
void __tsan_check_no_mutexes_held() {
  SCOPED_ANNOTATION(__tsan_check_no_mutexes_held);
  // Fast path: nothing held, nothing to report.
  if (thr->mset.Size() == 0) {
    return;
  }
  ReportMutexHeldWrongContext(thr, pc);
}
478} // extern "C"
479