//===-- tsan_interface_ann.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_interface_ann.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_platform.h"

#define CALLERPC ((uptr)__builtin_return_address(0))

using namespace __tsan;

namespace __tsan {
class ScopedAnnotation {
 public:
  ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
  }

  ~ScopedAnnotation() {
    FuncExit(thr_);
    CheckedMutex::CheckNoLocks();
  }
 private:
  ThreadState *const thr_;
};

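// Common prologue for the annotation entry points: bail out early when
// annotations are disabled, otherwise set up the current ThreadState and a
// program counter for the handlers below.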
#define SCOPED_ANNOTATION_RET(typ, ret)                     \
  if (!flags()->enable_annotations)                         \
    return ret;                                             \
  ThreadState *thr = cur_thread();                          \
  const uptr caller_pc = (uptr)__builtin_return_address(0); \
  ScopedAnnotation sa(thr, __func__, caller_pc);            \
  const uptr pc = StackTrace::GetCurrentPc();               \
  (void)pc;

#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )

static const int kMaxDescLen = 128;

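// Descriptor of a user-declared benign race, kept in a circular doubly-linked
// list headed by DynamicAnnContext::benign.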
struct ExpectRace {
  ExpectRace *next;
  ExpectRace *prev;
  atomic_uintptr_t hitcount;
  atomic_uintptr_t addcount;
  uptr addr;
  uptr size;
  char *file;
  int line;
  char desc[kMaxDescLen];
};

struct DynamicAnnContext {
  Mutex mtx;
  ExpectRace benign;

  DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
};

static DynamicAnnContext *dyn_ann_ctx;
alignas(64) static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)];

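// Adds a new race descriptor to the list, or bumps the add counter if an
// identical range was already registered. Callers are expected to hold
// dyn_ann_ctx->mtx.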
static void AddExpectRace(ExpectRace *list,
    char *f, int l, uptr addr, uptr size, char *desc) {
  ExpectRace *race = list->next;
  for (; race != list; race = race->next) {
    if (race->addr == addr && race->size == size) {
      atomic_store_relaxed(&race->addcount,
                           atomic_load_relaxed(&race->addcount) + 1);
      return;
    }
  }
  race = static_cast<ExpectRace *>(Alloc(sizeof(ExpectRace)));
  race->addr = addr;
  race->size = size;
  race->file = f;
  race->line = l;
  race->desc[0] = 0;
  atomic_store_relaxed(&race->hitcount, 0);
  atomic_store_relaxed(&race->addcount, 1);
  if (desc) {
    int i = 0;
    for (; i < kMaxDescLen - 1 && desc[i]; i++)
      race->desc[i] = desc[i];
    race->desc[i] = 0;
  }
  race->prev = list;
  race->next = list->next;
  race->next->prev = race;
  list->next = race;
}

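// Returns the first registered race whose [addr, addr + size) range overlaps
// the queried range, or null if there is none.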
static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    uptr maxbegin = max(race->addr, addr);
    uptr minend = min(race->addr + race->size, addr + size);
    if (maxbegin < minend)
      return race;
  }
  return 0;
}

static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
  ExpectRace *race = FindRace(list, addr, size);
  if (race == 0)
    return false;
  DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
      race->desc, race->addr, (int)race->size, race->file, race->line);
  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
  return true;
}

static void InitList(ExpectRace *list) {
  list->next = list;
  list->prev = list;
}

void InitializeDynamicAnnotations() {
  dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
  InitList(&dyn_ann_ctx->benign);
}

bool IsExpectedReport(uptr addr, uptr size) {
  ReadLock lock(&dyn_ann_ctx->mtx);
  return CheckContains(&dyn_ann_ctx->benign, addr, size);
}
}  // namespace __tsan

using namespace __tsan;

extern "C" {
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
}

void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                             uptr lock) {
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreate);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockDestroy);
  MutexDestroy(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockAcquired);
  if (is_w)
    MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
  else
    MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockReleased);
  if (is_w)
    MutexUnlock(thr, pc, m);
  else
    MutexReadUnlock(thr, pc, m);
}

void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
}

void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
}

void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                           uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
}

void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
}

void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
    char *f, int l, int enable) {
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
    char *f, int l, uptr mu) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQGet(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQPut(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
    char *f, int l, uptr pcq) {
}

void INTERFACE_ATTRIBUTE AnnotateExpectRace(
    char *f, int l, uptr mem, char *desc) {
}

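// Registers a benign race under the annotation lock; shared by the Annotate*
// and WTFAnnotate* entry points below.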
static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->benign,
                f, l, mem, size, desc);
  DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}

void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr size, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, size, desc);
}

void INTERFACE_ATTRIBUTE AnnotateBenignRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRace);
  BenignRaceImpl(f, l, mem, 1, desc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
  ThreadIgnoreEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
  ThreadIgnoreEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
  ThreadIgnoreSyncBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
  ThreadIgnoreSyncEnd(thr);
}

void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
}

void INTERFACE_ATTRIBUTE AnnotateThreadName(
    char *f, int l, char *name) {
  SCOPED_ANNOTATION(AnnotateThreadName);
  ThreadSetName(thr, name);
}

// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
// WTFAnnotateHappensAfter(). They are used by WebKit to annotate atomic
// operations, which ThreadSanitizer already handles correctly.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
}

void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
}

void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr sz, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, sz, desc);
}

int INTERFACE_ATTRIBUTE RunningOnValgrind() {
  return flags()->running_on_valgrind;
}

double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
  return 10.0;
}

const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
  if (internal_strcmp(query, "pure_happens_before") == 0)
    return "1";
  else
    return "0";
}

void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}

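// Annotations for custom mutex implementations (__tsan_mutex_*). A wrapper
// around a user-level lock is expected to call these hooks in pairs around
// the real operation, e.g. (sketch for a hypothetical lock object `mu`):
//   __tsan_mutex_pre_lock(&mu, 0);
//   ... acquire the underlying lock ...
//   __tsan_mutex_post_lock(&mu, 0, 0);
// Between the pre/post hooks, memory accesses and synchronization inside the
// lock implementation itself are suppressed via the thread ignore machinery.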
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
INTERFACE_ATTRIBUTE
void __tsan_mutex_create(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_create);
  MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_destroy(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_destroy);
  MutexDestroy(thr, pc, (uptr)m, flagz);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
  if (!(flagz & MutexFlagTryLock)) {
    if (flagz & MutexFlagReadLock)
      MutexPreReadLock(thr, pc, (uptr)m);
    else
      MutexPreLock(thr, pc, (uptr)m);
  }
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
  SCOPED_ANNOTATION(__tsan_mutex_post_lock);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
  if (!(flagz & MutexFlagTryLockFailed)) {
    if (flagz & MutexFlagReadLock)
      MutexPostReadLock(thr, pc, (uptr)m, flagz);
    else
      MutexPostLock(thr, pc, (uptr)m, flagz, rec);
  }
}

INTERFACE_ATTRIBUTE
int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
  int ret = 0;
  if (flagz & MutexFlagReadLock) {
    CHECK(!(flagz & MutexFlagRecursiveUnlock));
    MutexReadUnlock(thr, pc, (uptr)m);
  } else {
    ret = MutexUnlock(thr, pc, (uptr)m, flagz);
  }
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
  return ret;
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_signal);
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
  // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr);
  ThreadIgnoreEnd(thr);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
  ThreadIgnoreBegin(thr, 0);
  ThreadIgnoreSyncBegin(thr, 0);
}

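// Builds and emits a MutexHeldWrongContext report that lists every mutex the
// current thread still holds, together with the current stack trace.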
static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeMutexHeldWrongContext);
  for (uptr i = 0; i < thr->mset.Size(); ++i) {
    MutexSet::Desc desc = thr->mset.Get(i);
    rep.AddMutex(desc.addr, desc.stack_id);
  }
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  OutputReport(thr, rep);
}

INTERFACE_ATTRIBUTE
void __tsan_check_no_mutexes_held() {
  SCOPED_ANNOTATION(__tsan_check_no_mutexes_held);
  if (thr->mset.Size() == 0) {
    return;
  }
  ReportMutexHeldWrongContext(thr, pc);
}
}  // extern "C"