1//===-- tsan_interface_atomic.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11//===----------------------------------------------------------------------===//
12
13// ThreadSanitizer atomic operations are based on C++11/C1x standards.
14// For background see C++11 standard. A slightly older, publicly
15// available draft of the standard (not entirely up-to-date, but close enough
16// for casual browsing) is available here:
17// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
18// The following page contains more background information:
19// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
20
21#include "sanitizer_common/sanitizer_placement_new.h"
22#include "sanitizer_common/sanitizer_stacktrace.h"
23#include "sanitizer_common/sanitizer_mutex.h"
24#include "tsan_flags.h"
25#include "tsan_interface.h"
26#include "tsan_rtl.h"
27
28using namespace __tsan;
29
#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
// The 128-bit func_* helpers below all take this mutex, so they are atomic
// only with respect to each other (not to non-instrumented accesses).
static StaticSpinMutex mutex128;
#endif
34
#if SANITIZER_DEBUG
// True iff mo is a legal memory order for an atomic load (DCHECK-only).
static bool IsLoadOrder(morder mo) {
  switch (mo) {
    case mo_relaxed:
    case mo_consume:
    case mo_acquire:
    case mo_seq_cst:
      return true;
    default:
      return false;
  }
}

// True iff mo is a legal memory order for an atomic store (DCHECK-only).
static bool IsStoreOrder(morder mo) {
  switch (mo) {
    case mo_relaxed:
    case mo_release:
    case mo_seq_cst:
      return true;
    default:
      return false;
  }
}
#endif
45
46static bool IsReleaseOrder(morder mo) {
47 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
48}
49
50static bool IsAcquireOrder(morder mo) {
51 return mo == mo_consume || mo == mo_acquire
52 || mo == mo_acq_rel || mo == mo_seq_cst;
53}
54
55static bool IsAcqRelOrder(morder mo) {
56 return mo == mo_acq_rel || mo == mo_seq_cst;
57}
58
// Atomic exchange; returns the previous value.
template <typename T>
T func_xchg(volatile T *v, T op) {
  T prev = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return prev;
}
65
// Full-barrier fetch-and-add; returns the value held before the addition.
template <typename T>
T func_add(volatile T *v, T op) {
  T prev = __sync_fetch_and_add(v, op);
  return prev;
}
69
// Full-barrier fetch-and-subtract; returns the value held before.
template <typename T>
T func_sub(volatile T *v, T op) {
  T prev = __sync_fetch_and_sub(v, op);
  return prev;
}
73
// Full-barrier fetch-and-AND; returns the value held before.
template <typename T>
T func_and(volatile T *v, T op) {
  T prev = __sync_fetch_and_and(v, op);
  return prev;
}
77
// Full-barrier fetch-and-OR; returns the value held before.
template <typename T>
T func_or(volatile T *v, T op) {
  T prev = __sync_fetch_and_or(v, op);
  return prev;
}
81
// Full-barrier fetch-and-XOR; returns the value held before.
template <typename T>
T func_xor(volatile T *v, T op) {
  T prev = __sync_fetch_and_xor(v, op);
  return prev;
}
85
// Fetch-and-NAND emulated via a CAS loop, because clang does not
// support __sync_fetch_and_nand. Returns the value held before.
template <typename T>
T func_nand(volatile T *v, T op) {
  T expected = *v;
  while (true) {
    const T desired = ~(expected & op);
    const T observed = __sync_val_compare_and_swap(v, expected, desired);
    if (observed == expected)
      return expected;
    // Somebody else updated *v; retry with the freshly observed value.
    expected = observed;
  }
}
97
// Strong CAS: stores xch iff *v == cmp; returns the observed value.
template <typename T>
T func_cas(volatile T *v, T cmp, T xch) {
  T observed = __sync_val_compare_and_swap(v, cmp, xch);
  return observed;
}
101
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
// Each overload below emulates the corresponding RMW operation for 128-bit
// values under mutex128: read the old value, write the updated one, and
// return the old value.
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

// 128-bit strong CAS; stores xch only if the current value equals cmp.
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
165
// Shadow access size (1/2/4/8 bytes) used to record accesses to an
// atomic of type T.
// For 16-byte atomics we also use 8-byte memory access,
// this leads to false negatives only in very obscure cases.
template <typename T>
static int AccessSize() {
  return sizeof(T) <= 1 ? 1
       : sizeof(T) <= 2 ? 2
       : sizeof(T) <= 4 ? 4
                        : 8;
}
179
#if !SANITIZER_GO
// Reinterpret an application atomic variable as the sanitizer-internal
// atomic type of the same width (used by the NoTsan* fallbacks below).
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
197
// Translate the interface memory order into the sanitizer-internal
// memory_order enum.
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  // Unreachable for valid input; fall back to the strongest order.
  DCHECK(0);
  return memory_order_seq_cst;
}
210
// Uninstrumented atomic load, used on fast paths and when sync is ignored.
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
// 128-bit load is emulated under mutex128 (see the func_* helpers above).
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
222
223template <typename T>
224static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
225 DCHECK(IsLoadOrder(mo));
226 // This fast-path is critical for performance.
227 // Assume the access is atomic.
228 if (!IsAcquireOrder(mo)) {
229 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
230 kAccessRead | kAccessAtomic);
231 return NoTsanAtomicLoad(a, mo);
232 }
233 // Don't create sync object if it does not exist yet. For example, an atomic
234 // pointer is initialized to nullptr and then periodically acquire-loaded.
235 T v = NoTsanAtomicLoad(a, mo);
236 SyncVar *s = ctx->metamap.GetSyncIfExists(addr: (uptr)a);
237 if (s) {
238 SlotLocker locker(thr);
239 ReadLock lock(&s->mtx);
240 thr->clock.Acquire(src: s->clock);
241 // Re-read under sync mutex because we need a consistent snapshot
242 // of the value and the clock we acquire.
243 v = NoTsanAtomicLoad(a, mo);
244 }
245 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
246 return v;
247}
248
// Uninstrumented atomic store, used on fast paths and when sync is ignored.
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
// 128-bit store is emulated under mutex128 (see the func_* helpers above).
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
260
261template <typename T>
262static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
263 morder mo) {
264 DCHECK(IsStoreOrder(mo));
265 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
266 // This fast-path is critical for performance.
267 // Assume the access is atomic.
268 // Strictly saying even relaxed store cuts off release sequence,
269 // so must reset the clock.
270 if (!IsReleaseOrder(mo)) {
271 NoTsanAtomicStore(a, v, mo);
272 return;
273 }
274 SlotLocker locker(thr);
275 {
276 auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr: (uptr)a, save_stack: false);
277 Lock lock(&s->mtx);
278 thr->clock.ReleaseStore(dstp: &s->clock);
279 NoTsanAtomicStore(a, v, mo);
280 }
281 IncrementEpoch(thr);
282}
283
284template <typename T, T (*F)(volatile T *v, T op)>
285static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
286 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
287 if (LIKELY(mo == mo_relaxed))
288 return F(a, v);
289 SlotLocker locker(thr);
290 {
291 auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr: (uptr)a, save_stack: false);
292 RWLock lock(&s->mtx, IsReleaseOrder(mo));
293 if (IsAcqRelOrder(mo))
294 thr->clock.ReleaseAcquire(dstp: &s->clock);
295 else if (IsReleaseOrder(mo))
296 thr->clock.Release(dstp: &s->clock);
297 else if (IsAcquireOrder(mo))
298 thr->clock.Acquire(src: s->clock);
299 v = F(a, v);
300 }
301 if (IsReleaseOrder(mo))
302 IncrementEpoch(thr);
303 return v;
304}
305
// Uninstrumented RMW operations: thin wrappers over the func_* helpers,
// used on fast paths and when sync is ignored. The memory order argument
// is accepted for interface symmetry but not needed by the helpers.
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}
340
// Instrumented RMW operations: each dispatches to AtomicRMW with the
// matching func_* update callback.
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
382
// Uninstrumented strong CAS; on failure *c is updated to the observed value.
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}
387
#if __TSAN_HAS_INT128
// 128-bit strong CAS; on failure *c is updated to the observed value.
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif
399
// Value-returning CAS variant (__sync-style): returns the observed value.
template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
405
406template <typename T>
407static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
408 morder mo, morder fmo) {
409 // 31.7.2.18: "The failure argument shall not be memory_order_release
410 // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
411 // (mo_relaxed) when those are used.
412 DCHECK(IsLoadOrder(fmo));
413
414 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
415 if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
416 T cc = *c;
417 T pr = func_cas(a, cc, v);
418 if (pr == cc)
419 return true;
420 *c = pr;
421 return false;
422 }
423 SlotLocker locker(thr);
424 bool release = IsReleaseOrder(mo);
425 bool success;
426 {
427 auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr: (uptr)a, save_stack: false);
428 RWLock lock(&s->mtx, release);
429 T cc = *c;
430 T pr = func_cas(a, cc, v);
431 success = pr == cc;
432 if (!success) {
433 *c = pr;
434 mo = fmo;
435 }
436 if (success && IsAcqRelOrder(mo))
437 thr->clock.ReleaseAcquire(dstp: &s->clock);
438 else if (success && IsReleaseOrder(mo))
439 thr->clock.Release(dstp: &s->clock);
440 else if (IsAcquireOrder(mo))
441 thr->clock.Acquire(src: s->clock);
442 }
443 if (success && release)
444 IncrementEpoch(thr);
445 return success;
446}
447
// Value-returning instrumented CAS (__sync-style interface).
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
454
#if !SANITIZER_GO
// Plain hardware fence used when instrumentation is bypassed.
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

// Instrumented fence; currently only issues a hardware fence without
// modeling happens-before edges.
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif
465
466// Interface functions follow.
467#if !SANITIZER_GO
468
469// C/C++
470
// Normalize the memory order received from instrumented code to one of the
// plain morder values (optionally forcing seq_cst via a runtime flag).
static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics,
  // since we use __sync_ atomics for actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}
487
// Common prologue for every C/C++ interface function below: processes
// pending signals, bypasses instrumentation when sync is ignored, and
// dispatches to the instrumented Atomic##func implementation. Comments
// cannot appear inside the macro body because of line continuations.
# define ATOMIC_IMPL(func, ...) \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    mo = convert_morder(mo); \
    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
495
extern "C" {
// Atomic loads for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif

// Atomic stores for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif

// Atomic exchange for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif

// Atomic fetch-and-add for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif
604
// Atomic fetch-and-subtract for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif

// Atomic fetch-and-AND for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif

// Atomic fetch-and-OR for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif

// Atomic fetch-and-XOR for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif

// Atomic fetch-and-NAND for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif
739
// Strong compare-exchange for each supported width.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

// Weak compare-exchange: implemented via the same strong CAS.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

// Value-returning compare-exchange (__sync-style interface).
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

// Signal fences are compiler-only barriers; nothing to model.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"
843
#else  // #if !SANITIZER_GO

// Go

// Go entry points receive the thread state and PCs explicitly; FuncEntry /
// FuncExit maintain the call stack for reports. Instrumentation is skipped
// entirely when sync is ignored. Comments cannot appear inside the macro
// bodies because of the line continuations.
# define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    }

// Same as ATOMIC, but additionally stores the operation's result into 'ret'.
# define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    }
865
extern "C" {
// The Go runtime passes arguments and results through a single frame
// pointed to by 'a': the address of the atomic variable is read from a+0,
// operands follow at a+8 (and beyond for wider frames), and results are
// written back into the frame — the offsets are visible in each call below.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchOr, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchOr, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  // Frame: addr at a+0, cmp at a+8, new value at a+12, success flag at a+16.
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  // Frame: addr at a+0, cmp at a+8, new value at a+16, success flag at a+24.
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
949#endif // #if !SANITIZER_GO
950