//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
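//
// Illustrative mapping (an assumption about how the TSan instrumentation pass
// lowers atomics, not something defined in this file):
//   std::atomic<int> x;
//   x.load(std::memory_order_acquire)
//       -> __tsan_atomic32_load(&x, __ATOMIC_ACQUIRE)
//   x.fetch_add(1, std::memory_order_acq_rel)
//       -> __tsan_atomic32_fetch_add(&x, 1, __ATOMIC_ACQ_REL)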

#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_adaptive_delay.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire ||
         mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel ||
         mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template <typename T>
T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template <typename T>
T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template <typename T>
T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template <typename T>
T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template <typename T>
T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template <typename T>
T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template <typename T>
T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template <typename T>
T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// so here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO && \
    __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
    case mo_relaxed:
      return memory_order_relaxed;
    case mo_consume:
      return memory_order_consume;
    case mo_acquire:
      return memory_order_acquire;
    case mo_release:
      return memory_order_release;
    case mo_acq_rel:
      return memory_order_acq_rel;
    case mo_seq_cst:
      return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

namespace {

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}

struct OpLoad {
  template <typename T>
  static T NoTsanAtomic(morder mo, const volatile T *a) {
    return atomic_load(to_atomic(a), to_mo(mo));
  }

#if __TSAN_HAS_INT128 && !SANITIZER_GO
  static a128 NoTsanAtomic(morder mo, const volatile a128 *a) {
    SpinMutexLock lock(&mutex128);
    return *a;
  }
#endif

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
    DCHECK(IsLoadOrder(mo));
    // This fast-path is critical for performance.
    // Assume the access is atomic.
    if (!IsAcquireOrder(mo)) {
      MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                   kAccessRead | kAccessAtomic);
      return NoTsanAtomic(mo, a);
    }
    // Don't create a sync object if it does not exist yet. For example, an
    // atomic pointer is initialized to nullptr and then periodically
    // acquire-loaded.
    T v = NoTsanAtomic(mo, a);
    SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
    if (s) {
      SlotLocker locker(thr);
      ReadLock lock(&s->mtx);
      thr->clock.Acquire(s->clock);
      // Re-read under the sync mutex because we need a consistent snapshot
      // of the value and the clock we acquire.
      v = NoTsanAtomic(mo, a);
    }
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return v;
  }
};

struct OpStore {
  template <typename T>
  static void NoTsanAtomic(morder mo, volatile T *a, T v) {
    atomic_store(to_atomic(a), v, to_mo(mo));
  }

#if __TSAN_HAS_INT128 && !SANITIZER_GO
  static void NoTsanAtomic(morder mo, volatile a128 *a, a128 v) {
    SpinMutexLock lock(&mutex128);
    *a = v;
  }
#endif

  template <typename T>
  static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    DCHECK(IsStoreOrder(mo));
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    // This fast-path is critical for performance.
    // Assume the access is atomic.
    // Strictly speaking, even a relaxed store cuts off the release sequence,
    // so we must reset the clock.
    if (!IsReleaseOrder(mo)) {
      NoTsanAtomic(mo, a, v);
      return;
    }
    SlotLocker locker(thr);
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      Lock lock(&s->mtx);
      thr->clock.ReleaseStore(&s->clock);
      NoTsanAtomic(mo, a, v);
    }
    IncrementEpoch(thr);
  }
};

struct OpExchange {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_xchg(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
  }
};

struct OpFetchAdd {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_add(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
  }
};

struct OpFetchSub {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_sub(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
  }
};

struct OpFetchAnd {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_and(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
  }
};

struct OpFetchOr {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_or(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
  }
};

struct OpFetchXor {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_xor(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
  }
};

struct OpFetchNand {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_nand(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
  }
};

struct OpCAS {
  template <typename T>
  static bool NoTsanAtomic(morder mo, morder fmo, volatile T *a, T *c, T v) {
    return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
  }

#if __TSAN_HAS_INT128
  static bool NoTsanAtomic(morder mo, morder fmo, volatile a128 *a, a128 *c,
                           a128 v) {
    a128 old = *c;
    a128 cur = func_cas(a, old, v);
    if (cur == old)
      return true;
    *c = cur;
    return false;
  }
#endif

  template <typename T>
  static T NoTsanAtomic(morder mo, morder fmo, volatile T *a, T c, T v) {
    NoTsanAtomic(mo, fmo, a, &c, v);
    return c;
  }

  template <typename T>
  static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                     volatile T *a, T *c, T v) {
    // 31.7.2.18: "The failure argument shall not be memory_order_release
    // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
    // (mo_relaxed) when those are used.
    DCHECK(IsLoadOrder(fmo));

    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
      T cc = *c;
      T pr = func_cas(a, cc, v);
      if (pr == cc)
        return true;
      *c = pr;
      return false;
    }
    SlotLocker locker(thr);
    bool release = IsReleaseOrder(mo);
    bool success;
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      RWLock lock(&s->mtx, release);
      T cc = *c;
      T pr = func_cas(a, cc, v);
      success = pr == cc;
      if (!success) {
        *c = pr;
        mo = fmo;
      }
      if (success && IsAcqRelOrder(mo))
        thr->clock.ReleaseAcquire(&s->clock);
      else if (success && IsReleaseOrder(mo))
        thr->clock.Release(&s->clock);
      else if (IsAcquireOrder(mo))
        thr->clock.Acquire(s->clock);
    }
    if (success && release)
      IncrementEpoch(thr);
    return success;
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                  volatile T *a, T c, T v) {
    Atomic(thr, pc, mo, fmo, a, &c, v);
    return c;
  }
};

#if !SANITIZER_GO
struct OpFence {
  static void NoTsanAtomic(morder mo) { __sync_synchronize(); }

  static void Atomic(ThreadState *thr, uptr pc, morder mo) {
    // FIXME(dvyukov): not implemented.
    __sync_synchronize();
  }
};
#endif

}  // namespace

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  return flags()->force_seq_cst_atomics ? mo_seq_cst : mo;
}

static morder to_morder(int mo) {
  // Filter out additional memory order flags:
  //   MEMMODEL_SYNC        = 1 << 15
  //   __ATOMIC_HLE_ACQUIRE = 1 << 16
  //   __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
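  //
  // Illustrative example (arithmetic only, assuming the standard values of
  // the __ATOMIC_* constants): __ATOMIC_HLE_ACQUIRE | __ATOMIC_ACQUIRE is
  // 0x10002, and truncating it to u8 below yields 2 == mo_acquire.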
  morder res = static_cast<morder>(static_cast<u8>(mo));
  DCHECK_LE(res, mo_seq_cst);
  return res;
}

template <class... Types>
ALWAYS_INLINE auto AtomicDelayImpl(morder mo, Types... args) {
  AdaptiveDelay::AtomicOpFence(mo);
}

template <class AddrType, class... Types>
ALWAYS_INLINE auto AtomicDelayImpl(morder mo, AddrType addr, Types... args) {
  AdaptiveDelay::AtomicOpAddr((uptr)addr, (int)mo);
}

template <class Op, class... Types>
ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
  AtomicDelayImpl(mo, args...);
  ThreadState *const thr = cur_thread();
  ProcessPendingSignals(thr);
  if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
    return Op::NoTsanAtomic(mo, args...);
  return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
}
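
// Illustrative call path (an assumption about typical usage, not a contract):
// __tsan_atomic32_fetch_add(a, 1, __ATOMIC_RELEASE) below expands to
// AtomicImpl<OpFetchAdd>(mo_release, a, 1), which either performs the raw
// operation (NoTsanAtomic) when synchronization is ignored, or records the
// memory access and the release clock transfer via OpFetchAdd::Atomic ->
// AtomicRMW.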

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
                                            int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                              int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
                                          int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                            int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
                                       int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(int mo) {
  return AtomicImpl<OpFence>(to_morder(mo));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(int mo) {}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

template <class Op, class... Types>
void AtomicGo(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
  if (thr->ignore_sync) {
    (void)Op::NoTsanAtomic(args...);
  } else {
    FuncEntry(thr, cpc);
    (void)Op::Atomic(thr, pc, args...);
    FuncExit(thr);
  }
}

template <class Op, class... Types>
auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
  if (thr->ignore_sync) {
    return Op::NoTsanAtomic(args...);
  } else {
    FuncEntry(thr, cpc);
    auto ret = Op::Atomic(thr, pc, args...);
    FuncExit(thr);
    return ret;
  }
}
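
// Note on the calling convention (inferred from how the functions below decode
// their arguments; illustrative, not a specification): the Go runtime passes a
// pointer to a packed argument block, e.g. for __tsan_go_atomic32_load the
// address is at a+0 and the result slot is at a+8, while for
// __tsan_go_atomic32_compare_exchange the address, old value, new value and
// the boolean result are at a+0, a+8, a+12 and a+16 respectively.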

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a32 **)a);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a64 **)a);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
                                            *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
                                            *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                         u8 *a) {
  a32 cmp = *(a32 *)(a + 8);
  a32 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a32 **)a,
                               cmp, *(a32 *)(a + 12));
  *(bool *)(a + 16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                         u8 *a) {
  a64 cmp = *(a64 *)(a + 8);
  a64 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a64 **)a,
                               cmp, *(a64 *)(a + 16));
  *(bool *)(a + 24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO
