// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//                        Kokkos v. 4.0
//       Copyright (2022) National Technology & Engineering
//               Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
//===---------------------------------------------------------------------===//

#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
#define _LIBCPP___ATOMIC_ATOMIC_REF_H

#include <__assert>
#include <__atomic/atomic_sync.h>
#include <__atomic/atomic_waitable_traits.h>
#include <__atomic/check_memory_order.h>
#include <__atomic/floating_point_helper.h>
#include <__atomic/memory_order.h>
#include <__atomic/to_gcc_order.h>
#include <__concepts/arithmetic.h>
#include <__concepts/same_as.h>
#include <__config>
#include <__cstddef/byte.h>
#include <__cstddef/ptrdiff_t.h>
#include <__memory/addressof.h>
#include <__type_traits/has_unique_object_representation.h>
#include <__type_traits/is_trivially_copyable.h>
#include <cstdint>
#include <cstring>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_PUSH_MACROS
#include <__undef_macros>

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20

// These types are required to make __atomic_is_always_lock_free work across GCC and Clang.
// The purpose of this trick is to make sure that we provide an object with the correct alignment
// to __atomic_is_always_lock_free, since that answer depends on the alignment.
template <size_t _Alignment>
struct __alignment_checker_type {
  alignas(_Alignment) char __data;
};

template <size_t _Alignment>
struct __get_aligner_instance {
  static constexpr __alignment_checker_type<_Alignment> __instance{};
};
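
// For illustration: querying lock-freedom for a type requiring 8-byte alignment amounts to
//   __atomic_always_lock_free(8, std::addressof(__get_aligner_instance<8>::__instance))
// where the argument is a constexpr object whose alignment is statically known to the compiler.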
| 61 | |
| 62 | template <class _Tp> |
| 63 | struct __atomic_ref_base { |
| 64 | private: |
| 65 | _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept { |
| 66 | _Tp* __ptr = std::addressof(__val); |
| 67 | # if __has_builtin(__builtin_clear_padding) |
| 68 | __builtin_clear_padding(__ptr); |
| 69 | # endif |
| 70 | return __ptr; |
| 71 | } |
| 72 | |
| 73 | _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange( |
| 74 | _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept { |
| 75 | if constexpr ( |
| 76 | # if __has_builtin(__builtin_clear_padding) |
| 77 | has_unique_object_representations_v<_Tp> || floating_point<_Tp> |
| 78 | # else |
| 79 | true // NOLINT(readability-simplify-boolean-expr) |
| 80 | # endif |
| 81 | ) { |
| 82 | return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure); |
| 83 | } else { // _Tp has padding bits and __builtin_clear_padding is available |
| 84 | __clear_padding(val&: *__desired); |
| 85 | _Tp __copy = *__expected; |
| 86 | __clear_padding(val&: __copy); |
| 87 | // The algorithm we use here is basically to perform `__atomic_compare_exchange` on the |
| 88 | // values until it has either succeeded, or failed because the value representation of the |
| 89 | // objects involved was different. This is why we loop around __atomic_compare_exchange: |
| 90 | // we basically loop until its failure is caused by the value representation of the objects |
| 91 | // being different, not only their object representation. |
| 92 | while (true) { |
| 93 | _Tp __prev = __copy; |
| 94 | if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) { |
| 95 | return true; |
| 96 | } |
| 97 | _Tp __curr = __copy; |
| 98 | if (std::memcmp(s1: __clear_padding(val&: __prev), s2: __clear_padding(val&: __curr), n: sizeof(_Tp)) != 0) { |
| 99 | // Value representation without padding bits do not compare equal -> |
| 100 | // write the current content of *ptr into *expected |
| 101 | std::memcpy(dest: __expected, src: std::addressof(__copy), n: sizeof(_Tp)); |
| 102 | return false; |
| 103 | } |
| 104 | } |
| 105 | } |
| 106 | } |
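
  // Illustration (hypothetical type): given
  //   struct _Padded { char __c; /* 3 padding bytes */ int __i; };
  // two objects can hold equal values yet differ in their padding bytes, so a raw
  // __atomic_compare_exchange may fail on padding alone; the loop above retries until
  // a failure reflects a genuine value mismatch.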

  friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>;

  // Require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to at least
  // their size in order to be eligible for a lock-free implementation.
  static constexpr size_t __min_alignment = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || (sizeof(_Tp) > 16) ? 0 : sizeof(_Tp);
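  // For example: sizeof(_Tp) == 8 yields __min_alignment == 8, since 8 is a power of two
  // no greater than 16, while sizeof(_Tp) == 12 or sizeof(_Tp) == 32 yields 0. The test
  // `sizeof(_Tp) & (sizeof(_Tp) - 1)` is nonzero exactly when the size is not a power of two.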

public:
  using value_type = _Tp;

  static constexpr size_t required_alignment = alignof(_Tp) > __min_alignment ? alignof(_Tp) : __min_alignment;

  // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided,
  // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed
  // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition
  // of atomic_ref's constructor.
  static constexpr bool is_always_lock_free =
      __atomic_always_lock_free(sizeof(_Tp), std::addressof(__get_aligner_instance<required_alignment>::__instance));
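  // For example, atomic_ref<long long> has required_alignment == 8, so the query above is
  // __atomic_always_lock_free(8, <8-byte-aligned pointer>), which is typically true on
  // 64-bit targets (illustrative; the actual answer is target-specific).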

  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept {
    return __atomic_is_lock_free(sizeof(_Tp), __ptr_);
  }

  _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic store operation is invalid");
    __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
    store(__desired);
    return __desired;
  }

  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
            __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic load operation is invalid");
    alignas(_Tp) byte __mem[sizeof(_Tp)];
    auto* __ret = reinterpret_cast<_Tp*>(__mem);
    __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
    return *__ret;
  }

  _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }

  _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    alignas(_Tp) byte __mem[sizeof(_Tp)];
    auto* __ret = reinterpret_cast<_Tp*>(__mem);
    __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
    return *__ret;
  }

  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __failure == memory_order::relaxed || __failure == memory_order::consume ||
            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
        "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        true,
        std::__to_gcc_order(__success),
        std::__to_gcc_order(__failure));
  }
  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
      _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __failure == memory_order::relaxed || __failure == memory_order::consume ||
            __failure == memory_order::acquire || __failure == memory_order::seq_cst,
        "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        false,
        std::__to_gcc_order(__success),
        std::__to_gcc_order(__failure));
  }

  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        true,
        std::__to_gcc_order(__order),
        std::__to_gcc_failure_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI bool
  compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
    return __compare_exchange(
        __ptr_,
        std::addressof(__expected),
        std::addressof(__desired),
        false,
        std::__to_gcc_order(__order),
        std::__to_gcc_failure_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
      _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
            __order == memory_order::seq_cst,
        "atomic_ref: memory order argument to atomic wait operation is invalid");
    std::__atomic_wait(*this, __old, __order);
  }
  _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); }
  _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); }
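  // Illustrative wait/notify pattern (hypothetical two-thread usage):
  //   waiter:   __ref.wait(_Tp(0));                      // blocks while the value is still 0
  //   notifier: __ref.store(_Tp(1)); __ref.notify_one(); // wakes the waiter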
#  if _LIBCPP_STD_VER >= 26
  [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp* address() const noexcept { return __ptr_; }
#  endif

protected:
  using _Aligned_Tp [[__gnu__::__aligned__(required_alignment), __gnu__::__nodebug__]] = _Tp;
  _Aligned_Tp* __ptr_;

  _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
};

template <class _Tp>
struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
  using __value_type _LIBCPP_NODEBUG = _Tp;

  static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
    return __a.load(__order);
  }
  static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
    return __a.__ptr_;
  }
};

template <class _Tp>
struct atomic_ref : public __atomic_ref_base<_Tp> {
  static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");

  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;
};
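
// Example usage of the primary template (illustrative only):
//   struct Data { int x; int y; }; // trivially copyable
//   Data d{1, 2};
//   std::atomic_ref<Data> ref(d); // `d` must remain alive and suitably aligned
//   ref.store({3, 4});
//   Data snapshot = ref.load();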

template <class _Tp>
  requires(std::integral<_Tp> && !std::same_as<bool, _Tp>)
struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>;

  using difference_type = __base::value_type;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
};
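
// Example usage of the integral specialization (illustrative only):
//   int counter = 0;
//   std::atomic_ref<int> ref(counter);
//   ref.fetch_add(1, std::memory_order_relaxed); // counter is now 1
//   ++ref;                                       // counter is now 2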

template <class _Tp>
  requires std::floating_point<_Tp>
struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>;

  using difference_type = __base::value_type;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
    _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
        reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
        "atomic_ref ctor: referenced object must be aligned to required_alignment");
  }

  _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;

  _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

  _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    if constexpr (std::__has_rmw_builtin<_Tp>()) {
      return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
    } else {
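      // No native read-modify-write builtin for this floating-point type, so emulate
      // fetch_add with a compare-exchange loop: on failure, compare_exchange_weak
      // republishes the current value into __old, and we recompute __new from it.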
      _Tp __old = this->load(memory_order_relaxed);
      _Tp __new = __old + __arg;
      while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
        __new = __old + __arg;
      }
      return __old;
    }
  }
  _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    if constexpr (std::__has_rmw_builtin<_Tp>()) {
      return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
    } else {
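      // Same compare-exchange-loop fallback as fetch_add above, subtracting instead.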
      _Tp __old = this->load(memory_order_relaxed);
      _Tp __new = __old - __arg;
      while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
        __new = __old - __arg;
      }
      return __old;
    }
  }

  _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};

template <class _Tp>
struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
  using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp*>;

  using difference_type = ptrdiff_t;

  _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}

  _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }

  atomic_ref& operator=(const atomic_ref&) = delete;

  // Pointer arithmetic is done in units of _Tp, so the byte offset passed to the
  // builtin is scaled by sizeof(_Tp).
  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
  }
  _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
    return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
  }

  _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
  _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
};
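
// Example usage of the pointer specialization (illustrative only):
//   int array[4] = {};
//   int* p = array;
//   std::atomic_ref<int*> ref(p);
//   ref.fetch_add(2); // p now points at array + 2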

_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref);

#endif // _LIBCPP_STD_VER >= 20

_LIBCPP_END_NAMESPACE_STD

_LIBCPP_POP_MACROS

#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H