// -*- C++ -*-
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___STOP_TOKEN_STOP_STATE_H
#define _LIBCPP___STOP_TOKEN_STOP_STATE_H

#include <__assert>
#include <__config>
#include <__stop_token/atomic_unique_lock.h>
#include <__stop_token/intrusive_list_view.h>
#include <__thread/id.h>
#include <atomic>
#include <cstdint>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20 && _LIBCPP_HAS_THREADS

struct __stop_callback_base : __intrusive_node_base<__stop_callback_base> {
  using __callback_fn_t _LIBCPP_NODEBUG = void(__stop_callback_base*) noexcept;
  _LIBCPP_HIDE_FROM_ABI explicit __stop_callback_base(__callback_fn_t* __callback_fn) : __callback_fn_(__callback_fn) {}

  _LIBCPP_HIDE_FROM_ABI void __invoke() noexcept { __callback_fn_(this); }

  __callback_fn_t* __callback_fn_;
  atomic<bool> __completed_ = false;
  bool* __destroyed_ = nullptr;
};
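// Illustrative sketch only, not part of this header: a concrete callback type is
// expected to derive from __stop_callback_base, store the user callable, and pass a
// static trampoline as __callback_fn_ (std::stop_callback is implemented along these
// lines). The names below are hypothetical.
//
//   template <class _Fn>
//   struct __example_callback : __stop_callback_base {
//     _Fn __fn_;
//     explicit __example_callback(_Fn __fn)
//         : __stop_callback_base(&__trampoline), __fn_(std::move(__fn)) {}
//     static void __trampoline(__stop_callback_base* __base) noexcept {
//       static_cast<__example_callback*>(__base)->__fn_();
//     }
//   };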

class __stop_state {
  static constexpr uint32_t __stop_requested_bit = 1;
  static constexpr uint32_t __callback_list_locked_bit = 1 << 1;
  static constexpr uint32_t __stop_source_counter_shift = 2;

  // The "stop_source counter" is not used for lifetime reference counting.
  // When the number of stop_sources reaches 0, stop_possible on the remaining
  // stop_tokens will return false. We need this counter to track that.
  //
  // The "callback list locked" bit implements the atomic_unique_lock to
  // guard the operations on the callback list
  //
  //  31 - 2              | 1                    | 0              |
  //  stop_source counter | callback list locked | stop_requested |
  atomic<uint32_t> __state_ = 0;
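  // Worked example (illustrative only): with two live stop_sources, the callback list
  // unlocked and no stop requested, __state_ == (2 << __stop_source_counter_shift) == 0b1000.
  // Requesting stop while holding the list lock additionally sets the two low bits,
  // giving 0b1011.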

  // Reference count for stop_token + stop_callback + stop_source
  // When the counter reaches zero, the state is destroyed
  // It is used by __intrusive_shared_ptr, but it is stored here for better layout
  atomic<uint32_t> __ref_count_ = 0;

  using __state_t _LIBCPP_NODEBUG = uint32_t;
  using __callback_list_lock _LIBCPP_NODEBUG = __atomic_unique_lock<__state_t, __callback_list_locked_bit>;
  using __callback_list _LIBCPP_NODEBUG = __intrusive_list_view<__stop_callback_base>;

  __callback_list __callback_list_;
  __thread_id __requesting_thread_;

public:
  _LIBCPP_HIDE_FROM_ABI __stop_state() noexcept = default;

  _LIBCPP_HIDE_FROM_ABI void __increment_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) <= static_cast<__state_t>(~(1 << __stop_source_counter_shift)),
        "stop_source's counter reaches the maximum. Incrementing the counter will overflow");
    __state_.fetch_add(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  // We do not destroy the object when the counter reaches zero, nor do we have operations
  // that depend on the ordering of the decrement, so relaxed is enough.
  _LIBCPP_HIDE_FROM_ABI void __decrement_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) >= static_cast<__state_t>(1 << __stop_source_counter_shift),
        "stop_source's counter is 0. Decrementing the counter will underflow");
    __state_.fetch_sub(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  _LIBCPP_HIDE_FROM_ABI bool __stop_requested() const noexcept {
    // acquire because [thread.stoptoken.intro] A call to request_stop that returns true
    // synchronizes with a call to stop_requested on an associated stop_token or stop_source
    // object that returns true.
    // request_stop's compare_exchange_weak has release which syncs with this acquire
    return (__state_.load(std::memory_order_acquire) & __stop_requested_bit) != 0;
  }

  _LIBCPP_HIDE_FROM_ABI bool __stop_possible_for_stop_token() const noexcept {
    // [stoptoken.mem] false if "a stop request was not made and there are no associated stop_source objects"
    // Todo: Can this be std::memory_order_relaxed as the standard does not say anything except not to introduce data
    // race?
    __state_t __current_state = __state_.load(std::memory_order_acquire);
    return ((__current_state & __stop_requested_bit) != 0) || ((__current_state >> __stop_source_counter_shift) != 0);
  }
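  // Illustrative semantics (not part of this header), in terms of the public API that
  // sits on top of this state:
  //
  //   std::stop_token __tok;
  //   {
  //     std::stop_source __src;
  //     __tok = __src.get_token();
  //     assert(__tok.stop_possible());  // counter > 0
  //   }
  //   assert(!__tok.stop_possible());   // counter == 0 and no stop was requested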

  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __request_stop() noexcept {
    auto __cb_list_lock = __try_lock_for_request_stop();
    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }
    __requesting_thread_ = this_thread::get_id();

    while (!__callback_list_.__empty()) {
      auto __cb = __callback_list_.__pop_front();

      // allow other callbacks to be removed while invoking the current callback
      __cb_list_lock.__unlock();

      bool __destroyed = false;
      __cb->__destroyed_ = &__destroyed;

      __cb->__invoke();

      // __cb's invoke function could potentially delete itself. We need to check before accessing __cb's member
      if (!__destroyed) {
        // we need to reset __destroyed_ to nullptr; otherwise it would keep pointing to a local
        // variable that is destroyed at the end of this loop iteration
        __cb->__destroyed_ = nullptr;

        // [stopcallback.cons] If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.
        // this release syncs with the acquire in the remove_callback
        __cb->__completed_.store(true, std::memory_order_release);
        __cb->__completed_.notify_all();
      }

      __cb_list_lock.__lock();
    }

    return true;
  }
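  // Illustrative flow (not part of this header), assuming the standard std::stop_source /
  // std::stop_callback front ends built on top of this state:
  //
  //   std::stop_source __src;                                      // __increment_stop_source_counter()
  //   std::stop_callback __cb(__src.get_token(), [] { /*...*/ });  // __add_callback()
  //   __src.request_stop();  // __request_stop() pops and invokes the registered callback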

  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __add_callback(__stop_callback_base* __cb) noexcept {
    // If stop has already been requested, do not lock the list; the callback is invoked synchronously instead.
    const auto __give_up_trying_to_lock_condition = [__cb](__state_t __state) {
      if ((__state & __stop_requested_bit) != 0) {
        // stop already requested: run the callback synchronously; there is no need to lock the list
        __cb->__invoke();
        return true;
      }
      // no stop source. no need to lock the list to add the callback as it can never be invoked
      return (__state >> __stop_source_counter_shift) == 0;
    };

    __callback_list_lock __cb_list_lock(__state_, __give_up_trying_to_lock_condition);

    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }

    __callback_list_.__push_front(__cb);

    return true;
    // unlock here: [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of
    // that callback.
    // Note: this release syncs with the acquire in request_stop's __try_lock_for_request_stop
  }

  // called by the destructor of stop_callback
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __remove_callback(__stop_callback_base* __cb) noexcept {
    __callback_list_lock __cb_list_lock(__state_);

    // Under the condition below, the request_stop call has just popped __cb from the list and could be executing it now
    bool __potentially_executing_now = __cb->__prev_ == nullptr && !__callback_list_.__is_head(__cb);

    if (__potentially_executing_now) {
      auto __requested_thread = __requesting_thread_;
      __cb_list_lock.__unlock();

      if (std::this_thread::get_id() != __requested_thread) {
        // [stopcallback.cons] If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed.
        __cb->__completed_.wait(false, std::memory_order_acquire);
      } else {
        // The destructor of stop_callback runs on the same thread as the thread that invokes the callback.
        // The callback is potentially invoking its own destructor. Set the flag to avoid accessing destroyed
        // members on the invoking side
        if (__cb->__destroyed_) {
          *__cb->__destroyed_ = true;
        }
      }
    } else {
      __callback_list_.__remove(__cb);
    }
  }
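  // Illustrative scenario (assumption, not from this header): the "potentially executing
  // now" branch covers a stop_callback whose destructor runs while request_stop is
  // invoking it, e.g.
  //
  //   // thread A: __src.request_stop();  // __request_stop() is currently invoking __cb
  //   // thread B: __cb.~stop_callback(); // __remove_callback() must wait on __completed_
  //   //                                  // before it may return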

private:
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI __callback_list_lock __try_lock_for_request_stop() noexcept {
    // If it is already stop_requested, do not try to request stop or lock the list again.
    const auto __lock_fail_condition = [](__state_t __state) { return (__state & __stop_requested_bit) != 0; };

    // set locked and requested bit at the same time
    const auto __after_lock_state = [](__state_t __state) {
      return __state | __callback_list_locked_bit | __stop_requested_bit;
    };

    // acq because [thread.stoptoken.intro] Registration of a callback synchronizes with the invocation of that
    // callback. We are going to invoke the callback after getting the lock, acquire so that we can see the
    // registration of a callback (and any other writes that happen before the add_callback)
    // Note: the rel (unlock) in the add_callback syncs with this acq
    // rel because [thread.stoptoken.intro] A call to request_stop that returns true synchronizes with a call
    // to stop_requested on an associated stop_token or stop_source object that returns true.
    // We need to make sure that all writes (including user code) before request_stop are made visible
    // to the threads that are waiting for `stop_requested == true`
    // Note: this rel syncs with the acq in `stop_requested`
    const auto __locked_ordering = std::memory_order_acq_rel;

    return __callback_list_lock(__state_, __lock_fail_condition, __after_lock_state, __locked_ordering);
  }

  template <class _Tp>
  friend struct __intrusive_shared_ptr_traits;
};

template <class _Tp>
struct __intrusive_shared_ptr_traits;

template <>
struct __intrusive_shared_ptr_traits<__stop_state> {
  _LIBCPP_HIDE_FROM_ABI static atomic<uint32_t>& __get_atomic_ref_count(__stop_state& __state) {
    return __state.__ref_count_;
  }
};
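// Usage sketch (assumption based on the traits name, not a definitive description):
// __intrusive_shared_ptr<__stop_state> is expected to manipulate the embedded count
// roughly as
//   __intrusive_shared_ptr_traits<__stop_state>::__get_atomic_ref_count(*__state_ptr)
//       .fetch_add(1, std::memory_order_relaxed);
// and to destroy the __stop_state once a matching fetch_sub drops the count to zero.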

#endif // _LIBCPP_STD_VER >= 20 && _LIBCPP_HAS_THREADS

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___STOP_TOKEN_STOP_STATE_H