//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___ATOMIC_ATOMIC_SYNC_TIMED_H
#define _LIBCPP___ATOMIC_ATOMIC_SYNC_TIMED_H

#include <__atomic/atomic_waitable_traits.h>
#include <__atomic/contention_t.h>
#include <__atomic/memory_order.h>
#include <__chrono/duration.h>
#include <__config>
#include <__memory/addressof.h>
#include <__thread/poll_with_backoff.h>
#include <__thread/timed_backoff_policy.h>
#include <__type_traits/decay.h>
#include <cstddef>
#include <cstdint>
#include <cstring>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20
# if _LIBCPP_HAS_THREADS && _LIBCPP_AVAILABILITY_HAS_NEW_SYNC

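// Load a snapshot of the global contention state associated with __address. The returned value is
// meant to be passed as __monitor_value to __atomic_wait_global_table_with_timeout below, which
// blocks only while the contention state still holds that value, so a notification arriving
// between the snapshot and the wait is not lost.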
_LIBCPP_AVAILABILITY_NEW_SYNC
_LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __atomic_monitor_global(void const* __address) _NOEXCEPT;

// Wait on the global contention state for __address to change from __monitor_value, for at most
// roughly __timeout_ns nanoseconds.
_LIBCPP_AVAILABILITY_NEW_SYNC _LIBCPP_EXPORTED_FROM_ABI void __atomic_wait_global_table_with_timeout(
    void const* __address, __cxx_contention_t __monitor_value, uint64_t __timeout_ns) _NOEXCEPT;

// Wait on __address directly with the native platform wait, using the _Size bytes at __old_value
// as the expected value, for at most roughly __timeout_ns nanoseconds.
template <std::size_t _Size>
_LIBCPP_AVAILABILITY_NEW_SYNC _LIBCPP_EXPORTED_FROM_ABI void
__atomic_wait_native_with_timeout(void const* __address, void const* __old_value, uint64_t __timeout_ns) _NOEXCEPT;

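// Backoff policy used by __atomic_wait_unless_with_timeout below. For roughly the first 4
// microseconds it keeps the caller spinning on the poll function. Once more time than that has
// elapsed, it re-checks the predicate and, if it is still unsatisfied, blocks for whatever is left
// of the relative timeout, either directly on the value with the platform's native timed wait or,
// when the value type has no native wait, on the global contention table.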
template <class _AtomicWaitable, class _Poll, class _Rep, class _Period>
struct __atomic_wait_timed_backoff_impl {
  const _AtomicWaitable& __a_;
  _Poll __poll_;
  memory_order __order_;
  chrono::duration<_Rep, _Period> __rel_time_;

  using __waitable_traits _LIBCPP_NODEBUG = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;
  using __value_type _LIBCPP_NODEBUG = typename __waitable_traits::__value_type;

  _LIBCPP_HIDE_FROM_ABI __backoff_results operator()(chrono::nanoseconds __elapsed) const {
    if (__elapsed > chrono::microseconds(4)) {
      auto __contention_address = const_cast<const void*>(
          static_cast<const volatile void*>(__waitable_traits::__atomic_contention_address(__a_)));

      uint64_t __timeout_ns =
          static_cast<uint64_t>((chrono::duration_cast<chrono::nanoseconds>(__rel_time_) - __elapsed).count());

      if constexpr (__has_native_atomic_wait<__value_type>) {
        auto __atomic_value = __waitable_traits::__atomic_load(__a_, __order_);
        if (__poll_(__atomic_value))
          return __backoff_results::__poll_success;
        std::__atomic_wait_native_with_timeout<sizeof(__value_type)>(
            __contention_address, std::addressof(__atomic_value), __timeout_ns);
      } else {
        __cxx_contention_t __monitor_val = std::__atomic_monitor_global(__contention_address);
        auto __atomic_value = __waitable_traits::__atomic_load(__a_, __order_);
        if (__poll_(__atomic_value))
          return __backoff_results::__poll_success;
        std::__atomic_wait_global_table_with_timeout(__contention_address, __monitor_val, __timeout_ns);
      }
    } else {
      // not enough time has elapsed to be worth blocking; keep polling
    }
    return __backoff_results::__continue_poll;
  }
};

// The semantics of this function are similar to `atomic`'s
// `.wait(T old, std::memory_order order)`, but with a relative timeout and, instead of the
// hardcoded predicate (is the loaded value unequal to `old`?), a predicate function supplied as an
// argument. The loaded value is given as an in-out argument to the predicate. If the predicate
// returns `true`, `__atomic_wait_unless_with_timeout` returns `true`. If the predicate returns
// `false`, it must set the argument to its current understanding of the atomic value, and the wait
// continues until the relative timeout expires, in which case `false` is returned. The predicate
// function must not return `false` spuriously.
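//
// Illustrative sketch only (it assumes `__a` is an object for which `__atomic_waitable_traits` is
// specialized, e.g. the underlying atomic representation of an `atomic<int>`, so its value type is
// `int`):
//
//   bool __satisfied = std::__atomic_wait_unless_with_timeout(
//       __a,
//       memory_order_acquire,
//       [](int& __value) { return __value != 0; }, // __value already holds the loaded value
//       chrono::milliseconds(50));
//   // __satisfied is false iff the timeout expired before the predicate returned true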
template <class _AtomicWaitable, class _Poll, class _Rep, class _Period>
_LIBCPP_HIDE_FROM_ABI bool __atomic_wait_unless_with_timeout(
    const _AtomicWaitable& __a,
    memory_order __order,
    _Poll&& __poll,
    chrono::duration<_Rep, _Period> const& __rel_time) {
  static_assert(__atomic_waitable<_AtomicWaitable>, "");
  __atomic_wait_timed_backoff_impl<_AtomicWaitable, __decay_t<_Poll>, _Rep, _Period> __backoff_fn = {
      __a, __poll, __order, __rel_time};
  auto __poll_result = std::__libcpp_thread_poll_with_backoff(
      /* poll */
      [&]() {
        auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a, __order);
        return __poll(__current_val);
      },
      /* backoff */ __backoff_fn,
      __rel_time);

  return __poll_result == __poll_with_backoff_results::__poll_success;
}

# elif _LIBCPP_HAS_THREADS // _LIBCPP_HAS_THREADS && _LIBCPP_AVAILABILITY_HAS_NEW_SYNC

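// Fallback for configurations that have threads but lack the runtime support for blocking waits
// (_LIBCPP_AVAILABILITY_HAS_NEW_SYNC is false): there is nothing to block on, so the wait simply
// re-polls the value, with __libcpp_timed_backoff_policy deciding how long to back off between
// polls, until either the predicate is satisfied or the relative timeout expires.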
template <class _AtomicWaitable, class _Poll, class _Rep, class _Period>
_LIBCPP_HIDE_FROM_ABI bool __atomic_wait_unless_with_timeout(
    const _AtomicWaitable& __a,
    memory_order __order,
    _Poll&& __poll,
    chrono::duration<_Rep, _Period> const& __rel_time) {
  auto __res = std::__libcpp_thread_poll_with_backoff(
      /* poll */
      [&]() {
        auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a, __order);
        return __poll(__current_val);
      },
      /* backoff */ __libcpp_timed_backoff_policy(),
      __rel_time);
  return __res == __poll_with_backoff_results::__poll_success;
}

# endif // _LIBCPP_HAS_THREADS && _LIBCPP_AVAILABILITY_HAS_NEW_SYNC

#endif // _LIBCPP_STD_VER >= 20

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___ATOMIC_ATOMIC_SYNC_TIMED_H