1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include <__config>
10#if !defined(_LIBCPP_OBJECT_FORMAT_COFF) && !defined(_LIBCPP_OBJECT_FORMAT_XCOFF) && \
11 _LIBCPP_AVAILABILITY_MINIMUM_HEADER_VERSION < 5
12# define _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS
13#endif
14
15#include <__functional/hash.h>
16#include <memory>
17#include <typeinfo>
18
19#if _LIBCPP_HAS_THREADS
20# include <mutex>
21# include <thread>
22# if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
23# pragma comment(lib, "pthread")
24# endif
25#endif
26
27#include "include/atomic_support.h"
28
29_LIBCPP_BEGIN_NAMESPACE_STD
30
// Out-of-line definition anchors bad_weak_ptr's vtable/RTTI in this translation unit.
bad_weak_ptr::~bad_weak_ptr() noexcept {}
32
// Diagnostic message for the exception thrown when shared_ptr is constructed from an expired weak_ptr.
const char* bad_weak_ptr::what() const noexcept { return "bad_weak_ptr"; }
34
// Out-of-line definition anchors __shared_count's vtable in this translation unit.
__shared_count::~__shared_count() {}
36
// Out-of-line definition anchors __shared_weak_count's vtable in this translation unit.
__shared_weak_count::~__shared_weak_count() {}
38
39#if defined(_LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS)
40void __shared_count::__add_shared() noexcept { __libcpp_atomic_refcount_increment(t&: __shared_owners_); }
41
42bool __shared_count::__release_shared() noexcept {
43 if (__libcpp_atomic_refcount_decrement(t&: __shared_owners_) == -1) {
44 __on_zero_shared();
45 return true;
46 }
47 return false;
48}
49
// Legacy out-of-line definition kept for ABI compatibility; simply forwards to the base class.
void __shared_weak_count::__add_shared() noexcept { __shared_count::__add_shared(); }
51
52void __shared_weak_count::__add_weak() noexcept { __libcpp_atomic_refcount_increment(t&: __shared_weak_owners_); }
53
// Drops one strong reference; when the last strong owner goes away, also
// releases the single weak reference that the strong owners collectively hold.
void __shared_weak_count::__release_shared() noexcept {
  if (__shared_count::__release_shared())
    __release_weak();
}
58#endif // _LIBCPP_SHARED_PTR_DEFINE_LEGACY_INLINE_FUNCTIONS
59
60void __shared_weak_count::__release_weak() noexcept {
61 // NOTE: The acquire load here is an optimization of the very
62 // common case where a shared pointer is being destructed while
63 // having no other contended references.
64 //
65 // BENEFIT: We avoid expensive atomic stores like XADD and STREX
66 // in a common case. Those instructions are slow and do nasty
67 // things to caches.
68 //
69 // IS THIS SAFE? Yes. During weak destruction, if we see that we
70 // are the last reference, we know that no-one else is accessing
71 // us. If someone were accessing us, then they would be doing so
72 // while the last shared / weak_ptr was being destructed, and
73 // that's undefined anyway.
74 //
75 // If we see anything other than a 0, then we have possible
76 // contention, and need to use an atomicrmw primitive.
77 // The same arguments don't apply for increment, where it is legal
78 // (though inadvisable) to share shared_ptr references between
79 // threads, and have them all get copied at once. The argument
80 // also doesn't apply for __release_shared, because an outstanding
81 // weak_ptr::lock() could read / modify the shared count.
82 if (__libcpp_atomic_load(val: &__shared_weak_owners_, order: _AO_Acquire) == 0) {
83 // no need to do this store, because we are about
84 // to destroy everything.
85 //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
86 __on_zero_shared_weak();
87 } else if (__libcpp_atomic_refcount_decrement(t&: __shared_weak_owners_) == -1)
88 __on_zero_shared_weak();
89}
90
91__shared_weak_count* __shared_weak_count::lock() noexcept {
92 long object_owners = __libcpp_atomic_load(val: &__shared_owners_);
93 while (object_owners != -1) {
94 if (__libcpp_atomic_compare_exchange(val: &__shared_owners_, expected: &object_owners, after: object_owners + 1))
95 return this;
96 }
97 return nullptr;
98}
99
// Base implementation: no deleter is stored, so std::get_deleter yields nullptr
// unless a derived control block overrides this.
const void* __shared_weak_count::__get_deleter(const type_info&) const noexcept { return nullptr; }
101
102#if _LIBCPP_HAS_THREADS
103
// Backing storage for the pool of mutexes handed out by __get_sp_mut below.
// The count must stay a power of two so __get_sp_mut can select with a mask.
static constexpr std::size_t __sp_mut_count = 32;
static constinit __libcpp_mutex_t mut_back[__sp_mut_count] = {
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER};
114
// __sp_mut stores its mutex as an opaque void* so the header need not expose the mutex type.
constexpr __sp_mut::__sp_mut(void* p) noexcept : __lx_(p) {}
116
117void __sp_mut::lock() noexcept {
118 auto m = static_cast<__libcpp_mutex_t*>(__lx_);
119 __libcpp_mutex_lock(m: m);
120}
121
122void __sp_mut::unlock() noexcept { __libcpp_mutex_unlock(m: static_cast<__libcpp_mutex_t*>(__lx_)); }
123
// Maps an object address to one of __sp_mut_count pooled mutexes; used to
// serialize the std::atomic_... free functions on shared_ptr. The bitwise
// mask is valid because __sp_mut_count is a power of two.
__sp_mut& __get_sp_mut(const void* p) {
  // Function-local so the table is initialized on first use; constinit
  // guarantees constant initialization (no runtime init-guard work).
  static constinit __sp_mut muts[__sp_mut_count] = {
      &mut_back[0],  &mut_back[1],  &mut_back[2],  &mut_back[3],  &mut_back[4],  &mut_back[5],  &mut_back[6],
      &mut_back[7],  &mut_back[8],  &mut_back[9],  &mut_back[10], &mut_back[11], &mut_back[12], &mut_back[13],
      &mut_back[14], &mut_back[15], &mut_back[16], &mut_back[17], &mut_back[18], &mut_back[19], &mut_back[20],
      &mut_back[21], &mut_back[22], &mut_back[23], &mut_back[24], &mut_back[25], &mut_back[26], &mut_back[27],
      &mut_back[28], &mut_back[29], &mut_back[30], &mut_back[31]};
  return muts[hash<const void*>()(p) & (__sp_mut_count - 1)];
}
133
134#endif // _LIBCPP_HAS_THREADS
135
136#if _LIBCPP_AVAILABILITY_MINIMUM_HEADER_VERSION < 21
137
138_LIBCPP_DIAGNOSTIC_PUSH
139_LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wmissing-prototypes")
140// This function only exists for ABI compatibility and we therefore don't provide a declaration in the headers
141_LIBCPP_EXPORTED_FROM_ABI void* align(size_t alignment, size_t size, void*& ptr, size_t& space) {
142 return __align_inline::align(align: alignment, sz: size, ptr&: ptr, space&: space);
143}
144_LIBCPP_DIAGNOSTIC_POP
145
146#endif
147
148_LIBCPP_END_NAMESPACE_STD
149