//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

#include "string_utils.h"

namespace scudo {

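// Per-thread state for the exclusive registry: `DisableMemInit` caches the
// thread's memory-initialization override, and `InitState` records whether the
// thread-local TSD has not been set up yet, is live, or has been torn down.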
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  using ThisT = TSDRegistryExT<Allocator>;

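  // RAII accessor to a TSD. On the fast path it hands out the calling thread's
  // `ThreadTSD` without taking any lock; otherwise it locks the shared
  // `FallbackTSD` and unlocks it again on destruction.
  //
  // Rough usage sketch (hypothetical caller code, not part of this header,
  // assuming `Registry` is the allocator's TSD registry and the TSD type
  // exposes a `getCache()` accessor):
  //
  //   typename TSDRegistryExT<Allocator>::ScopedTSD TSD(Registry);
  //   auto &Cache = TSD->getCache(); // fallback lock released at scope exit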
  struct ScopedTSD {
    ALWAYS_INLINE ScopedTSD(ThisT &TSDRegistry) {
      CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      DCHECK_NE(CurrentTSD, nullptr);
    }

    ~ScopedTSD() {
      if (UNLIKELY(UnlockRequired))
        CurrentTSD->unlock();
    }

    TSD<Allocator> &operator*() { return *CurrentTSD; }

    TSD<Allocator> *operator->() {
      CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
      return CurrentTSD;
    }

  private:
    TSD<Allocator> *CurrentTSD;
    bool UnlockRequired;
  };

  void init(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    // If more than one thread is initializing at the exact same moment, the
    // threads that lose don't need to do anything.
    if (UNLIKELY(atomic_load_relaxed(&Initialized) != 0))
      return;
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    atomic_store_relaxed(&Initialized, 1);
  }

  void initOnceMaybe(Allocator *Instance) {
    if (LIKELY(atomic_load_relaxed(&Initialized) != 0))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    ScopedLock L(Mutex);
    atomic_store_relaxed(&Initialized, 0);
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s.
    // Simply drain the current thread's `ThreadTSD` and the `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback TSD
  // and force all threads to attempt to use it instead of their local one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

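  // `MaxTSDsCount` is reported as unsupported: with one exclusive TSD per
  // thread plus a single fallback, there is no shared TSD pool to resize.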
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s.
    // Instead of printing only the current thread's `ThreadTSD`, which could
    // be misleading, we just skip it.
    Str->append("Exclusive TSD doesn't support iterating each TSD\n");
  }

private:
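  // Fast path: the thread's TSD is initialized and the registry is not
  // disabled, so return it without locking; it is only used by this thread.
  // Slow path: lock and return the shared `FallbackTSD`; the caller (i.e.
  // `ScopedTSD`) must unlock it when `UnlockRequired` is set.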
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // Using minimal initialization allows for global initialization while keeping
  // the thread-specific structure untouched. The fallback structure will be
  // used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

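  // `PThreadKey` exists mainly so that `teardownThread` runs as a destructor
  // at thread exit; the actual TSD lives in the static thread_local `ThreadTSD`
  // below. `FallbackTSD` is shared between threads and protected by its own
  // lock, while `Mutex` guards initialization and the disable()/enable() pair.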
  pthread_key_t PThreadKey = {};
  atomic_u8 Initialized = {};
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
  // iterations. We want to be called last since other destructors might call
  // free and the like, so we wait until the last iteration before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_