//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

#include "string_utils.h"

namespace scudo {

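// Per-thread state, packed into a few bits. `DisableMemInit` caches the
// thread's memory-init override set via setOption(). `InitState` tracks the
// lifecycle of the thread's TSD: once a thread reaches `TornDown`, it is never
// re-initialized, and subsequent allocations from that thread are served
// through the shared `FallbackTSD`.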
struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  using ThisT = TSDRegistryExT<Allocator>;

  struct ScopedTSD {
    ALWAYS_INLINE ScopedTSD(ThisT &TSDRegistry) {
      CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      DCHECK_NE(CurrentTSD, nullptr);
    }

    ~ScopedTSD() {
      if (UNLIKELY(UnlockRequired))
        CurrentTSD->unlock();
    }

    TSD<Allocator> &operator*() { return *CurrentTSD; }

    TSD<Allocator> *operator->() {
      CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
      return CurrentTSD;
    }

  private:
    TSD<Allocator> *CurrentTSD;
    bool UnlockRequired;
  };
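
  // A usage sketch (illustrative only, not part of this header): callers such
  // as the combined allocator typically hold a TSD for the duration of an
  // operation via RAII, e.g.
  //
  //   typename TSDRegistryT::ScopedTSD TSD(*TSDRegistry);
  //   Block = TSD->getCache().allocate(ClassId);
  //
  // where `TSDRegistryT` and `TSDRegistry` are assumed to name this type and
  // an instance of it. The destructor releases the fallback TSD's lock if one
  // had to be taken.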

  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      ThreadTSD = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    State = {};
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s, so we
    // simply drain the current thread's `ThreadTSD` and the `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
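
  // enable() releases the locks in the reverse of the order in which disable()
  // acquired them. A sketch of the intended pairing (hypothetical caller,
  // e.g. around fork()):
  //
  //   Registry.disable(); // Lock Mutex, then FallbackTSD; allocating threads
  //                       // now block on the fallback TSD's lock.
  //   // ... critical section ...
  //   Registry.enable();  // Unlock in reverse order.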

  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
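    // The exclusive registry has no pool of shared TSDs to grow or shrink, so
    // `MaxTSDsCount` is not supported.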
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate over all thread-local `ThreadTSD`s, and
    // printing only the current thread's `ThreadTSD` could be misleading, so
    // we skip it entirely.
    Str->append("Exclusive TSD doesn't support iterating each TSD\n");
  }

private:
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
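    // Fast path: the thread's own TSD is initialized and the registry is not
    // disabled, so the TSD can be used without taking any lock.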
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
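    // Slow path: the thread is not (yet) initialized, has been torn down, or
    // the registry is disabled; serialize on the shared fallback TSD instead.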
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched; the fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
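    // Associate the allocator with the pthread key so that teardownThread()
    // receives it as its argument when this thread exits.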
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
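
// Thread-exit destructor, registered on the pthread key in init(). The bulk
// of the teardown is deferred until the last destructor iteration so that
// other TLS destructors, which may still allocate or free, run first.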
template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of up to
  // PTHREAD_DESTRUCTOR_ITERATIONS. We want to be called last, since other
  // destructors might call free and the like, so we wait until the final
  // iteration before draining the quarantine and committing back the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we go ahead with the teardown anyway.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_