#include "hwasan_thread.h"

#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __hwasan {

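// Returns a non-zero pseudo-random seed. Prefer the system RNG; if it is
// unavailable, fall back to mixing the clock with the current frame address.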
static u32 RandomSeed() {
  u32 seed;
  do {
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
                            /*blocking=*/false))) {
      seed = static_cast<u32>(
          (NanoTime() >> 12) ^
          (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
    }
  } while (!seed);
  return seed;
}

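// Seed the per-thread state used by GenerateRandomTag(). With random_tags
// disabled, the unique thread id is used instead, so tag generation is
// deterministic per thread.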
void Thread::InitRandomState() {
  random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  random_state_inited_ = true;

  // Push a random number of zeros onto the ring buffer so that the first stack
  // tag base will be random.
  for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
    stack_allocations_->push(0);
}

void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
                  const InitState *state) {
  CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
  CHECK_EQ(0, stack_top_);
  CHECK_EQ(0, stack_bottom_);

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
  if (!IsMainThread())
    os_id_ = GetTid();

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

#if !SANITIZER_FUCHSIA
  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  // be initialized before we enter the thread itself, so we will instead call
  // this later.
  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
  dtls_ = DTLS_Get();
  AllocatorThreadStart(allocator_cache());

  if (flags()->verbose_threads) {
    if (IsMainThread()) {
      Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
             sizeof(Thread), heap_allocations_->SizeInBytes(),
             stack_allocations_->size() * sizeof(uptr));
    }
    Print("Creating : ");
  }
  ClearShadowForThreadStackAndTLS();
}

void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisable needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  if (stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
    CHECK(MemIsApp(stack_bottom_));
    CHECK(MemIsApp(stack_top_ - 1));
  }
}

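// Re-tag the shadow for the thread's stack and TLS with the tag carried by
// the corresponding pointers, clearing any stale tags left behind.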
void Thread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    TagMemory(UntagAddr(stack_bottom_),
              UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
              GetTagFromPointer(stack_top_));
  if (tls_begin_ != tls_end_)
    TagMemory(UntagAddr(tls_begin_),
              UntagAddr(tls_end_) - UntagAddr(tls_begin_),
              GetTagFromPointer(tls_begin_));
}

void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorThreadFinish(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();
  DTLS_Destroy();
  // Unregister this as the current thread.
  // Instrumented code can not run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after all
  // TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}

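// Fiber support: StartSwitchFiber() records the bounds of the stack being
// switched to; FinishSwitchFiber() commits them as the current stack and
// optionally reports the bounds of the previous one.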
void Thread::StartSwitchFiber(uptr bottom, uptr size) {
  if (atomic_load(&stack_switching_, memory_order_acquire)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);
}

void Thread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

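// Returns the bounds of the stack this thread is currently running on,
// accounting for an in-progress fiber switch.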
inline Thread::StackBounds Thread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_)
      return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  const uptr cur_stack = (uptr)__builtin_frame_address(0);
  // Note: check the next stack first, because FinishSwitchFiber may be in the
  // process of overwriting stack_top_/stack_bottom_; in that case we are
  // already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr Thread::stack_top() { return GetStackBounds().top; }

uptr Thread::stack_bottom() { return GetStackBounds().bottom; }

uptr Thread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

void Thread::Print(const char *Prefix) {
  Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix,
         (ssize)unique_id_, (void *)this, (void *)stack_bottom(),
         (void *)stack_top(), stack_top() - stack_bottom(), (void *)tls_begin(),
         (void *)tls_end());
}

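// One step of a 32-bit xorshift PRNG (Marsaglia's 13/17/5 variant).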
static u32 xorshift(u32 state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);
  return tag;
}

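// Thread::Init() does not set os_id_ for the main thread, and the recorded id
// can become stale (e.g. after fork()), so refresh it from the OS here.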
void EnsureMainThreadIDIsCorrect() {
  auto *t = __hwasan::GetCurrentThread();
  if (t && (t->IsMainThread()))
    t->set_os_id(GetTid());
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  auto &tl = __hwasan::hwasanThreadList();
  tl.CheckLocked();
  return &tl;
}

static __hwasan::Thread *GetThreadByOsIDLocked(ThreadID os_id) {
  return GetHwasanThreadListLocked()->FindThreadLocked(
      [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}

void LockThreads() {
  __hwasan::hwasanThreadList().Lock();
  __hwasan::hwasanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __hwasan::hwasanThreadArgRetval().Unlock();
  __hwasan::hwasanThreadList().Unlock();
}

void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }

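// Reports this thread's stack, TLS and DTLS ranges to the leak checker; the
// allocator cache range is left empty (see the FIXME below).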
bool GetThreadRangesLocked(ThreadID os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  auto *t = GetThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // FIXME: is this correct for HWASan?
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(ThreadID os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<ThreadID> *threads) {
  // TODO: implement.
}
void PrintThreads() {
  // TODO: implement.
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __hwasan;

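// Fiber-switching entry points; these implement the common sanitizer
// __sanitizer_start_switch_fiber / __sanitizer_finish_switch_fiber interface.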
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **, const void *bottom, uptr size) {
  if (auto *t = GetCurrentThread())
    t->StartSwitchFiber((uptr)bottom, size);
  else
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *, const void **bottom_old,
                                     uptr *size_old) {
  if (auto *t = GetCurrentThread())
    t->FinishSwitchFiber((uptr *)bottom_old, size_old);
  else
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
}
}  // extern "C"