//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_thread.h"

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_thread_history.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  thread = static_cast<AsanThread *>(arg);
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

static ThreadRegistry *asan_thread_registry;
static ThreadArgRetval *thread_data;

static Mutex mu_for_thread_context;
// TODO(leonardchan@): It should be possible to make LowLevelAllocator
// thread-safe and consolidate this one into the GlobalLowLevelAllocator.
// We should be able to do something similar to what's in
// sanitizer_stack_store.cpp.
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) AsanThreadContext(tid);
}

static void InitThreads() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (LIKELY(initialized))
    return;
  // Never reuse ASan threads: we store a pointer to the AsanThreadContext
  // in TSD and can't reliably tell when no more TSD destructors will
  // be called. It would be wrong to reuse an AsanThreadContext for another
  // thread before all TSD destructors have been called for it.

  // MIPS requires an aligned address.
  alignas(alignof(ThreadRegistry)) static char
      thread_registry_placeholder[sizeof(ThreadRegistry)];
  alignas(alignof(ThreadArgRetval)) static char
      thread_data_placeholder[sizeof(ThreadArgRetval)];

  asan_thread_registry =
      new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
  thread_data = new (thread_data_placeholder) ThreadArgRetval();
  initialized = true;
}

ThreadRegistry &asanThreadRegistry() {
  InitThreads();
  return *asan_thread_registry;
}

ThreadArgRetval &asanThreadArgRetval() {
  InitThreads();
  return *thread_data;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  if (data_size) {
    uptr available_size = (uptr)thread + size - (uptr)(thread->start_data_);
    CHECK_LE(data_size, available_size);
    internal_memcpy(thread->start_data_, start_data, data_size);
  }
  asanThreadRegistry().CreateThread(0, detached, parent_tid,
                                    stack ? StackDepotPut(*stack) : 0, thread);

  return thread;
}
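
// Callers normally use the convenience overloads declared in asan_thread.h,
// which forward either nothing or a typed object to the raw-buffer interface
// above. For example, the pthread_create interceptor passes its start routine
// and argument packed into a small struct, roughly (a sketch; the exact
// names live in asan_interceptors.cpp):
//
//   ThreadStartParams params = {start_routine, arg};
//   AsanThread *t = AsanThread::Create(params, parent_tid, &stack, detached);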

void AsanThread::GetStartData(void *out, uptr out_size) const {
  internal_memcpy(out, start_data_, out_size);
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  if (was_running)
    DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is about to die; destroy its
  // fake stack now.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_)
      return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() { return GetStackBounds().top; }

uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but no earlier
// than when the stack size is known; the procedure also has to be
// async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  //   0   -- not initialized
  //   1   -- being initialized
  //   ptr -- initialized
  // This CAS checks if the state was 0 and, if so, changes it to state 1;
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of thread-locals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls it from the parent thread; the tid check
    // above skips the call in that case, and the new thread lazily
    // initializes the fake stack when it first attempts to access it.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}

// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

void AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack)
    SetAlternateSignalStack();
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_top_,
                       &tls_begin_, &tls_end_);
  stack_top_ = RoundDownTo(stack_top_, ASAN_SHADOW_GRANULARITY);
  stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
  }
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

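  // The scans above walked down past the frame's variables and then past its
  // left redzone, so mem_ptr + ASAN_SHADOW_GRANULARITY is the frame base.
  // An ASan-instrumented frame starts with a three-word header: word [0]
  // holds kCurrentStackFrameMagic, word [1] points to the frame description
  // string, and word [2] holds the function's PC (the same layout the
  // fake-stack branch above reads).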
  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

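  // Walk the shadow downwards until we hit any stack redzone byte (left,
  // mid, or right); the variable's shadow region starts one byte above it.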
  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init, and
      // cleans up TSD. Try to figure out if this is still the main thread by
      // the stack address. We are not entirely sure that we have the correct
      // main thread limits, so only do this magic on Android, and only if the
      // found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context)
    return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockThreads() {
  __asan::asanThreadRegistry().Lock();
  __asan::asanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __asan::asanThreadArgRetval().Unlock();
  __asan::asanThreadRegistry().Unlock();
}

static ThreadRegistry *GetAsanThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;

  fake_stack->ForEachFakeFrame(
      [](uptr begin, uptr end, void *arg) {
        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
            {begin, end});
      },
      ranges);
}

void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *arg) {
        GetThreadExtraStackRangesLocked(
            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
      },
      ranges);
}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *threads) {
        if (tctx->status == ThreadStatusRunning)
          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
              tctx->os_id);
      },
      threads);
}

void PrintThreads() {
  InternalScopedString out;
  PrintThreadHistory(__asan::asanThreadRegistry(), out);
  Report("%s\n", out.data());
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
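// Typical fiber-switch usage of the two annotations below looks roughly like
// this (a hedged sketch: `swap_to_fiber` and the fiber fields are
// illustrative, not part of this interface):
//
//   void *fake_stack_save = nullptr;
//   __sanitizer_start_switch_fiber(&fake_stack_save,
//                                  target->stack_bottom, target->stack_size);
//   swap_to_fiber(target);  // swapcontext(), longjmp(), custom asm, ...
//   // ...running here again only after some fiber switches back to us...
//   __sanitizer_finish_switch_fiber(fake_stack_save, &prev_bottom, &prev_size);
//
// Passing nullptr as the first argument of __sanitizer_start_switch_fiber
// signals that the current fiber is exiting for good, which lets ASan destroy
// its fake stack (see AsanThread::StartSwitchFiber above).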
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
                       (uptr *)size_old);
}
}  // extern "C"