#include "msan_thread.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

using namespace __msan;
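
// Allocates a page-aligned MsanThread descriptor directly with MmapOrDie and
// records the start routine, its argument and the pthread TSD destructor
// iteration count.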
MsanThread *MsanThread::Create(thread_callback_t start_routine,
                               void *arg) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(MsanThread), PageSize);
  MsanThread *thread = (MsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  thread->destructor_iterations_ = GetPthreadDestructorIterations();

  return thread;
}
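
// Records the current thread's stack and static TLS boundaries and checks
// that a local variable indeed lies within the reported stack range.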
void MsanThread::SetThreadStackAndTls() {
  GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_.top,
                       &tls_begin_, &tls_end_);
  int local;
  CHECK(AddrIsInStack((uptr)&local));
}
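
// Unpoisons the shadow for the thread's stack, its static TLS block and
// every dynamic TLS (DTV) block.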
void MsanThread::ClearShadowForThreadStackAndTLS() {
  __msan_unpoison((void *)stack_.bottom, stack_.top - stack_.bottom);
  if (tls_begin_ != tls_end_)
    __msan_unpoison((void *)tls_begin_, tls_end_ - tls_begin_);
  DTLS *dtls = DTLS_Get();
  CHECK_NE(dtls, 0);
  ForEachDVT(dtls, [](const DTLS::DTV &dtv, int id) {
    __msan_unpoison((void *)(dtv.beg), dtv.size);
  });
}
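
// Per-thread initialization: capture stack/TLS bounds, verify the stack lies
// in the application address range, unpoison it and set up the thread-local
// allocator cache.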
void MsanThread::Init() {
  SetThreadStackAndTls();
  CHECK(MEM_IS_APP(stack_.bottom));
  CHECK(MEM_IS_APP(stack_.top - 1));
  ClearShadowForThreadStackAndTLS();
  malloc_storage().Init();
}
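
// Thread-specific-data destructor: tears down the MsanThread stored in the
// TSD slot.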
void MsanThread::TSDDtor(void *tsd) {
  MsanThread *t = (MsanThread*)tsd;
  t->Destroy();
}
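
// Flushes the thread's allocator cache back to the global allocator and
// releases the descriptor mapping created in Create(); DTLS bookkeeping is
// destroyed last.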
void MsanThread::Destroy() {
  malloc_storage().CommitBack();
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  uptr size = RoundUpTo(sizeof(MsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

thread_return_t MsanThread::ThreadStart() {
  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    return 0;
  }

  return start_routine_(arg_);
}
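
// Returns the bounds of the stack the thread is currently running on; while
// a fiber switch is in progress this may be the pending next_stack_.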
MsanThread::StackBounds MsanThread::GetStackBounds() const {
  if (!stack_switching_)
    return {stack_.bottom, stack_.top};
  const uptr cur_stack = GET_CURRENT_FRAME();
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in process of overwriting stack_.top/bottom_. But in such case
  // we are already on the next stack.
  if (cur_stack >= next_stack_.bottom && cur_stack < next_stack_.top)
    return {next_stack_.bottom, next_stack_.top};
  return {stack_.bottom, stack_.top};
}

uptr MsanThread::stack_top() { return GetStackBounds().top; }

uptr MsanThread::stack_bottom() { return GetStackBounds().bottom; }

bool MsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}
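
// Begins a stack (fiber) switch: remembers the bounds of the stack being
// switched to and marks the switch as in progress.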
void MsanThread::StartSwitchFiber(uptr bottom, uptr size) {
  CHECK(!stack_switching_);
  next_stack_.bottom = bottom;
  next_stack_.top = bottom + size;
  stack_switching_ = true;
}
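
// Completes a stack switch: optionally reports the previous stack bounds to
// the caller and installs next_stack_ as the current stack.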
void MsanThread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) {
  CHECK(stack_switching_);
  if (bottom_old)
    *bottom_old = stack_.bottom;
  if (size_old)
    *size_old = stack_.top - stack_.bottom;
  stack_.bottom = next_stack_.bottom;
  stack_.top = next_stack_.top;
  stack_switching_ = false;
  next_stack_.top = 0;
  next_stack_.bottom = 0;
}