//===-- asan_fake_stack.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

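// The after-return magic byte replicated into 2, 4, and 8 bytes, so that
// SetShadow below can fill eight shadow bytes with a single u64 store.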
static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes, inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK(AddrIsAlignedByGranularity(ptr + size));
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects ASAN_SHADOW_SCALE=3.
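    // At scale 3 each u64 store fills 8 shadow bytes (64 application bytes),
    // so the (1 << class_id) stores below poison the whole frame of this
    // size class.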
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }

  if (magic == 0) {
    uptr redzone_size = FakeStack::BytesInSizeClass(class_id) - size;
    PoisonShadow(ptr + size, redzone_size, kAsanStackRightRedzoneMagic);
  }
}

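// Maps a new FakeStack with mmap, clamping stack_size_log to the supported
// range. With flags()->uar_noreserve the mapping is created without reserving
// backing store up front, so unused fake frames stay cheap.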
FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1,
          "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d\n",
          GetCurrentTidOrInvalid(), (void *)p,
          (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str;
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.AppendF("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                  NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

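// Grabs a free frame of the given size class: starting from the per-class
// hint position, claims the first frame whose flag byte is clear and records
// the caller's real stack pointer in it. Returns nullptr if every frame of
// this class is in use.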
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr; // We are out of fake stack.
}

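// If ptr lies within this FakeStack, returns the beginning of the enclosing
// fake frame and stores its usable [*frame_beg, *frame_end) range; returns 0
// otherwise. The arithmetic relies on each size class occupying a contiguous
// 2^stack_size_log byte region.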
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// Hack: the statement below is not true if we take sigaltstack or
// makecontext into account. It should be possible to make the GC discard
// wrong stack frames when these tools are used; for now, support only the
// simplest case and let the GC discard frames from the default stack only,
// assuming no buffer on the stack is used for makecontext or sigaltstack.
//
// When a throw, longjmp, or similar event happens, we don't call OnFree(),
// and as a result may leak one or more fake frames. The good news is that
// HandleNoReturn() notifies us about all such events. If we recently had
// such a no-return event, we need to collect garbage frames. We do it based
// on their 'real_stack' values: everything that is lower than the current
// real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  AsanThread *curr_thread = GetCurrentThread();
  if (!curr_thread)
    return; // Try again when we have a thread.
  auto top = curr_thread->stack_top();
  auto bottom = curr_thread->stack_bottom();
  if (real_stack < bottom || real_stack > top)
    return; // Not the default stack.

  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue; // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      // GC only on the default stack.
      if (bottom < ff->real_stack && ff->real_stack < real_stack) {
        flags[i] = 0;
        // Poison the frame so that any access to it will be reported as UAR.
        SetShadow(reinterpret_cast<uptr>(ff), BytesInSizeClass(class_id),
                  class_id, kMagic8);
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue; // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

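// Where supported, cache the current thread's FakeStack in a THREADLOCAL
// variable so the fast paths below can avoid GetCurrentThread().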
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return nullptr; }
void SetTLSFakeStack(FakeStack *fs) {}
#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

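// GetFakeStackFast consults the TLS cache and bails out early when
// use-after-return detection is disabled; GetFakeStackFastAlways skips the
// runtime flag check and backs the __asan_stack_malloc_always_* entry points.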
static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->get_or_create_fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}

static FakeStack *GetFakeStackFastAlways() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  return GetFakeStack();
}

static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs)
    return 0;
  FakeFrame *ff =
      fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
  if (!ff)
    return 0; // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFastAlways();
  if (!fs)
    return 0;
  FakeFrame *ff =
      fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
  if (!ff)
    return 0; // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
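// Instrumented code calls these entry points with a compile-time constant
// class id; the macro stamps out a malloc/malloc_always/free trio for each
// of the 11 size classes below.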
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_always_##class_id(uptr size) {                      \
    return OnMallocAlways(class_id, size);                                    \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)

291 | extern "C" { |
292 | // TODO: remove this method and fix tests that use it by setting |
293 | // -asan-use-after-return=never, after modal UAR flag lands |
294 | // (https://github.com/google/sanitizers/issues/1394) |
295 | SANITIZER_INTERFACE_ATTRIBUTE |
296 | void *__asan_get_current_fake_stack() { return GetFakeStackFast(); } |
297 | |
298 | SANITIZER_INTERFACE_ATTRIBUTE |
299 | void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, |
300 | void **end) { |
301 | FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack); |
302 | if (!fs) return nullptr; |
303 | uptr frame_beg, frame_end; |
304 | FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack( |
305 | ptr: reinterpret_cast<uptr>(addr), frame_beg: &frame_beg, frame_end: &frame_end)); |
306 | if (!frame) return nullptr; |
307 | if (frame->magic != kCurrentStackFrameMagic) |
308 | return nullptr; |
309 | if (beg) *beg = reinterpret_cast<void*>(frame_beg); |
310 | if (end) *end = reinterpret_cast<void*>(frame_end); |
311 | return reinterpret_cast<void*>(frame->real_stack); |
312 | } |
313 | |
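// Poisons the redzones around a dynamic alloca: a fixed kAllocaRedzoneSize
// left redzone below addr, and a right redzone from addr + size up to the
// next kAllocaRedzoneSize boundary, including the partially addressable tail.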
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)
  (reinterpret_cast<void *>(MemToShadow(top)), 0,
   (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
} // extern "C"