//===-- asan_fake_stack.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

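// kAsanStackAfterReturnMagic is a one-byte shadow value; replicate it into
// 2-, 4-, and 8-byte patterns so SetShadow below can fill shadow memory with
// whole u64 stores.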
static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;

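// Redzones placed around a dynamic alloca by __asan_alloca_poison below;
// kAllocaRedzoneMask (== kAllocaRedzoneSize - 1) is used to round the right
// redzone start up to a 32-byte boundary.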
static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes, inline PoisonShadow for better performance.
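// With the default ASAN_SHADOW_SCALE of 3, one u64 shadow store covers 64
// bytes of application memory, so (1 << class_id) stores cover a whole frame
// of BytesInSizeClass(class_id) bytes (assuming the minimum frame size is 64
// bytes, i.e. kMinStackFrameSizeLog == 6).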
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK(AddrIsAlignedByGranularity(ptr + size));
  u64* shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects ASAN_SHADOW_SCALE=3.
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big; it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }

  if (magic == 0) {
    uptr redzone_size = FakeStack::BytesInSizeClass(class_id) - size;
    PoisonShadow(ptr + size, redzone_size, kAsanStackRightRedzoneMagic);
  }
}

FakeStack* FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  CHECK_LE(kMaxStackFrameSizeLog, stack_size_log);
  uptr size = RequiredSize(stack_size_log);
  uptr padded_size = size + kMaxStackFrameSize;
  void* true_res = reinterpret_cast<void*>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(padded_size, "FakeStack")
                             : MmapOrDie(padded_size, "FakeStack"));
  // GetFrame() requires the property that
  // (res + kFlagsOffset + SizeRequiredForFlags(stack_size_log)) is aligned to
  // kMaxStackFrameSize.
  // We didn't use MmapAlignedOrDieOnFatalError, because it requires that the
  // *size* is a power of 2, which is an overly strong condition.
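  // In other words: pick the lowest res >= true_res such that the start of
  // the first frame, res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
  // is kMaxStackFrameSize-aligned; the extra kMaxStackFrameSize bytes of
  // padding guarantee that such an address exists within the mapping.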
  static_assert(alignof(FakeStack) <= kMaxStackFrameSize);
  FakeStack* res = reinterpret_cast<FakeStack*>(
      RoundUpTo(
          (uptr)true_res + kFlagsOffset + SizeRequiredForFlags(stack_size_log),
          kMaxStackFrameSize) -
      kFlagsOffset - SizeRequiredForFlags(stack_size_log));
  res->true_start = true_res;
  res->stack_size_log_ = stack_size_log;
  u8* p = reinterpret_cast<u8*>(res);
  VReport(1,
          "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d, true_start: %p, start of first frame: "
          "%p\n",
          GetCurrentTidOrInvalid(), (void*)p,
          (void*)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
          size >> 10, flags()->uar_noreserve, res->true_start,
          (void*)res->GetFrame(stack_size_log, /*class_id*/ 0, /*pos*/ 0));
  return res;
}

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str;
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.AppendF("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                  NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  uptr padded_size = size + kMaxStackFrameSize;
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(true_start),
                                padded_size);
  UnmapOrDie(true_start, padded_size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
    FakeFrame* FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                                   uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr& hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8* flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos])
      continue;
    flags[pos] = 1;
    FakeFrame* res =
        reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr;  // We are out of fake stack.
}

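// If ptr falls inside this FakeStack, return the start of its frame and fill
// in *frame_beg / *frame_end; otherwise return 0. Frames are grouped by size
// class: each class occupies its own contiguous (1 << stack_size_log)-byte
// region, so the class is recovered from the region index and the frame from
// the offset within that region.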
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr* frame_beg, uptr* frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end)
    return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}

void FakeStack::HandleNoReturn() { needs_gc_ = true; }

// Hack: The statement below is not true if we take into account sigaltstack or
// makecontext. It should be possible to make the GC discard wrong stack frames
// if we use these tools. For now, let's support the simplest case and allow GC
// to discard only frames from the default stack, assuming there is no buffer
// on the stack which is used for makecontext or sigaltstack.
//
// When a throw, longjmp, or some such happens, we don't call OnFree() and as a
// result may leak one or more fake frames. The good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such a no-return event, we need to collect garbage
// frames. We do it based on their 'real_stack' values -- everything that is
// lower than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  AsanThread* curr_thread = GetCurrentThread();
  if (!curr_thread)
    return;  // Try again when we have a thread.
  auto top = curr_thread->stack_top();
  auto bottom = curr_thread->stack_bottom();
  if (real_stack < bottom || real_stack > top)
    return;  // Not the default stack.

  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8* flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0)
        continue;  // not allocated.
      FakeFrame* ff =
          reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
      // GC only on the default stack.
      if (bottom < ff->real_stack && ff->real_stack < real_stack) {
        flags[i] = 0;
        // Poison the frame so that any access to it will be reported as a UAR.
        SetShadow(reinterpret_cast<uptr>(ff), BytesInSizeClass(class_id),
                  class_id, kMagic8);
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void* arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8* flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0)
        continue;  // not allocated.
      FakeFrame* ff =
          reinterpret_cast<FakeFrame*>(GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

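// On the platforms below, cache the current thread's FakeStack pointer in a
// TLS variable so that the hot allocation path does not have to go through
// GetCurrentThread() on every call.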
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack* fake_stack_tls;

static FakeStack* GetTLSFakeStack() { return fake_stack_tls; }
static void SetTLSFakeStack(FakeStack* fs) { fake_stack_tls = fs; }
void ResetTLSFakeStack() { fake_stack_tls = nullptr; }
#else
static FakeStack* GetTLSFakeStack() { return nullptr; }
static void SetTLSFakeStack(FakeStack*) {}
void ResetTLSFakeStack() {}
#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

static void SuppressFakeStack() {
  AsanThread* t = GetCurrentThread();
  if (t) {
    t->SuppressFakeStack();
  }
}

static void UnsuppressFakeStack() {
  AsanThread* t = GetCurrentThread();
  if (t) {
    t->UnsuppressFakeStack();
  }
}

static FakeStack* GetFakeStack() {
  AsanThread* t = GetCurrentThread();
  if (!t || t->IsFakeStackSuppressed())
    return nullptr;
  return t->get_or_create_fake_stack();
}

static FakeStack* GetFakeStackFast() {
  FakeStack* fs = GetTLSFakeStack();
  if (LIKELY(fs))
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  fs = GetFakeStack();
  if (LIKELY(fs))
    SetTLSFakeStack(fs);
  return fs;
}

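// Like GetFakeStackFast, but does not consult
// __asan_option_detect_stack_use_after_return; this backs the
// __asan_stack_malloc_always_* entry points below, which want a fake stack
// regardless of the runtime detect_stack_use_after_return setting.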
static FakeStack* GetFakeStackFastAlways() {
  FakeStack* fs = GetTLSFakeStack();
  if (LIKELY(fs))
    return fs;
  fs = GetFakeStack();
  if (LIKELY(fs))
    SetTLSFakeStack(fs);
  return fs;
}

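// Grab a fake frame for the given size class and mark its shadow: the first
// `size` bytes become addressable and the rest of the frame becomes a right
// redzone (SetShadow with magic == 0). Returns 0 if the fake stack is
// unavailable or exhausted, in which case the instrumented caller is expected
// to fall back to the real stack frame.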
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack* fs = GetFakeStackFast();
  if (!fs)
    return 0;
  FakeFrame* ff =
      fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
  if (!ff)
    return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
  FakeStack* fs = GetFakeStackFastAlways();
  if (!fs)
    return 0;
  FakeFrame* ff =
      fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
  if (!ff)
    return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
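// For each size class N, define the entry points called by instrumented code.
// A rough sketch of what the instrumentation emits for a function whose frame
// falls into class N (not the exact generated code):
//   uptr fake = __asan_stack_malloc_N(frame_size);
//   char* frame = fake ? (char*)fake : /* real stack frame */;
//   ... address-taken locals live in `frame` ...
//   if (fake) __asan_stack_free_N(fake, frame_size);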
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_always_##class_id(uptr size) {                      \
    return OnMallocAlways(class_id, size);                                    \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)

extern "C" {
// TODO: remove this method and fix tests that use it by setting
// -asan-use-after-return=never, after the modal UAR flag lands
// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_addr_is_in_fake_stack(void* fake_stack, void* addr, void** beg,
                                   void** end) {
  FakeStack* fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs)
    return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame* frame = reinterpret_cast<FakeFrame*>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame)
    return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return nullptr;
  if (beg)
    *beg = reinterpret_cast<void*>(frame_beg);
  if (end)
    *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}

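// Poison the redzones around a dynamic alloca of `size` bytes at `addr`: a
// 32-byte left redzone just below addr, a partial redzone covering the bytes
// between addr + size and the next 32-byte boundary, and a 32-byte right
// redzone above that boundary.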
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}

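// Unpoison everything in [top, bottom), where top <= bottom because the stack
// grows down; called by instrumented code at points where allocas are
// released, e.g. stack-restore points.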
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom))
    return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / ASAN_SHADOW_GRANULARITY);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_suppress_fake_stack() { return SuppressFakeStack(); }
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unsuppress_fake_stack() { return UnsuppressFakeStack(); }
}  // extern "C"