1//===-- msan_linux.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of MemorySanitizer.
10//
11// Linux-, NetBSD- and FreeBSD-specific code.
12//===----------------------------------------------------------------------===//
13
14#include "sanitizer_common/sanitizer_platform.h"
15#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
16
17# include <elf.h>
18# include <link.h>
19# include <pthread.h>
20# include <signal.h>
21# include <stdio.h>
22# include <stdlib.h>
23# if SANITIZER_LINUX
24# include <sys/personality.h>
25# endif
26# include <sys/resource.h>
27# include <sys/time.h>
28# include <unistd.h>
29# include <unwind.h>
30
31# include "msan.h"
32# include "msan_allocator.h"
33# include "msan_chained_origin_depot.h"
34# include "msan_report.h"
35# include "msan_thread.h"
36# include "sanitizer_common/sanitizer_common.h"
37# include "sanitizer_common/sanitizer_procmaps.h"
38# include "sanitizer_common/sanitizer_stackdepot.h"
39
40namespace __msan {
41
42void ReportMapRange(const char *descr, uptr beg, uptr size) {
43 if (size > 0) {
44 uptr end = beg + size - 1;
45 VPrintf(1, "%s : %p-%p\n", descr, (void *)beg, (void *)end);
46 }
47}
48
49static bool CheckMemoryRangeAvailability(uptr beg, uptr size, bool verbose) {
50 if (size > 0) {
51 uptr end = beg + size - 1;
52 if (!MemoryRangeIsAvailable(range_start: beg, range_end: end)) {
53 if (verbose)
54 Printf(format: "FATAL: MemorySanitizer: Shadow range %p-%p is not available.\n",
55 (void *)beg, (void *)end);
56 return false;
57 }
58 }
59 return true;
60}
61
62static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
63 if (size > 0) {
64 void *addr = MmapFixedNoAccess(fixed_addr: beg, size, name);
65 if (beg == 0 && addr) {
66 // Depending on the kernel configuration, we may not be able to protect
67 // the page at address zero.
68 uptr gap = 16 * GetPageSizeCached();
69 beg += gap;
70 size -= gap;
71 addr = MmapFixedNoAccess(fixed_addr: beg, size, name);
72 }
73 if ((uptr)addr != beg) {
74 uptr end = beg + size - 1;
75 Printf(
76 format: "FATAL: MemorySanitizer: Cannot protect memory range %p-%p (%s).\n",
77 (void *)beg, (void *)end, name);
78 return false;
79 }
80 }
81 return true;
82}
83
84static void CheckMemoryLayoutSanity() {
85 uptr prev_end = 0;
86 for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
87 uptr start = kMemoryLayout[i].start;
88 uptr end = kMemoryLayout[i].end;
89 MappingDesc::Type type = kMemoryLayout[i].type;
90 CHECK_LT(start, end);
91 CHECK_EQ(prev_end, start);
92 CHECK(addr_is_type(start, type));
93 // Use start + (end - start) / 2 to avoid overflow on 32-bit.
94 CHECK(addr_is_type(start + (end - start) / 2, type));
95 CHECK(addr_is_type(end - 1, type));
96 if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
97 uptr addr = start;
98 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
99 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
100 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
101
102 addr = start + (end - start) / 2;
103 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
104 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
105 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
106
107 addr = end - 1;
108 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
109 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
110 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
111 }
112 prev_end = end;
113 }
114}
115
116static bool InitShadow(bool init_origins, bool dry_run) {
117 // Let user know mapping parameters first.
118 VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
119 for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
120 VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
121 kMemoryLayout[i].end - 1);
122
123 CheckMemoryLayoutSanity();
124
125 if (!MEM_IS_APP(&__msan_init)) {
126 if (!dry_run)
127 Printf(format: "FATAL: Code %p is out of application range. Non-PIE build?\n",
128 reinterpret_cast<void *>(&__msan_init));
129 return false;
130 }
131
132 const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
133
134 for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
135 uptr start = kMemoryLayout[i].start;
136 uptr end = kMemoryLayout[i].end;
137 uptr size = end - start;
138 MappingDesc::Type type = kMemoryLayout[i].type;
139
140 // Check if the segment should be mapped based on platform constraints.
141 if (start >= maxVirtualAddress)
142 continue;
143
144 bool map = type == MappingDesc::SHADOW ||
145 (init_origins && type == MappingDesc::ORIGIN);
146 bool protect = type == MappingDesc::INVALID ||
147 (!init_origins && type == MappingDesc::ORIGIN);
148 CHECK(!(map && protect));
149 if (!map && !protect) {
150 CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);
151
152 if (dry_run && type == MappingDesc::ALLOCATOR &&
153 !CheckMemoryRangeAvailability(beg: start, size, verbose: !dry_run))
154 return false;
155 }
156 if (map) {
157 if (dry_run && !CheckMemoryRangeAvailability(beg: start, size, verbose: !dry_run))
158 return false;
159 if (!dry_run &&
160 !MmapFixedSuperNoReserve(fixed_addr: start, size, name: kMemoryLayout[i].name))
161 return false;
162 if (!dry_run && common_flags()->use_madv_dontdump)
163 DontDumpShadowMemory(addr: start, length: size);
164 }
165 if (protect) {
166 if (dry_run && !CheckMemoryRangeAvailability(beg: start, size, verbose: !dry_run))
167 return false;
168 if (!dry_run && !ProtectMemoryRange(beg: start, size, name: kMemoryLayout[i].name))
169 return false;
170 }
171 }
172
173 return true;
174}
175
176bool InitShadowWithReExec(bool init_origins) {
177 // Start with dry run: check layout is ok, but don't print warnings because
178 // warning messages will cause tests to fail (even if we successfully re-exec
179 // after the warning).
180 bool success = InitShadow(init_origins, dry_run: true);
181 if (!success) {
182# if SANITIZER_LINUX
183 // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
184 int old_personality = personality(persona: 0xffffffff);
185 bool aslr_on =
186 (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);
187
188 if (aslr_on) {
189 VReport(1,
190 "WARNING: MemorySanitizer: memory layout is incompatible, "
191 "possibly due to high-entropy ASLR.\n"
192 "Re-execing with fixed virtual address space.\n"
193 "N.B. reducing ASLR entropy is preferable.\n");
194
195 if (personality(persona: old_personality | ADDR_NO_RANDOMIZE) == -1) {
196 Printf(
197 format: "FATAL: MemorySanitizer: unable to disable ASLR (perhaps "
198 "sandboxing is enabled?).\n");
199 Printf(format: "FATAL: Please rerun without sandboxing and/or ASLR.\n");
200 Die();
201 }
202
203 ReExec();
204 }
205# endif
206 }
207
208 // The earlier dry run didn't actually map or protect anything. Run again in
209 // non-dry run mode.
210 return success && InitShadow(init_origins, dry_run: false);
211}
212
213static void MsanAtExit(void) {
214 if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
215 ReportStats();
216 if (msan_report_count > 0) {
217 ReportAtExitStatistics();
218 if (common_flags()->exitcode)
219 internal__exit(exitcode: common_flags()->exitcode);
220 }
221}
222
223void InstallAtExitHandler() {
224 atexit(func: MsanAtExit);
225}
226
227// ---------------------- TSD ---------------- {{{1
228
229#if SANITIZER_NETBSD
230// Thread Static Data cannot be used in early init on NetBSD.
231// Reuse the MSan TSD API for compatibility with existing code
232// with an alternative implementation.
233
// Destructor to invoke on a thread's MsanThread when the thread exits.
// Set once by MsanTSDInit.
static void (*tsd_destructor)(void *tsd) = nullptr;

// Holder whose thread_local destructor stands in for a pthread TSD
// destructor: when the owning thread exits, the stored MsanThread (if any)
// is handed to tsd_destructor.
struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;
};

// One slot per thread; destroyed (running ~tsd_key above) at thread exit.
static thread_local struct tsd_key key;
247
248void MsanTSDInit(void (*destructor)(void *tsd)) {
249 CHECK(!tsd_destructor);
250 tsd_destructor = destructor;
251}
252
// Returns the calling thread's MsanThread, or null if none was set.
// Requires MsanTSDInit to have run first.
MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);
  return key.key;
}
257
// Installs |tsd| as the calling thread's MsanThread. A thread may set its
// value only once (never overwrite), and never to null.
void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}
264
// TSD destructor shim: clears the per-thread slot before running the real
// MsanThread teardown, so nothing observes a half-destroyed thread object.
void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);
  key.key = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
273#else
// pthread TSD key used solely so that MsanTSDDtor runs at thread exit.
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

// Creates the TSD key with |destructor| attached. Must be called exactly
// once. The inited flag is set before pthread_key_create on purpose: a
// create failure aborts via CHECK_EQ anyway.
void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
282
// Current thread cached in a plain TLS variable (cheaper to read than
// pthread TSD); pthread TSD is used only to trigger MsanTSDDtor.
static THREADLOCAL MsanThread* msan_current_thread;

// Returns the calling thread's MsanThread, or null if none was set.
MsanThread *GetCurrentThread() {
  return msan_current_thread;
}
288
289void SetCurrentThread(MsanThread *t) {
290 // Make sure we do not reset the current MsanThread.
291 CHECK_EQ(0, msan_current_thread);
292 msan_current_thread = t;
293 // Make sure that MsanTSDDtor gets called at the end.
294 CHECK(tsd_key_inited);
295 pthread_setspecific(key: tsd_key, pointer: (void *)t);
296}
297
298void MsanTSDDtor(void *tsd) {
299 MsanThread *t = (MsanThread*)tsd;
300 if (t->destructor_iterations_ > 1) {
301 t->destructor_iterations_--;
302 CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
303 return;
304 }
305 ScopedBlockSignals block(nullptr);
306 msan_current_thread = nullptr;
307 // Make sure that signal handler can not see a stale current thread pointer.
308 atomic_signal_fence(mo: memory_order_seq_cst);
309 MsanThread::TSDDtor(tsd);
310}
311# endif
312
// pthread_atfork prepare hook: acquires all MSan-global locks so the child
// process does not inherit a lock held mid-operation by another thread.
// NOTE: acquisition order here must be the exact reverse of AfterFork.
static void BeforeFork() {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  // Usually we lock ThreadRegistry, but msan does not have one.
  LockAllocator();
  StackDepotLockBeforeFork();
  ChainedOriginDepotBeforeFork();
}
320
// pthread_atfork parent/child hook: releases the locks taken in BeforeFork,
// in the reverse order of acquisition. |fork_child| tells the depots whether
// they are running in the child (which may need to reset state).
static void AfterFork(bool fork_child) {
  ChainedOriginDepotAfterFork(fork_child);
  StackDepotUnlockAfterFork(fork_child);
  UnlockAllocator();
  // Usually we unlock ThreadRegistry, but msan does not have one.
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}
328
329void InstallAtForkHandler() {
330 pthread_atfork(
331 prepare: &BeforeFork, parent: []() { AfterFork(/* fork_child= */ false); },
332 child: []() { AfterFork(/* fork_child= */ true); });
333}
334
335} // namespace __msan
336
337#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
338