//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

# include <dlfcn.h>
# include <elf.h>
# include <errno.h>
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/prctl.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <unistd.h>
# include <unwind.h>

# include "hwasan.h"
# include "hwasan_dynamic_shadow.h"
# include "hwasan_interface_internal.h"
# include "hwasan_mapping.h"
# include "hwasan_report.h"
# include "hwasan_thread.h"
# include "hwasan_thread_list.h"
# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
# if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
# endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
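  // ORing in ((granularity << kShadowScale) - 1) sets all the low bits, so
  // that kHighMemEnd + 1 is a multiple of (mmap granularity << kShadowScale)
  // and the shadow of the high range ends on a mapping-granularity boundary.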
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  // FIXME: Android should init flags before shadow.
  if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
    uptr beg = __hwasan_shadow_memory_dynamic_address;
    uptr end = beg + shadow_size_bytes;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Report(
          "FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
          (void *)beg, (void *)end);
      DumpProcessMap();
      CHECK(MemoryRangeIsAvailable(beg, end));
    }
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
# define PR_PMLEN_SHIFT 24
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
# define ARCH_GET_MAX_TAG_BITS 0x4003

static bool CanUseTaggingAbi() {
# if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
# else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
# endif // __x86_64__
}

static bool EnableTaggingAbi() {
# if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
# elif defined(__aarch64__)
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
# elif SANITIZER_RISCV64
  // Enable RISC-V address tagging via pointer masking.
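  // The request packs the number of bits to mask (PMLEN) above PR_PMLEN_SHIFT
  // and sets PR_TAGGED_ADDR_ENABLE in the low bits, so a single prctl both
  // selects the tag width and turns tagged addresses on.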
  uptr req = kTagBits << PR_PMLEN_SHIFT | PR_TAGGED_ADDR_ENABLE;
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL, req, 0, 0, 0)))
    return false;
  uptr rsp = internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  if (internal_iserror(rsp))
    return false;
  return rsp & PR_TAGGED_ADDR_ENABLE;
# else
# error Architecture not supported
# endif // __x86_64__
}

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around, we assume this is the
    // case.
    return;
# else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
  }

  if (EnableTaggingAbi())
    return;

# if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
# else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
# endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  if (Verbosity())
    PrintAddressSpaceLayout();

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
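  // The thread list storage starts (1 << kShadowBaseAlignment) bytes below the
  // shadow base and ends one guard page below it; the guard gap between the
  // thread space and the shadow is protected further down.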
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
# endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

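// The TSD value holds the number of remaining destructor iterations for the
// thread. The destructor re-arms itself until that count drops to one, so the
// actual hwasan thread teardown runs on the last pass, after other TSD
// destructors have had a chance to execute instrumented code.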
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
# endif

# if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
# else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
# endif

# if SANITIZER_ANDROID
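// Check that the sanitizer TLS slot is not shared with dlerror(): write a
// magic value, call dlerror(), and verify that the value survived. On
// incompatible Android versions dlerror() reuses TLS_SLOT_SANITIZER(6) and
// would clobber it.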
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
# else
void AndroidTestTlsSlot() {}
# endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
# if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
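  // The 16-bit BRK immediate occupies bits [20:5] of the AArch64 instruction
  // word, hence the shift and mask below.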
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{}; // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

# elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
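  // That NOP encodes as the bytes 0F 1F 40 <imm8> with imm8 = 0x40 + 0xXY,
  // which is what the byte-pattern check below looks for.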
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{}; // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

# elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
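  // Assemble the instruction from individual bytes: with the C extension the
  // trap site may be only 2-byte aligned, so a direct 32-bit load could be
  // misaligned.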
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
# if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
# endif
  // If the faulting instruction is not an ebreak, the trap is not ours.
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // Advance pc past the ebreak and read the bytes of the following addi
  // instruction.
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // Reconstruct the 32-bit instruction.
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // The access code is carried in the addi immediate, i.e. the top 12 bits of
  // the instruction.
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{}; // Not our case
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;

# else
# error Unsupported architecture
# endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

# if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
# elif SANITIZER_RISCV64
  // pc points at the ebreak; step over it, handling both the compressed
  // (2-byte) and full (4-byte) encodings.
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
# if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
# endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
# else
# error Unsupported architecture
# endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_top_, &tls_begin_,
                       &tls_end_);
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
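  // For large zero-tag regions, only the partial pages at the head and tail of
  // the shadow range are memset; the page-aligned middle is released back to
  // the OS below and reads back as zeros.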
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

static void BeforeFork() {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // The `__lsan` locking functions below are defined even when
  // CAN_SANITIZE_LEAKS is off, and they lock what we need here.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // The `__lsan` unlocking functions below are defined even when
  // CAN_SANITIZE_LEAKS is off, and they unlock what was locked before the fork.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

} // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD