//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

# include <dlfcn.h>
# include <elf.h>
# include <errno.h>
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/prctl.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <unistd.h>
# include <unwind.h>

# include "hwasan.h"
# include "hwasan_dynamic_shadow.h"
# include "hwasan_interface_internal.h"
# include "hwasan_mapping.h"
# include "hwasan_report.h"
# include "hwasan_thread.h"
# include "hwasan_thread_list.h"
# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
# if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
# endif

namespace __hwasan {
// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
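  // (For example, with a 4 KiB mmap granularity and kShadowScale == 4 this
  // ORs in 0xffff, leaving kHighMemEnd one byte below a 64 KiB boundary.)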
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  // FIXME: Android should init flags before shadow.
  if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
    uptr beg = __hwasan_shadow_memory_dynamic_address;
    uptr end = beg + shadow_size_bytes;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Report(
          "FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
          (void *)beg, (void *)end);
      DumpProcessMap();
      CHECK(MemoryRangeIsAvailable(beg, end));
    }
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
# define ARCH_GET_MAX_TAG_BITS 0x4003
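// The PR_* constants above may be missing from older kernel headers, and the
// ARCH_* values come from the not-yet-upstream x86 LAM patch referenced in
// CanUseTaggingAbi() below, which is why they are defined here.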

static bool CanUseTaggingAbi() {
# if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
# else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
# endif  // __x86_64__
}

static bool EnableTaggingAbi() {
# if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
# else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
# endif  // __x86_64__
}

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around, we assume this is the
    // case.
    return;
# else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
  }

  if (EnableTaggingAbi())
    return;

# if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
# else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
# endif
}

bool InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);
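  // The resulting layout, from low to high addresses, is roughly:
  //   LowMem | LowShadow | gap | HighShadow | (gap) | HighMem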

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  if (Verbosity())
    PrintAddressSpaceLayout();

  return true;
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
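  // Per-thread state is carved out of the region directly below the shadow;
  // a guard page (protected below) separates it from the shadow base.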
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
# endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

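// The TSD value counts down the remaining pthread destructor iterations; the
// hwasan thread is released only on the last pass, after other keys'
// destructors have had a chance to run.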
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
# endif

# if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
# else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
# endif

# if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
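  // Write a magic value into the sanitizer TLS slot, call dlerror() (which on
  // incompatible Android versions reuses TLS_SLOT_SANITIZER), and check that
  // the value survives.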
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
# else
void AndroidTestTlsSlot() {}
# endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
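  // For example, 0x13 encodes a non-recoverable 8-byte (1 << 3) store, and
  // 0x2f encodes a recoverable load whose size is passed in a register.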
# if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
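  // (For the 0x13 example above, the instrumentation emits `brk #0x913`.)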
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

# elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
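  // (The emitted byte sequence is 0f 1f 40 followed by 0x40 + 0xXY, e.g.
  // 0f 1f 40 53 for the 0x13 example above.)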
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

# elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
# if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
# endif
  // faulted insn is not ebreak, not our case
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // advance pc to point after ebreak and reconstruct addi instruction
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // reconstruct instruction
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // check if this is really 32 bit instruction
  // code is encoded in top 12 bits, since instruction is supposed to be with
  // imm
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;

# else
# error Unsupported architecture
# endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

# if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
# elif SANITIZER_RISCV64
  // pc points to EBREAK which is 2 bytes long
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
# if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
# endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
# else
# error Unsupported architecture
# endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_top_, &tls_begin_,
                       &tls_end_);
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
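  // When zero-tagging a large range, only memset the page-unaligned head and
  // tail of the shadow and release the page-aligned middle back to the OS.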
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

static void BeforeFork() {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // `__lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and lock
  // the stuff we need.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // `__lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and
  // unlock the stuff we need.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that the signal handler cannot see a stale current thread
  // pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD