//===-- hwasan_interceptors.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Interceptors for standard library functions.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//

#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_platform_interceptors.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

#if !SANITIZER_FUCHSIA

using namespace __hwasan;

struct HWAsanInterceptorContext {
  const char *interceptor_name;
};

# define ACCESS_MEMORY_RANGE(offset, size, access)                             \
    do {                                                                       \
      __hwasan::CheckAddressSized<ErrorAction::Recover, access>((uptr)offset,  \
                                                                size);         \
    } while (0)

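// Tag-check an entire [offset, offset + size) range; these are the building
// blocks for the read/write hooks used by the common interceptors and the
// syscall handlers below.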
# define HWASAN_READ_RANGE(offset, size) \
    ACCESS_MEMORY_RANGE(offset, size, AccessType::Load)
# define HWASAN_WRITE_RANGE(offset, size) \
    ACCESS_MEMORY_RANGE(offset, size, AccessType::Store)

# if !SANITIZER_APPLE
# define HWASAN_INTERCEPT_FUNC(name)                                          \
    do {                                                                      \
      if (!INTERCEPT_FUNCTION(name))                                          \
        VReport(1, "HWAddressSanitizer: failed to intercept '%s'\n", #name);  \
    } while (0)
# define HWASAN_INTERCEPT_FUNC_VER(name, ver)                                 \
    do {                                                                      \
      if (!INTERCEPT_FUNCTION_VER(name, ver))                                 \
        VReport(1, "HWAddressSanitizer: failed to intercept '%s@@%s'\n",      \
                #name, ver);                                                  \
    } while (0)
# define HWASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver)            \
    do {                                                                      \
      if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name))    \
        VReport(                                                              \
            1, "HWAddressSanitizer: failed to intercept '%s@@%s' or '%s'\n",  \
            #name, ver, #name);                                               \
    } while (0)

# else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
# define HWASAN_INTERCEPT_FUNC(name)
# endif // SANITIZER_APPLE

# if HWASAN_WITH_INTERCEPTORS

# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) HWASAN_READ_RANGE(p, s)
# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) HWASAN_WRITE_RANGE(p, s)
# define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
    do {                                      \
      (void)(p);                              \
      (void)(s);                              \
    } while (false)
# define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
    do {                                       \
      (void)(p);                               \
      (void)(s);                               \
    } while (false)
# include "sanitizer_common/sanitizer_common_syscalls.inc"
# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"

# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
    HWASAN_WRITE_RANGE(ptr, size)

# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
    HWASAN_READ_RANGE(ptr, size)

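// Stash the interceptor name in a stack-local context and hand its address to
// the common interceptor machinery via ctx; the (void) casts avoid
// unused-variable warnings in interceptors that never touch ctx.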
# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
    HWAsanInterceptorContext _ctx = {#func};      \
    ctx = (void *)&_ctx;                          \
    do {                                          \
      (void)(ctx);                                \
      (void)(func);                               \
    } while (false)

# define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
    do {                                           \
      (void)(ctx);                                 \
      (void)(path);                                \
    } while (false)

# define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
    do {                                        \
      (void)(ctx);                              \
      (void)(fd);                               \
    } while (false)

# define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
    do {                                        \
      (void)(ctx);                              \
      (void)(fd);                               \
    } while (false)

# define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
    do {                                                     \
      (void)(ctx);                                           \
      (void)(fd);                                            \
      (void)(newfd);                                         \
    } while (false)

# define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
    do {                                               \
      (void)(ctx);                                     \
      (void)(name);                                    \
    } while (false)

# define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
    do {                                                        \
      (void)(ctx);                                              \
      (void)(thread);                                           \
      (void)(name);                                             \
    } while (false)

# define COMMON_INTERCEPTOR_BLOCK_REAL(name) \
    do {                                     \
      (void)(name);                          \
    } while (false)

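// memset gets a dedicated implementation: fall back to internal_memset until
// hwasan is initialized, and only tag-check the destination when it lies in
// application memory and intercept_intrin is enabled.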
# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size)      \
    {                                                           \
      if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)            \
        return internal_memset(dst, v, size);                   \
      COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);      \
      if (MemIsApp(UntagAddr(reinterpret_cast<uptr>(dst))) &&   \
          common_flags()->intercept_intrin)                     \
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);         \
      return REAL(memset)(dst, v, size);                        \
    }

# define COMMON_INTERCEPTOR_STRERROR() \
    do {                               \
    } while (false)

# define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)

# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!hwasan_inited)

// The main purpose of the mmap interceptor is to prevent the user from
// allocating on top of shadow pages.
//
// For compatibility, it does not tag pointers, nor does it allow
// MAP_FIXED in combination with a tagged pointer. (Since mmap itself
// will not return a tagged pointer, the tagged pointer must have come
// from elsewhere, such as the secondary allocator, which makes it a
// very odd use case.)
template <class Mmap>
static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
                              int prot, int flags, int fd, OFF64_T offset) {
  if (addr) {
    if (flags & map_fixed) CHECK_EQ(addr, UntagPtr(addr));

    addr = UntagPtr(addr);
  }
  SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
  void *end_addr = (char *)addr + (rounded_length - 1);
  if (addr && length &&
      (!MemIsApp(reinterpret_cast<uptr>(addr)) ||
       !MemIsApp(reinterpret_cast<uptr>(end_addr)))) {
    // User requested an address that is incompatible with HWASan's
    // memory layout. Use a different address if allowed, else fail.
    if (flags & map_fixed) {
      errno = errno_EINVAL;
      return (void *)-1;
    } else {
      addr = nullptr;
    }
  }
  void *res = real_mmap(addr, length, prot, flags, fd, offset);
  if (length && res != (void *)-1) {
    uptr beg = reinterpret_cast<uptr>(res);
    DCHECK(IsAligned(beg, GetPageSize()));
    if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
      // Application has attempted to map more memory than is supported by
      // HWASan. Act as if we ran out of memory.
      internal_munmap(res, length);
      errno = errno_ENOMEM;
      return (void *)-1;
    }
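    // Reset the shadow for the new mapping so that stale tags left over from a
    // previous use of this address range do not trigger false positives.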
    __hwasan::TagMemoryAligned(beg, rounded_length, 0);
  }

  return res;
}

template <class Munmap>
static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
  // We should not tag if munmap fails, but it's too late to tag after
  // real_munmap, as the pages could be mmapped by another thread.
  uptr beg = reinterpret_cast<uptr>(addr);
  if (length && IsAligned(beg, GetPageSize())) {
    SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
    // Protect from unmapping the shadow.
    if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
      errno = errno_EINVAL;
      return -1;
    }
    __hwasan::TagMemoryAligned(beg, rounded_length, 0);
  }
  return real_munmap(addr, length);
}

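// Route the common mmap/munmap interceptors through the helpers above so the
// shadow region stays protected and freshly (un)mapped memory is untagged.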
# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
                                      fd, offset)                           \
    do {                                                                    \
      (void)(ctx);                                                          \
      return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd,    \
                              offset);                                      \
    } while (false)

# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length)     \
    do {                                                       \
      (void)(ctx);                                             \
      return munmap_interceptor(REAL(munmap), addr, length);   \
    } while (false)

# include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
# include "sanitizer_common/sanitizer_common_interceptors.inc"

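// pthread_create support: the start routine and its argument travel through
// hwasanThreadArgRetval(); only the signal mask captured in the parent thread
// is passed via this heap-allocated argument.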
struct ThreadStartArg {
  __sanitizer_sigset_t starting_sigset_;
};

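// Trampoline handed to the real pthread_create: enter the HWASan thread
// context, restore the signal mask saved by the parent, then fetch and run the
// user's start routine recorded in hwasanThreadArgRetval().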
static void *HwasanThreadStartFunc(void *arg) {
  __hwasan_thread_enter();
  SetSigProcMask(&reinterpret_cast<ThreadStartArg *>(arg)->starting_sigset_,
                 nullptr);
  InternalFree(arg);
  auto self = GetThreadSelf();
  auto args = hwasanThreadArgRetval().GetArgs(self);
  void *retval = (*args.routine)(args.arg_retval);
  hwasanThreadArgRetval().Finish(self, retval);
  return retval;
}

extern "C" {
int pthread_attr_getdetachstate(void *attr, int *v);
}

INTERCEPTOR(int, pthread_create, void *thread, void *attr,
            void *(*callback)(void *), void *param) {
  EnsureMainThreadIDIsCorrect();
  ScopedTaggingDisabler tagging_disabler;
  bool detached = [attr]() {
    int d = 0;
    return attr && !pthread_attr_getdetachstate(attr, &d) &&
           IsStateDetached(d);
  }();
  ThreadStartArg *A = (ThreadStartArg *)InternalAlloc(sizeof(ThreadStartArg));
  ScopedBlockSignals block(&A->starting_sigset_);
  // ASAN uses the same approach to disable leaks from pthread_create.
# if CAN_SANITIZE_LEAKS
  __lsan::ScopedInterceptorDisabler lsan_disabler;
# endif

  int result;
  hwasanThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
    result = REAL(pthread_create)(thread, attr, &HwasanThreadStartFunc, A);
    return result ? 0 : *(uptr *)(thread);
  });
  if (result != 0)
    InternalFree(A);
  return result;
}

INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
  int result;
  hwasanThreadArgRetval().Join((uptr)thread, [&]() {
    result = REAL(pthread_join)(thread, retval);
    return !result;
  });
  return result;
}

INTERCEPTOR(int, pthread_detach, void *thread) {
  int result;
  hwasanThreadArgRetval().Detach((uptr)thread, [&]() {
    result = REAL(pthread_detach)(thread);
    return !result;
  });
  return result;
}

INTERCEPTOR(void, pthread_exit, void *retval) {
  hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
  REAL(pthread_exit)(retval);
}

# if SANITIZER_GLIBC
INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
  int result;
  hwasanThreadArgRetval().Join((uptr)thread, [&]() {
    result = REAL(pthread_tryjoin_np)(thread, ret);
    return !result;
  });
  return result;
}

INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
            const struct timespec *abstime) {
  int result;
  hwasanThreadArgRetval().Join((uptr)thread, [&]() {
    result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
    return !result;
  });
  return result;
}
# endif

DEFINE_INTERNAL_PTHREAD_FUNCTIONS

DEFINE_REAL(int, vfork,)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork,)

// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
                           __hw_sigset_t *__restrict __oset);
# define SIG_BLOCK 0
# define SIG_SETMASK 2
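// HWASan's replacement for __sigjmp_save: stamp the buffer with kHwJmpBufMagic
// so the longjmp interceptors below can recognize a jmp_buf filled in by an
// intercepted setjmp, and record whether the signal mask was saved.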
extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
  env[0].__magic = kHwJmpBufMagic;
  env[0].__mask_was_saved =
      (savemask &&
       sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0, &env[0].__saved_mask) == 0);
  return 0;
}

static void __attribute__((always_inline))
InternalLongjmp(__hw_register_buf env, int retval) {
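  // Index of the saved stack pointer within the raw register buffer on each
  // supported architecture.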
# if defined(__aarch64__)
  constexpr size_t kSpIndex = 13;
# elif defined(__x86_64__)
  constexpr size_t kSpIndex = 6;
# elif SANITIZER_RISCV64
  constexpr size_t kSpIndex = 13;
# endif

  // Clear all memory tags on the stack between here and where we're going.
  unsigned long long stack_pointer = env[kSpIndex];
  // The stack pointer should never be tagged, so we don't need to clear the
  // tag for this function call.
  __hwasan_handle_longjmp((void *)stack_pointer);

  // Run the code for handling a longjmp.
  // We need to use a register that isn't going to be loaded from the
  // environment buffer, hence we specify the register to use explicitly.
  // We must implement this ourselves, since we don't know the order of
  // registers in different libc implementations and many implementations
  // mangle the stack pointer, so we can't use it without knowing the
  // demangling scheme.
# if defined(__aarch64__)
  register long int retval_tmp asm("x1") = retval;
  register void *env_address asm("x0") = &env[0];
  asm volatile(
      "ldp x19, x20, [%0, #0<<3];"
      "ldp x21, x22, [%0, #2<<3];"
      "ldp x23, x24, [%0, #4<<3];"
      "ldp x25, x26, [%0, #6<<3];"
      "ldp x27, x28, [%0, #8<<3];"
      "ldp x29, x30, [%0, #10<<3];"
      "ldp d8, d9, [%0, #14<<3];"
      "ldp d10, d11, [%0, #16<<3];"
      "ldp d12, d13, [%0, #18<<3];"
      "ldp d14, d15, [%0, #20<<3];"
      "ldr x5, [%0, #13<<3];"
      "mov sp, x5;"
      // Return the value requested to return through arguments.
      // This should be in x1 given what we requested above.
      "cmp %1, #0;"
      "mov x0, #1;"
      "csel x0, %1, x0, ne;"
      "br x30;"
      : "+r"(env_address)
      : "r"(retval_tmp));
# elif defined(__x86_64__)
  register long int retval_tmp asm("%rsi") = retval;
  register void *env_address asm("%rdi") = &env[0];
  asm volatile(
      // Restore registers.
      "mov (0*8)(%0),%%rbx;"
      "mov (1*8)(%0),%%rbp;"
      "mov (2*8)(%0),%%r12;"
      "mov (3*8)(%0),%%r13;"
      "mov (4*8)(%0),%%r14;"
      "mov (5*8)(%0),%%r15;"
      "mov (6*8)(%0),%%rsp;"
      "mov (7*8)(%0),%%rdx;"
      // Return 1 if retval is 0.
      "mov $1,%%rax;"
      "test %1,%1;"
      "cmovnz %1,%%rax;"
      "jmp *%%rdx;" ::"r"(env_address),
      "r"(retval_tmp));
# elif SANITIZER_RISCV64
  register long int retval_tmp asm("x11") = retval;
  register void *env_address asm("x10") = &env[0];
  asm volatile(
      "ld ra, 0<<3(%0);"
      "ld s0, 1<<3(%0);"
      "ld s1, 2<<3(%0);"
      "ld s2, 3<<3(%0);"
      "ld s3, 4<<3(%0);"
      "ld s4, 5<<3(%0);"
      "ld s5, 6<<3(%0);"
      "ld s6, 7<<3(%0);"
      "ld s7, 8<<3(%0);"
      "ld s8, 9<<3(%0);"
      "ld s9, 10<<3(%0);"
      "ld s10, 11<<3(%0);"
      "ld s11, 12<<3(%0);"
# if __riscv_float_abi_double
      "fld fs0, 14<<3(%0);"
      "fld fs1, 15<<3(%0);"
      "fld fs2, 16<<3(%0);"
      "fld fs3, 17<<3(%0);"
      "fld fs4, 18<<3(%0);"
      "fld fs5, 19<<3(%0);"
      "fld fs6, 20<<3(%0);"
      "fld fs7, 21<<3(%0);"
      "fld fs8, 22<<3(%0);"
      "fld fs9, 23<<3(%0);"
      "fld fs10, 24<<3(%0);"
      "fld fs11, 25<<3(%0);"
# elif __riscv_float_abi_soft
# else
# error "Unsupported case"
# endif
      "ld a4, 13<<3(%0);"
      "mv sp, a4;"
      // Return the value requested to return through arguments.
      // This should be in x11 given what we requested above.
      "seqz a0, %1;"
      "add a0, a0, %1;"
      "ret;"
      : "+r"(env_address)
      : "r"(retval_tmp));
# endif
}

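// The longjmp family: clear stack tags up to the destination frame, but only
// for buffers carrying kHwJmpBufMagic; anything else was not produced by an
// intercepted setjmp and is forwarded to the real implementation untouched.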
INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
  if (env[0].__magic != kHwJmpBufMagic) {
    Printf(
        "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
        "there is a bug in HWASan.\n");
    return REAL(siglongjmp)(env, val);
  }

  if (env[0].__mask_was_saved)
    // Restore the saved signal mask.
    (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask, (__hw_sigset_t *)0);
  InternalLongjmp(env[0].__jmpbuf, val);
}

// Required since glibc libpthread calls __libc_longjmp on pthread_exit, and
// _setjmp on start_thread. Hence we have to intercept the longjmp on
// pthread_exit so the __hw_jmp_buf order matches.
INTERCEPTOR(void, __libc_longjmp, __hw_jmp_buf env, int val) {
  if (env[0].__magic != kHwJmpBufMagic)
    return REAL(__libc_longjmp)(env, val);
  InternalLongjmp(env[0].__jmpbuf, val);
}

INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
  if (env[0].__magic != kHwJmpBufMagic) {
    Printf(
        "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
        "there is a bug in HWASan.\n");
    return REAL(longjmp)(env, val);
  }
  InternalLongjmp(env[0].__jmpbuf, val);
}
# undef SIG_BLOCK
# undef SIG_SETMASK

# endif // HWASAN_WITH_INTERCEPTORS

namespace __hwasan {

int OnExit() {
  if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
      __lsan::HasReportedLeaks()) {
    return common_flags()->exitcode;
  }
  // FIXME: ask frontend whether we need to return failure.
  return 0;
}

} // namespace __hwasan

namespace __hwasan {

void InitializeInterceptors() {
  static int inited = 0;
  CHECK_EQ(inited, 0);

# if HWASAN_WITH_INTERCEPTORS
  __interception::DoesNotSupportStaticLinking();
  InitializeCommonInterceptors();

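  // Reference these helpers from the .inc files above; presumably this
  // silences unused-function warnings in configurations that do not otherwise
  // use them.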
  (void)(read_iovec);
  (void)(write_iovec);

# if defined(__linux__)
  INTERCEPT_FUNCTION(__libc_longjmp);
  INTERCEPT_FUNCTION(longjmp);
  INTERCEPT_FUNCTION(siglongjmp);
  INTERCEPT_FUNCTION(vfork);
# endif // __linux__
  INTERCEPT_FUNCTION(pthread_create);
  INTERCEPT_FUNCTION(pthread_join);
  INTERCEPT_FUNCTION(pthread_detach);
  INTERCEPT_FUNCTION(pthread_exit);
# if SANITIZER_GLIBC
  INTERCEPT_FUNCTION(pthread_tryjoin_np);
  INTERCEPT_FUNCTION(pthread_timedjoin_np);
# endif
# endif

  inited = 1;
}
} // namespace __hwasan

#endif // #if !SANITIZER_FUCHSIA