1//===-- sanitizer_mac.cpp -------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is shared between various sanitizers' runtime libraries and
10// implements OSX-specific functions.
11//===----------------------------------------------------------------------===//
12
13#include "sanitizer_platform.h"
14#if SANITIZER_APPLE
15# include "interception/interception.h"
16# include "sanitizer_mac.h"
17
18// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
19// the clients will most certainly use 64-bit ones as well.
20# ifndef _DARWIN_USE_64_BIT_INODE
21# define _DARWIN_USE_64_BIT_INODE 1
22# endif
23# include <stdio.h>
24
25// Start searching for available memory region past PAGEZERO, which is
26// 4KB on 32-bit and 4GB on 64-bit.
27# define GAP_SEARCH_START_ADDRESS \
28 ((SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000)
29
30# include "sanitizer_common.h"
31# include "sanitizer_file.h"
32# include "sanitizer_flags.h"
33# include "sanitizer_interface_internal.h"
34# include "sanitizer_internal_defs.h"
35# include "sanitizer_libc.h"
36# include "sanitizer_platform_limits_posix.h"
37# include "sanitizer_procmaps.h"
38# include "sanitizer_ptrauth.h"
39
40# if !SANITIZER_IOS
41# include <crt_externs.h> // for _NSGetEnviron
42# else
43extern char **environ;
44# endif
45
46// Integrate with CrashReporter library if available
47# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
48# define HAVE_CRASHREPORTERCLIENT_H 1
49# include <CrashReporterClient.h>
50# else
51# define HAVE_CRASHREPORTERCLIENT_H 0
52# endif
53
54# if !SANITIZER_IOS
55# include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
56# else
57extern "C" {
58extern char ***_NSGetArgv(void);
59}
60# endif
61
62# include <asl.h>
63# include <dlfcn.h> // for dladdr()
64# include <errno.h>
65# include <fcntl.h>
66# include <inttypes.h>
67# include <libkern/OSAtomic.h>
68# include <mach-o/dyld.h>
69# include <mach/mach.h>
70# include <mach/mach_error.h>
71# include <mach/mach_time.h>
72# include <mach/vm_statistics.h>
73# include <malloc/malloc.h>
74# include <os/log.h>
75# include <pthread.h>
76# include <pthread/introspection.h>
77# include <sched.h>
78# include <signal.h>
79# include <spawn.h>
80# include <stdlib.h>
81# include <sys/ioctl.h>
82# include <sys/mman.h>
83# include <sys/resource.h>
84# include <sys/stat.h>
85# include <sys/sysctl.h>
86# include <sys/types.h>
87# include <sys/wait.h>
88# include <unistd.h>
89# include <util.h>
90
91// From <crt_externs.h>, but we don't have that file on iOS.
92extern "C" {
93 extern char ***_NSGetArgv(void);
94 extern char ***_NSGetEnviron(void);
95}
96
97// From <mach/mach_vm.h>, but we don't have that file on iOS.
98extern "C" {
99 extern kern_return_t mach_vm_region_recurse(
100 vm_map_t target_task,
101 mach_vm_address_t *address,
102 mach_vm_size_t *size,
103 natural_t *nesting_depth,
104 vm_region_recurse_info_t info,
105 mach_msg_type_number_t *infoCnt);
106
107 extern const void* _dyld_get_shared_cache_range(size_t* length);
108}
109
110# if !SANITIZER_GO
111// Weak symbol no-op when TSan is not linked
112SANITIZER_WEAK_ATTRIBUTE extern void __tsan_set_in_internal_write_call(
113 bool value) {}
114# endif
115
116namespace __sanitizer {
117
118#include "sanitizer_syscall_generic.inc"
119
120// Direct syscalls, don't call libmalloc hooks (but not available on 10.6).
121extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
122 off_t off) SANITIZER_WEAK_ATTRIBUTE;
123extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
124
125// ---------------------- sanitizer_libc.h
126
127// From <mach/vm_statistics.h>, but not on older OSs.
128#ifndef VM_MEMORY_SANITIZER
129#define VM_MEMORY_SANITIZER 99
130#endif
131
132// XNU on Darwin provides a mmap flag that optimizes allocation/deallocation of
133// giant memory regions (i.e. shadow memory regions).
134#define kXnuFastMmapFd 0x4
135static size_t kXnuFastMmapThreshold = 2 << 30; // 2 GB
136static bool use_xnu_fast_mmap = false;
137
138uptr internal_mmap(void *addr, size_t length, int prot, int flags,
139 int fd, u64 offset) {
140 if (fd == -1) {
141 fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);
142 if (length >= kXnuFastMmapThreshold) {
143 if (use_xnu_fast_mmap) fd |= kXnuFastMmapFd;
144 }
145 }
146 if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
147 return (uptr)mmap(addr, length, prot, flags, fd, offset);
148}
149
150uptr internal_munmap(void *addr, uptr length) {
151 if (&__munmap) return __munmap(addr, length);
152 return munmap(addr, length);
153}
154
155uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
156 void *new_address) {
157 CHECK(false && "internal_mremap is unimplemented on Mac");
158 return 0;
159}
160
161int internal_mprotect(void *addr, uptr length, int prot) {
162 return mprotect(addr, length, prot);
163}
164
165int internal_madvise(uptr addr, uptr length, int advice) {
166 return madvise((void *)addr, length, advice);
167}
168
169uptr internal_close(fd_t fd) {
170 return close(fd);
171}
172
173uptr internal_open(const char *filename, int flags) {
174 return open(filename, flags);
175}
176
177uptr internal_open(const char *filename, int flags, u32 mode) {
178 return open(filename, flags, mode);
179}
180
181uptr internal_read(fd_t fd, void *buf, uptr count) {
182 return read(fd, buf, count);
183}
184
185uptr internal_write(fd_t fd, const void *buf, uptr count) {
186# if SANITIZER_GO
187 return write(fd, buf, count);
188# else
189 // We need to disable interceptors when writing in TSan
190 __tsan_set_in_internal_write_call(true);
191 uptr res = write(fd, buf, count);
192 __tsan_set_in_internal_write_call(false);
193 return res;
194# endif
195}
196
197uptr internal_stat(const char *path, void *buf) {
198 return stat(path, (struct stat *)buf);
199}
200
201uptr internal_lstat(const char *path, void *buf) {
202 return lstat(path, (struct stat *)buf);
203}
204
205uptr internal_fstat(fd_t fd, void *buf) {
206 return fstat(fd, (struct stat *)buf);
207}
208
209uptr internal_filesize(fd_t fd) {
210 struct stat st;
211 if (internal_fstat(fd, &st))
212 return -1;
213 return (uptr)st.st_size;
214}
215
216uptr internal_dup(int oldfd) {
217 return dup(oldfd);
218}
219
220uptr internal_dup2(int oldfd, int newfd) {
221 return dup2(oldfd, newfd);
222}
223
224uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
225 return readlink(path, buf, bufsize);
226}
227
228uptr internal_unlink(const char *path) {
229 return unlink(path);
230}
231
232uptr internal_sched_yield() {
233 return sched_yield();
234}
235
236void internal__exit(int exitcode) {
237 _exit(exitcode);
238}
239
240void internal_usleep(u64 useconds) { usleep(useconds); }
241
242uptr internal_getpid() {
243 return getpid();
244}
245
246int internal_dlinfo(void *handle, int request, void *p) {
247 UNIMPLEMENTED();
248}
249
250int internal_sigaction(int signum, const void *act, void *oldact) {
251 return sigaction(signum,
252 (const struct sigaction *)act, (struct sigaction *)oldact);
253}
254
255void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }
256
257uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
258 __sanitizer_sigset_t *oldset) {
259 // Don't use sigprocmask here, because it affects all threads.
260 return pthread_sigmask(how, set, oldset);
261}
262
263// Doesn't call pthread_atfork() handlers (but not available on 10.6).
264extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;
265
266int internal_fork() {
267 if (&__fork)
268 return __fork();
269 return fork();
270}
271
272int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
273 uptr *oldlenp, const void *newp, uptr newlen) {
274 return sysctl(const_cast<int *>(name), namelen, oldp, (size_t *)oldlenp,
275 const_cast<void *>(newp), (size_t)newlen);
276}
277
278int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
279 const void *newp, uptr newlen) {
280 return sysctlbyname(sname, oldp, (size_t *)oldlenp, const_cast<void *>(newp),
281 (size_t)newlen);
282}
283
284bool internal_spawn(const char* argv[], const char* envp[], pid_t* pid,
285 fd_t fd_stdin, fd_t fd_stdout) {
286 // NOTE: Caller ensures that fd_stdin and fd_stdout are not 0, 1, or 2, since
287 // this can break communication.
288 //
289 // NOTE: Caller is responsible for closing fd_stdin after the process has
290 // died.
291
292 int res;
293 auto fd_closer = at_scope_exit([&] {
294 // NOTE: We intentionally do not close fd_stdin since this can
295 // cause us to receive a fatal SIGPIPE if the process dies.
296 internal_close(fd_stdout);
297 });
298
299 // File descriptor actions
300 posix_spawn_file_actions_t acts;
301 res = posix_spawn_file_actions_init(&acts);
302 if (res != 0)
303 return false;
304
305 auto acts_cleanup = at_scope_exit([&] {
306 posix_spawn_file_actions_destroy(&acts);
307 });
308
309 res = posix_spawn_file_actions_adddup2(&acts, fd_stdin, STDIN_FILENO) ||
310 posix_spawn_file_actions_adddup2(&acts, fd_stdout, STDOUT_FILENO) ||
311 posix_spawn_file_actions_addclose(&acts, fd_stdin) ||
312 posix_spawn_file_actions_addclose(&acts, fd_stdout);
313 if (res != 0)
314 return false;
315
316 // Spawn attributes
317 posix_spawnattr_t attrs;
318 res = posix_spawnattr_init(&attrs);
319 if (res != 0)
320 return false;
321
322 auto attrs_cleanup = at_scope_exit([&] {
323 posix_spawnattr_destroy(&attrs);
324 });
325
326 // In the spawned process, close all file descriptors that are not explicitly
327 // described by the file actions object. This is Darwin-specific extension.
328 res = posix_spawnattr_setflags(&attrs, POSIX_SPAWN_CLOEXEC_DEFAULT);
329 if (res != 0)
330 return false;
331
332 // posix_spawn
333 char **argv_casted = const_cast<char **>(argv);
334 char **envp_casted = const_cast<char **>(envp);
335 res = posix_spawn(pid, argv[0], &acts, &attrs, argv_casted, envp_casted);
336 if (res != 0)
337 return false;
338
339 return true;
340}
341
342uptr internal_rename(const char *oldpath, const char *newpath) {
343 return rename(oldpath, newpath);
344}
345
346uptr internal_ftruncate(fd_t fd, uptr size) {
347 return ftruncate(fd, size);
348}
349
350uptr internal_execve(const char *filename, char *const argv[],
351 char *const envp[]) {
352 return execve(filename, argv, envp);
353}
354
355uptr internal_waitpid(int pid, int *status, int options) {
356 return waitpid(pid, status, options);
357}
358
359// ----------------- sanitizer_common.h
360bool FileExists(const char *filename) {
361 if (ShouldMockFailureToOpen(filename))
362 return false;
363 struct stat st;
364 if (stat(filename, &st))
365 return false;
366 // Sanity check: filename is a regular file.
367 return S_ISREG(st.st_mode);
368}
369
370bool DirExists(const char *path) {
371 struct stat st;
372 if (stat(path, &st))
373 return false;
374 return S_ISDIR(st.st_mode);
375}
376
377ThreadID GetTid() {
378 ThreadID tid;
379 pthread_threadid_np(nullptr, &tid);
380 return tid;
381}
382
383void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
384 uptr *stack_bottom) {
385 CHECK(stack_top);
386 CHECK(stack_bottom);
387 uptr stacksize = pthread_get_stacksize_np(pthread_self());
388 // pthread_get_stacksize_np() returns an incorrect stack size for the main
389 // thread on Mavericks. See
390 // https://github.com/google/sanitizers/issues/261
391 if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization &&
392 stacksize == (1 << 19)) {
393 struct rlimit rl;
394 CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
395 // Most often rl.rlim_cur will be the desired 8M.
396 if (rl.rlim_cur < kMaxThreadStackSize) {
397 stacksize = rl.rlim_cur;
398 } else {
399 stacksize = kMaxThreadStackSize;
400 }
401 }
402 void *stackaddr = pthread_get_stackaddr_np(pthread_self());
403 *stack_top = (uptr)stackaddr;
404 *stack_bottom = *stack_top - stacksize;
405}
406
407char **GetEnviron() {
408#if !SANITIZER_IOS
409 char ***env_ptr = _NSGetEnviron();
410 if (!env_ptr) {
411 Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
412 "called after libSystem_initializer().\n");
413 CHECK(env_ptr);
414 }
415 char **environ = *env_ptr;
416#endif
417 CHECK(environ);
418 return environ;
419}
420
421const char *GetEnv(const char *name) {
422 char **env = GetEnviron();
423 uptr name_len = internal_strlen(name);
424 while (*env != 0) {
425 uptr len = internal_strlen(*env);
426 if (len > name_len) {
427 const char *p = *env;
428 if (!internal_memcmp(p, name, name_len) &&
429 p[name_len] == '=') { // Match.
430 return *env + name_len + 1; // String starting after =.
431 }
432 }
433 env++;
434 }
435 return 0;
436}
437
438uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
439 CHECK_LE(kMaxPathLength, buf_len);
440
441 // On OS X the executable path is saved to the stack by dyld. Reading it
442 // from there is much faster than calling dladdr, especially for large
443 // binaries with symbols.
444 InternalMmapVector<char> exe_path(kMaxPathLength);
445 uint32_t size = exe_path.size();
446 if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
447 realpath(exe_path.data(), buf) != 0) {
448 return internal_strlen(buf);
449 }
450 return 0;
451}
452
453uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
454 return ReadBinaryName(buf, buf_len);
455}
456
457void ReExec() {
458 UNIMPLEMENTED();
459}
460
461void CheckASLR() {
462 // Do nothing
463}
464
465void CheckMPROTECT() {
466 // Do nothing
467}
468
469uptr GetPageSize() {
470 return sysconf(_SC_PAGESIZE);
471}
472
473extern "C" unsigned malloc_num_zones;
474extern "C" malloc_zone_t **malloc_zones;
475malloc_zone_t sanitizer_zone;
476
477// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
478// libmalloc tries to set up a different zone as malloc_zones[0], it will call
479// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
480// make sure we are still the first (default) zone.
481void MprotectMallocZones(void *addr, int prot) {
482 if (addr == malloc_zones && prot == PROT_READ) {
483 if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
484 for (unsigned i = 1; i < malloc_num_zones; i++) {
485 if (malloc_zones[i] == &sanitizer_zone) {
486 // Swap malloc_zones[0] and malloc_zones[i].
487 malloc_zones[i] = malloc_zones[0];
488 malloc_zones[0] = &sanitizer_zone;
489 break;
490 }
491 }
492 }
493 }
494}
495
496void FutexWait(atomic_uint32_t *p, u32 cmp) {
497 // FIXME: implement actual blocking.
498 sched_yield();
499}
500
501void FutexWake(atomic_uint32_t *p, u32 count) {}
502
503u64 NanoTime() {
504 timeval tv;
505 internal_memset(&tv, 0, sizeof(tv));
506 gettimeofday(&tv, 0);
507 return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
508}
509
510// This needs to be called during initialization to avoid being racy.
511u64 MonotonicNanoTime() {
512 static mach_timebase_info_data_t timebase_info;
513 if (timebase_info.denom == 0) mach_timebase_info(&timebase_info);
514 return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom;
515}
516
517uptr GetTlsSize() {
518 return 0;
519}
520
521uptr TlsBaseAddr() {
522 uptr segbase = 0;
523#if defined(__x86_64__)
524 asm("movq %%gs:0,%0" : "=r"(segbase));
525#elif defined(__i386__)
526 asm("movl %%gs:0,%0" : "=r"(segbase));
527#elif defined(__aarch64__)
528 asm("mrs %x0, tpidrro_el0" : "=r"(segbase));
529 segbase &= 0x07ul; // clearing lower bits, cpu id stored there
530#endif
531 return segbase;
532}
533
534// The size of the tls on darwin does not appear to be well documented,
535// however the vm memory map suggests that it is 1024 uptrs in size,
536// with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386.
537uptr TlsSize() {
538#if defined(__x86_64__) || defined(__i386__)
539 return 1024 * sizeof(uptr);
540#else
541 return 0;
542#endif
543}
544
545void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
546 uptr *tls_begin, uptr *tls_end) {
547# if !SANITIZER_GO
548 GetThreadStackTopAndBottom(main, stk_end, stk_begin);
549 *tls_begin = TlsBaseAddr();
550 *tls_end = *tls_begin + TlsSize();
551# else
552 *stk_begin = 0;
553 *stk_end = 0;
554 *tls_begin = 0;
555 *tls_end = 0;
556# endif
557}
558
559void ListOfModules::init() {
560 clearOrInit();
561 MemoryMappingLayout memory_mapping(false);
562 memory_mapping.DumpListOfModules(&modules_);
563}
564
565void ListOfModules::fallbackInit() { clear(); }
566
567static HandleSignalMode GetHandleSignalModeImpl(int signum) {
568 switch (signum) {
569 case SIGABRT:
570 return common_flags()->handle_abort;
571 case SIGILL:
572 return common_flags()->handle_sigill;
573 case SIGTRAP:
574 return common_flags()->handle_sigtrap;
575 case SIGFPE:
576 return common_flags()->handle_sigfpe;
577 case SIGSEGV:
578 return common_flags()->handle_segv;
579 case SIGBUS:
580 return common_flags()->handle_sigbus;
581 }
582 return kHandleSignalNo;
583}
584
585HandleSignalMode GetHandleSignalMode(int signum) {
586 // Handling fatal signals on watchOS and tvOS devices is disallowed.
587 if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
588 return kHandleSignalNo;
589 HandleSignalMode result = GetHandleSignalModeImpl(signum);
590 if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
591 return kHandleSignalExclusive;
592 return result;
593}
594
595// Offset example:
596// XNU 17 -- macOS 10.13 -- iOS 11 -- tvOS 11 -- watchOS 4
597constexpr u16 GetOSMajorKernelOffset() {
598 if (TARGET_OS_OSX) return 4;
599 if (TARGET_OS_IOS || TARGET_OS_TV) return 6;
600 if (TARGET_OS_WATCH) return 13;
601}
602
603using VersStr = char[64];
604
605static uptr ApproximateOSVersionViaKernelVersion(VersStr vers) {
606 u16 kernel_major = GetDarwinKernelVersion().major;
607 u16 offset = GetOSMajorKernelOffset();
608 CHECK_GE(kernel_major, offset);
609 u16 os_major = kernel_major - offset;
610
611 const char *format = "%d.0";
612 if (TARGET_OS_OSX) {
613 if (os_major >= 16) { // macOS 11+
614 os_major -= 5;
615 } else { // macOS 10.15 and below
616 format = "10.%d";
617 }
618 }
619 return internal_snprintf(vers, sizeof(VersStr), format, os_major);
620}
621
622static void GetOSVersion(VersStr vers) {
623 uptr len = sizeof(VersStr);
624 if (SANITIZER_IOSSIM) {
625 const char *vers_env = GetEnv("SIMULATOR_RUNTIME_VERSION");
626 if (!vers_env) {
627 Report("ERROR: Running in simulator but SIMULATOR_RUNTIME_VERSION env "
628 "var is not set.\n");
629 Die();
630 }
631 len = internal_strlcpy(vers, vers_env, len);
632 } else {
633 int res =
634 internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);
635
636 // XNU 17 (macOS 10.13) and below do not provide the sysctl
637 // `kern.osproductversion` entry (res != 0).
638 bool no_os_version = res != 0;
639
640 // For launchd, sanitizer initialization runs before sysctl is setup
641 // (res == 0 && len != strlen(vers), vers is not a valid version). However,
642 // the kernel version `kern.osrelease` is available.
643 bool launchd = (res == 0 && internal_strlen(vers) < 3);
644 if (launchd) CHECK_EQ(internal_getpid(), 1);
645
646 if (no_os_version || launchd) {
647 len = ApproximateOSVersionViaKernelVersion(vers);
648 }
649 }
650 CHECK_LT(len, sizeof(VersStr));
651}
652
653void ParseVersion(const char *vers, u16 *major, u16 *minor) {
654 // Format: <major>.<minor>[.<patch>]\0
655 CHECK_GE(internal_strlen(vers), 3);
656 const char *p = vers;
657 *major = internal_simple_strtoll(p, &p, /*base=*/10);
658 CHECK_EQ(*p, '.');
659 p += 1;
660 *minor = internal_simple_strtoll(p, &p, /*base=*/10);
661}
662
663// Aligned versions example:
664// macOS 10.15 -- iOS 13 -- tvOS 13 -- watchOS 6
665static void MapToMacos(u16 *major, u16 *minor) {
666 if (TARGET_OS_OSX)
667 return;
668
669 if (TARGET_OS_IOS || TARGET_OS_TV)
670 *major += 2;
671 else if (TARGET_OS_WATCH)
672 *major += 9;
673 else
674 UNREACHABLE("unsupported platform");
675
676 if (*major >= 16) { // macOS 11+
677 *major -= 5;
678 } else { // macOS 10.15 and below
679 *minor = *major;
680 *major = 10;
681 }
682}
683
684static MacosVersion GetMacosAlignedVersionInternal() {
685 VersStr vers = {};
686 GetOSVersion(vers);
687
688 u16 major, minor;
689 ParseVersion(vers, &major, &minor);
690 MapToMacos(&major, &minor);
691
692 return MacosVersion(major, minor);
693}
694
695static_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type),
696 "MacosVersion cache size");
697static atomic_uint32_t cached_macos_version;
698
699MacosVersion GetMacosAlignedVersion() {
700 atomic_uint32_t::Type result =
701 atomic_load(&cached_macos_version, memory_order_acquire);
702 if (!result) {
703 MacosVersion version = GetMacosAlignedVersionInternal();
704 result = *reinterpret_cast<atomic_uint32_t::Type *>(&version);
705 atomic_store(&cached_macos_version, result, memory_order_release);
706 }
707 return *reinterpret_cast<MacosVersion *>(&result);
708}
709
710DarwinKernelVersion GetDarwinKernelVersion() {
711 VersStr vers = {};
712 uptr len = sizeof(VersStr);
713 int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
714 CHECK_EQ(res, 0);
715 CHECK_LT(len, sizeof(VersStr));
716
717 u16 major, minor;
718 ParseVersion(vers, &major, &minor);
719
720 return DarwinKernelVersion(major, minor);
721}
722
723uptr GetRSS() {
724 struct task_basic_info info;
725 unsigned count = TASK_BASIC_INFO_COUNT;
726 kern_return_t result =
727 task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count);
728 if (UNLIKELY(result != KERN_SUCCESS)) {
729 Report("Cannot get task info. Error: %d\n", result);
730 Die();
731 }
732 return info.resident_size;
733}
734
735void *internal_start_thread(void *(*func)(void *arg), void *arg) {
736 // Start the thread with signals blocked, otherwise it can steal user signals.
737 __sanitizer_sigset_t set, old;
738 internal_sigfillset(&set);
739 internal_sigprocmask(SIG_SETMASK, &set, &old);
740 pthread_t th;
741 pthread_create(&th, 0, func, arg);
742 internal_sigprocmask(SIG_SETMASK, &old, 0);
743 return th;
744}
745
746void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
747
748#if !SANITIZER_GO
749static Mutex syslog_lock;
750# endif
751
752# if SANITIZER_DRIVERKIT
753# define SANITIZER_OS_LOG os_log
754# else
755# define SANITIZER_OS_LOG os_log_error
756# endif
757
758void WriteOneLineToSyslog(const char *s) {
759#if !SANITIZER_GO
760 syslog_lock.CheckLocked();
761 if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {
762 SANITIZER_OS_LOG(OS_LOG_DEFAULT, "%{public}s", s);
763 } else {
764#pragma clang diagnostic push
765// as_log is deprecated.
766#pragma clang diagnostic ignored "-Wdeprecated-declarations"
767 asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
768#pragma clang diagnostic pop
769 }
770#endif
771}
772
773// buffer to store crash report application information
774static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
775static Mutex crashreporter_info_mutex;
776
777extern "C" {
778
779#if HAVE_CRASHREPORTERCLIENT_H
780// Available in CRASHREPORTER_ANNOTATIONS_VERSION 5+
781# ifdef CRASHREPORTER_ANNOTATIONS_INITIALIZER
782CRASHREPORTER_ANNOTATIONS_INITIALIZER()
783# else
784// Support for older CrashRerporter annotiations
785CRASH_REPORTER_CLIENT_HIDDEN
786struct crashreporter_annotations_t gCRAnnotations
787 __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = {
788 CRASHREPORTER_ANNOTATIONS_VERSION,
789 0,
790 0,
791 0,
792 0,
793 0,
794 0,
795# if CRASHREPORTER_ANNOTATIONS_VERSION > 4
796 0,
797# endif
798};
799# endif
800# else
801// Revert to previous crash reporter API if client header is not available
802static const char *__crashreporter_info__ __attribute__((__used__)) =
803 &crashreporter_info_buff[0];
804asm(".desc ___crashreporter_info__, 0x10");
805#endif // HAVE_CRASHREPORTERCLIENT_H
806
807} // extern "C"
808
809static void CRAppendCrashLogMessage(const char *msg) {
810 Lock l(&crashreporter_info_mutex);
811 internal_strlcat(crashreporter_info_buff, msg,
812 sizeof(crashreporter_info_buff));
813#if HAVE_CRASHREPORTERCLIENT_H
814 (void)CRSetCrashLogMessage(crashreporter_info_buff);
815#endif
816}
817
818void LogMessageOnPrintf(const char *str) {
819 // Log all printf output to CrashLog.
820 if (common_flags()->abort_on_error)
821 CRAppendCrashLogMessage(str);
822}
823
824void LogFullErrorReport(const char *buffer) {
825# if !SANITIZER_GO
826 // When logging with os_log_error this will make it into the crash log.
827 if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
828 sizeof("AddressSanitizer") - 1) == 0)
829 SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Address Sanitizer reported a failure.");
830 else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
831 sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
832 SANITIZER_OS_LOG(OS_LOG_DEFAULT,
833 "Undefined Behavior Sanitizer reported a failure.");
834 else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
835 sizeof("ThreadSanitizer") - 1) == 0)
836 SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Thread Sanitizer reported a failure.");
837 else
838 SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Sanitizer tool reported a failure.");
839
840 if (common_flags()->log_to_syslog)
841 SANITIZER_OS_LOG(OS_LOG_DEFAULT, "Consult syslog for more information.");
842
843 // Log to syslog.
844 // The logging on OS X may call pthread_create so we need the threading
845 // environment to be fully initialized. Also, this should never be called when
846 // holding the thread registry lock since that may result in a deadlock. If
847 // the reporting thread holds the thread registry mutex, and asl_log waits
848 // for GCD to dispatch a new thread, the process will deadlock, because the
849 // pthread_create wrapper needs to acquire the lock as well.
850 Lock l(&syslog_lock);
851 if (common_flags()->log_to_syslog)
852 WriteToSyslog(buffer);
853
854 // The report is added to CrashLog as part of logging all of Printf output.
855# endif // !SANITIZER_GO
856}
857
858SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
859#if defined(__x86_64__) || defined(__i386__)
860 ucontext_t *ucontext = static_cast<ucontext_t*>(context);
861 return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read;
862#elif defined(__arm64__)
863 ucontext_t *ucontext = static_cast<ucontext_t*>(context);
864 return ucontext->uc_mcontext->__es.__esr & 0x40 /*ISS_DA_WNR*/ ? Write : Read;
865#else
866 return Unknown;
867#endif
868}
869
870bool SignalContext::IsTrueFaultingAddress() const {
871 auto si = static_cast<const siginfo_t *>(siginfo);
872 // "Real" SIGSEGV codes (e.g., SEGV_MAPERR, SEGV_MAPERR) are non-zero.
873 return si->si_signo == SIGSEGV && si->si_code != 0;
874}
875
876#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
877 #define AARCH64_GET_REG(r) \
878 (uptr)ptrauth_strip( \
879 (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
880#else
881 #define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
882#endif
883
884static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
885 ucontext_t *ucontext = (ucontext_t*)context;
886# if defined(__aarch64__)
887 *pc = AARCH64_GET_REG(pc);
888 *bp = AARCH64_GET_REG(fp);
889 *sp = AARCH64_GET_REG(sp);
890# elif defined(__x86_64__)
891 *pc = ucontext->uc_mcontext->__ss.__rip;
892 *bp = ucontext->uc_mcontext->__ss.__rbp;
893 *sp = ucontext->uc_mcontext->__ss.__rsp;
894# elif defined(__arm__)
895 *pc = ucontext->uc_mcontext->__ss.__pc;
896 *bp = ucontext->uc_mcontext->__ss.__r[7];
897 *sp = ucontext->uc_mcontext->__ss.__sp;
898# elif defined(__i386__)
899 *pc = ucontext->uc_mcontext->__ss.__eip;
900 *bp = ucontext->uc_mcontext->__ss.__ebp;
901 *sp = ucontext->uc_mcontext->__ss.__esp;
902# else
903# error "Unknown architecture"
904# endif
905}
906
907void SignalContext::InitPcSpBp() {
908 addr = (uptr)ptrauth_strip((void *)addr, 0);
909 GetPcSpBp(context, &pc, &sp, &bp);
910}
911
912// ASan/TSan use mmap in a way that creates “deallocation gaps” which triggers
913// EXC_GUARD exceptions on macOS 10.15+ (XNU 19.0+).
914static void DisableMmapExcGuardExceptions() {
915 using task_exc_guard_behavior_t = uint32_t;
916 using task_set_exc_guard_behavior_t =
917 kern_return_t(task_t task, task_exc_guard_behavior_t behavior);
918 auto *set_behavior = (task_set_exc_guard_behavior_t *)dlsym(
919 RTLD_DEFAULT, "task_set_exc_guard_behavior");
920 if (set_behavior == nullptr) return;
921 const task_exc_guard_behavior_t task_exc_guard_none = 0;
922 kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none);
923 if (res != KERN_SUCCESS) {
924 Report(
925 "WARN: task_set_exc_guard_behavior returned %d (%s), "
926 "mmap may fail unexpectedly.\n",
927 res, mach_error_string(res));
928 if (res == KERN_DENIED)
929 Report(
930 "HINT: Check that task_set_exc_guard_behavior is allowed by "
931 "sandbox.\n");
932 }
933}
934
935static void VerifyInterceptorsWorking();
936static void StripEnv();
937
938void InitializePlatformEarly() {
939 // Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
940 use_xnu_fast_mmap =
941#if defined(__x86_64__)
942 GetDarwinKernelVersion() >= DarwinKernelVersion(17, 5);
943#else
944 false;
945#endif
946 if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
947 DisableMmapExcGuardExceptions();
948
949# if !SANITIZER_GO
950 MonotonicNanoTime(); // Call to initialize mach_timebase_info
951 VerifyInterceptorsWorking();
952 StripEnv();
953# endif
954}
955
956#if !SANITIZER_GO
957static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
958LowLevelAllocator allocator_for_env;
959
960static bool ShouldCheckInterceptors() {
961 // Restrict "interceptors working?" check
962 const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer",
963 "RealtimeSanitizer"};
964 size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]);
965 for (size_t i = 0; i < count; i++) {
966 if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0)
967 return true;
968 }
969 return false;
970}
971
972static void VerifyInterceptorsWorking() {
973 if (!common_flags()->verify_interceptors || !ShouldCheckInterceptors())
974 return;
975
976 // Verify that interceptors really work. We'll use dlsym to locate
977 // "puts", if interceptors are working, it should really point to
978 // "wrap_puts" within our own dylib.
979 Dl_info info_puts, info_runtime;
980 RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
981 RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
982 if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
983 Report(
984 "ERROR: Interceptors are not working. This may be because %s is "
985 "loaded too late (e.g. via dlopen). Please launch the executable "
986 "with:\n%s=%s\n",
987 SanitizerToolName, kDyldInsertLibraries, info_runtime.dli_fname);
988 RAW_CHECK("interceptors not installed" && 0);
989 }
990}
991
992// Change the value of the env var |name|, leaking the original value.
993// If |name_value| is NULL, the variable is deleted from the environment,
994// otherwise the corresponding "NAME=value" string is replaced with
995// |name_value|.
996static void LeakyResetEnv(const char *name, const char *name_value) {
997 char **env = GetEnviron();
998 uptr name_len = internal_strlen(name);
999 while (*env != 0) {
1000 uptr len = internal_strlen(*env);
1001 if (len > name_len) {
1002 const char *p = *env;
1003 if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
1004 // Match.
1005 if (name_value) {
1006 // Replace the old value with the new one.
1007 *env = const_cast<char*>(name_value);
1008 } else {
1009 // Shift the subsequent pointers back.
1010 char **del = env;
1011 do {
1012 del[0] = del[1];
1013 } while (*del++);
1014 }
1015 }
1016 }
1017 env++;
1018 }
1019}
1020
1021static void StripEnv() {
1022 if (!common_flags()->strip_env)
1023 return;
1024
1025 char *dyld_insert_libraries =
1026 const_cast<char *>(GetEnv(kDyldInsertLibraries));
1027 if (!dyld_insert_libraries)
1028 return;
1029
1030 Dl_info info;
1031 RAW_CHECK(dladdr((void *)&StripEnv, &info));
1032 const char *dylib_name = StripModuleName(info.dli_fname);
1033 bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
1034 if (!lib_is_in_env)
1035 return;
1036
1037 // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
1038 // the dylib from the environment variable, because interceptors are installed
1039 // and we don't want our children to inherit the variable.
1040
1041 uptr old_env_len = internal_strlen(dyld_insert_libraries);
1042 uptr dylib_name_len = internal_strlen(dylib_name);
1043 uptr env_name_len = internal_strlen(kDyldInsertLibraries);
1044 // Allocate memory to hold the previous env var name, its value, the '='
1045 // sign and the '\0' char.
1046 char *new_env = (char*)allocator_for_env.Allocate(
1047 old_env_len + 2 + env_name_len);
1048 RAW_CHECK(new_env);
1049 internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
1050 internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
1051 new_env[env_name_len] = '=';
1052 char *new_env_pos = new_env + env_name_len + 1;
1053
1054 // Iterate over colon-separated pieces of |dyld_insert_libraries|.
1055 char *piece_start = dyld_insert_libraries;
1056 char *piece_end = NULL;
1057 char *old_env_end = dyld_insert_libraries + old_env_len;
1058 do {
1059 if (piece_start[0] == ':') piece_start++;
1060 piece_end = internal_strchr(piece_start, ':');
1061 if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
1062 if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
1063 uptr piece_len = piece_end - piece_start;
1064
1065 char *filename_start =
1066 (char *)internal_memrchr(piece_start, '/', piece_len);
1067 uptr filename_len = piece_len;
1068 if (filename_start) {
1069 filename_start += 1;
1070 filename_len = piece_len - (filename_start - piece_start);
1071 } else {
1072 filename_start = piece_start;
1073 }
1074
1075 // If the current piece isn't the runtime library name,
1076 // append it to new_env.
1077 if ((dylib_name_len != filename_len) ||
1078 (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
1079 if (new_env_pos != new_env + env_name_len + 1) {
1080 new_env_pos[0] = ':';
1081 new_env_pos++;
1082 }
1083 internal_strncpy(new_env_pos, piece_start, piece_len);
1084 new_env_pos += piece_len;
1085 }
1086 // Move on to the next piece.
1087 piece_start = piece_end;
1088 } while (piece_start < old_env_end);
1089
1090 // Can't use setenv() here, because it requires the allocator to be
1091 // initialized.
1092 // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
1093 // a separate function called after InitializeAllocator().
1094 if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
1095 LeakyResetEnv(kDyldInsertLibraries, new_env);
1096}
1097#endif // SANITIZER_GO
1098
1099// Prints out a consolidated memory map: contiguous regions
1100// are merged together.
1101static void PrintVmmap() {
1102 const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
1103 mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
1104 kern_return_t kr = KERN_SUCCESS;
1105
1106 Report("Memory map:\n");
1107 mach_vm_address_t last = 0;
1108 mach_vm_address_t lastsz = 0;
1109
1110 while (1) {
1111 mach_vm_size_t vmsize = 0;
1112 natural_t depth = 0;
1113 vm_region_submap_short_info_data_64_t vminfo;
1114 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1115 kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
1116 (vm_region_info_t)&vminfo, &count);
1117
1118 if (kr == KERN_DENIED) {
1119 Report(
1120 "ERROR: mach_vm_region_recurse got KERN_DENIED when printing memory "
1121 "map.\n");
1122 Report(
1123 "HINT: Check whether mach_vm_region_recurse is allowed by "
1124 "sandbox.\n");
1125 }
1126
1127 if (kr == KERN_SUCCESS && address < max_vm_address) {
1128 if (last + lastsz == address) {
1129 // This region is contiguous with the last; merge together.
1130 lastsz += vmsize;
1131 } else {
1132 if (lastsz)
1133 Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last,
1134 (void*)(last + lastsz), lastsz);
1135
1136 last = address;
1137 lastsz = vmsize;
1138 }
1139 address += vmsize;
1140 } else {
1141 // We've reached the end of the memory map. Print the last remaining
1142 // region, if there is one.
1143 if (lastsz)
1144 Printf("|| `[%p, %p]` || size=0x%016" PRIx64 " ||\n", (void*)last,
1145 (void*)(last + lastsz), lastsz);
1146
1147 break;
1148 }
1149 }
1150}
1151
1152static void ReportShadowAllocFail(uptr shadow_size_bytes, uptr alignment) {
1153 Report(
1154 "FATAL: Failed to allocate shadow memory. Tried to allocate %p bytes "
1155 "(alignment=%p).\n",
1156 (void*)shadow_size_bytes, (void*)alignment);
1157 PrintVmmap();
1158}
1159
1160char **GetArgv() {
1161 return *_NSGetArgv();
1162}
1163
1164#if SANITIZER_IOS && !SANITIZER_IOSSIM
1165// The task_vm_info struct is normally provided by the macOS SDK, but we need
1166// fields only available in 10.12+. Declare the struct manually to be able to
1167// build against older SDKs.
1168struct __sanitizer_task_vm_info {
1169 mach_vm_size_t virtual_size;
1170 integer_t region_count;
1171 integer_t page_size;
1172 mach_vm_size_t resident_size;
1173 mach_vm_size_t resident_size_peak;
1174 mach_vm_size_t device;
1175 mach_vm_size_t device_peak;
1176 mach_vm_size_t internal;
1177 mach_vm_size_t internal_peak;
1178 mach_vm_size_t external;
1179 mach_vm_size_t external_peak;
1180 mach_vm_size_t reusable;
1181 mach_vm_size_t reusable_peak;
1182 mach_vm_size_t purgeable_volatile_pmap;
1183 mach_vm_size_t purgeable_volatile_resident;
1184 mach_vm_size_t purgeable_volatile_virtual;
1185 mach_vm_size_t compressed;
1186 mach_vm_size_t compressed_peak;
1187 mach_vm_size_t compressed_lifetime;
1188 mach_vm_size_t phys_footprint;
1189 mach_vm_address_t min_address;
1190 mach_vm_address_t max_address;
1191};
1192#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
1193 (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))
1194
1195static uptr GetTaskInfoMaxAddress() {
1196 __sanitizer_task_vm_info vm_info = {} /* zero initialize */;
1197 mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;
1198 int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
1199 return err ? 0 : vm_info.max_address;
1200}
1201
1202uptr GetMaxUserVirtualAddress() {
1203 static uptr max_vm = GetTaskInfoMaxAddress();
1204 if (max_vm != 0) {
1205 const uptr ret_value = max_vm - 1;
1206 CHECK_LE(ret_value, SANITIZER_MMAP_RANGE_SIZE);
1207 return ret_value;
1208 }
1209
1210 // xnu cannot provide vm address limit
1211# if SANITIZER_WORDSIZE == 32
1212 constexpr uptr fallback_max_vm = 0xffe00000 - 1;
1213# else
1214 constexpr uptr fallback_max_vm = 0x200000000 - 1;
1215# endif
1216 static_assert(fallback_max_vm <= SANITIZER_MMAP_RANGE_SIZE,
1217 "Max virtual address must be less than mmap range size.");
1218 return fallback_max_vm;
1219}
1220
1221#else // !SANITIZER_IOS
1222
1223uptr GetMaxUserVirtualAddress() {
1224# if SANITIZER_WORDSIZE == 64
1225 constexpr uptr max_vm = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
1226# else // SANITIZER_WORDSIZE == 32
1227 static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
1228 constexpr uptr max_vm = (1ULL << 32) - 1; // 0xffffffff;
1229# endif
1230 static_assert(max_vm <= SANITIZER_MMAP_RANGE_SIZE,
1231 "Max virtual address must be less than mmap range size.");
1232 return max_vm;
1233}
1234#endif
1235
1236uptr GetMaxVirtualAddress() {
1237 return GetMaxUserVirtualAddress();
1238}
1239
1240uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
1241 uptr min_shadow_base_alignment, uptr &high_mem_end,
1242 uptr granularity) {
1243 const uptr alignment =
1244 Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
1245 const uptr left_padding =
1246 Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
1247
1248 uptr space_size = shadow_size_bytes;
1249
1250 uptr largest_gap_found = 0;
1251 uptr max_occupied_addr = 0;
1252
1253 VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
1254 uptr shadow_start =
1255 FindAvailableMemoryRange(space_size, alignment, left_padding,
1256 &largest_gap_found, &max_occupied_addr);
1257 // If the shadow doesn't fit, restrict the address space to make it fit.
1258 if (shadow_start == 0) {
1259 VReport(
1260 2,
1261 "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
1262 (void *)largest_gap_found, (void *)max_occupied_addr);
1263 uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
1264 if (new_max_vm < max_occupied_addr) {
1265 Report("Unable to find a memory range for dynamic shadow.\n");
1266 Report(
1267 "\tspace_size = %p\n\tlargest_gap_found = %p\n\tmax_occupied_addr "
1268 "= %p\n\tnew_max_vm = %p\n",
1269 (void*)space_size, (void*)largest_gap_found, (void*)max_occupied_addr,
1270 (void*)new_max_vm);
1271 ReportShadowAllocFail(shadow_size_bytes, alignment);
1272 CHECK(0 && "cannot place shadow");
1273 }
1274 RestrictMemoryToMaxAddress(new_max_vm);
1275 high_mem_end = new_max_vm - 1;
1276 space_size = (high_mem_end >> shadow_scale);
1277 VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
1278 shadow_start = FindAvailableMemoryRange(space_size, alignment, left_padding,
1279 nullptr, nullptr);
1280 if (shadow_start == 0) {
1281 Report("Unable to find a memory range after restricting VM.\n");
1282 ReportShadowAllocFail(shadow_size_bytes, alignment);
1283 CHECK(0 && "cannot place shadow after restricting vm");
1284 }
1285 }
1286 CHECK_NE((uptr)0, shadow_start);
1287 CHECK(IsAligned(shadow_start, alignment));
1288 return shadow_start;
1289}
1290
1291uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
1292 uptr num_aliases, uptr ring_buffer_size) {
1293 CHECK(false && "HWASan aliasing is unimplemented on Mac");
1294 return 0;
1295}
1296
1297uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
1298 uptr* largest_gap_found,
1299 uptr* max_occupied_addr) {
1300 const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
1301 mach_vm_address_t address = GAP_SEARCH_START_ADDRESS;
1302 mach_vm_address_t free_begin = GAP_SEARCH_START_ADDRESS;
1303 kern_return_t kr = KERN_SUCCESS;
1304 if (largest_gap_found) *largest_gap_found = 0;
1305 if (max_occupied_addr) *max_occupied_addr = 0;
1306 while (kr == KERN_SUCCESS) {
1307 mach_vm_size_t vmsize = 0;
1308 natural_t depth = 0;
1309 vm_region_submap_short_info_data_64_t vminfo;
1310 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1311 kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
1312 (vm_region_info_t)&vminfo, &count);
1313
1314 if (kr == KERN_SUCCESS) {
1315 // There are cases where going beyond the processes' max vm does
1316 // not return KERN_INVALID_ADDRESS so we check for going beyond that
1317 // max address as well.
1318 if (address > max_vm_address) {
1319 address = max_vm_address;
1320 kr = -1; // break after this iteration.
1321 }
1322
1323 if (max_occupied_addr)
1324 *max_occupied_addr = address + vmsize;
1325 } else if (kr == KERN_INVALID_ADDRESS) {
1326 // No more regions beyond "address", consider the gap at the end of VM.
1327 address = max_vm_address;
1328
1329 // We will break after this iteration anyway since kr != KERN_SUCCESS
1330 } else if (kr == KERN_DENIED) {
1331 Report("ERROR: Unable to find a memory range for dynamic shadow.\n");
1332 Report("HINT: Ensure mach_vm_region_recurse is allowed under sandbox.\n");
1333 Die();
1334 } else {
1335 Report(
1336 "WARNING: mach_vm_region_recurse returned unexpected code %d (%s)\n",
1337 kr, mach_error_string(kr));
1338 DCHECK(false && "mach_vm_region_recurse returned unexpected code");
1339 break; // address is not valid unless KERN_SUCCESS, therefore we must not
1340 // use it.
1341 }
1342
1343 if (free_begin != address) {
1344 // We found a free region [free_begin..address-1].
1345 uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
1346 uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
1347 uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
1348 if (size < gap_size) {
1349 return gap_start;
1350 }
1351
1352 if (largest_gap_found && *largest_gap_found < gap_size) {
1353 *largest_gap_found = gap_size;
1354 }
1355 }
1356 // Move to the next region.
1357 address += vmsize;
1358 free_begin = address;
1359 }
1360
1361 // We looked at all free regions and could not find one large enough.
1362 return 0;
1363}
1364
1365// This function (when used during initialization when there is
1366// only a single thread), can be used to verify that a range
1367// of memory hasn't already been mapped, and won't be mapped
1368// later in the shared cache.
1369//
1370// If the syscall mach_vm_region_recurse fails (due to sandbox),
1371// we assume that the memory is not mapped so that execution can continue.
1372//
1373// NOTE: range_end is inclusive
1374//
1375// WARNING: This function must NOT allocate memory, since it is
1376// used in InitializeShadowMemory between where we search for
1377// space for shadow and where we actually allocate it.
1378bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
1379 mach_vm_size_t vmsize = 0;
1380 natural_t depth = 0;
1381 vm_region_submap_short_info_data_64_t vminfo;
1382 mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1383 mach_vm_address_t address = range_start;
1384
1385 // First, check if the range is already mapped.
1386 kern_return_t kr =
1387 mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
1388 (vm_region_info_t)&vminfo, &count);
1389
1390 if (kr == KERN_DENIED) {
1391 Report(
1392 "WARN: mach_vm_region_recurse returned KERN_DENIED when checking "
1393 "whether an address is mapped.\n");
1394 Report("HINT: Is mach_vm_region_recurse allowed by sandbox?\n");
1395 }
1396
1397 if (kr == KERN_SUCCESS && !IntervalsAreSeparate(address, address + vmsize - 1,
1398 range_start, range_end)) {
1399 // Overlaps with already-mapped memory
1400 return false;
1401 }
1402
1403 size_t cacheLength;
1404 uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
1405
1406 if (cacheStart &&
1407 !IntervalsAreSeparate(cacheStart, cacheStart + cacheLength - 1,
1408 range_start, range_end)) {
1409 // Overlaps with shared cache region
1410 return false;
1411 }
1412
1413 // We believe this address is available.
1414 return true;
1415}
1416
1417// FIXME implement on this platform.
1418void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
1419
1420void SignalContext::DumpAllRegisters(void *context) {
1421 Report("Register values:\n");
1422
1423 ucontext_t *ucontext = (ucontext_t*)context;
1424# define DUMPREG64(r) \
1425 Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
1426# define DUMPREGA64(r) \
1427 Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r));
1428# define DUMPREG32(r) \
1429 Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
1430# define DUMPREG_(r) Printf(" "); DUMPREG(r);
1431# define DUMPREG__(r) Printf(" "); DUMPREG(r);
1432# define DUMPREG___(r) Printf(" "); DUMPREG(r);
1433
1434# if defined(__x86_64__)
1435# define DUMPREG(r) DUMPREG64(r)
1436 DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
1437 DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
1438 DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
1439 DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
1440# elif defined(__i386__)
1441# define DUMPREG(r) DUMPREG32(r)
1442 DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
1443 DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
1444# elif defined(__aarch64__)
1445# define DUMPREG(r) DUMPREG64(r)
1446 DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
1447 DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
1448 DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
1449 DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
1450 DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
1451 DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
1452 DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
1453 DUMPREG(x[28]); DUMPREGA64(fp); DUMPREGA64(lr); DUMPREGA64(sp); Printf("\n");
1454# elif defined(__arm__)
1455# define DUMPREG(r) DUMPREG32(r)
1456 DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
1457 DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
1458 DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
1459 DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
1460# else
1461# error "Unknown architecture"
1462# endif
1463
1464# undef DUMPREG64
1465# undef DUMPREG32
1466# undef DUMPREG_
1467# undef DUMPREG__
1468# undef DUMPREG___
1469# undef DUMPREG
1470}
1471
1472static inline bool CompareBaseAddress(const LoadedModule &a,
1473 const LoadedModule &b) {
1474 return a.base_address() < b.base_address();
1475}
1476
1477void FormatUUID(char *out, uptr size, const u8 *uuid) {
1478 internal_snprintf(out, size,
1479 "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
1480 "%02X%02X%02X%02X%02X%02X>",
1481 uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
1482 uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
1483 uuid[12], uuid[13], uuid[14], uuid[15]);
1484}
1485
1486void DumpProcessMap() {
1487 Printf("Process module map:\n");
1488 MemoryMappingLayout memory_mapping(false);
1489 InternalMmapVector<LoadedModule> modules;
1490 modules.reserve(128);
1491 memory_mapping.DumpListOfModules(&modules);
1492 Sort(modules.data(), modules.size(), CompareBaseAddress);
1493 for (uptr i = 0; i < modules.size(); ++i) {
1494 char uuid_str[128];
1495 FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
1496 Printf("%p-%p %s (%s) %s\n", (void *)modules[i].base_address(),
1497 (void *)modules[i].max_address(), modules[i].full_name(),
1498 ModuleArchToString(modules[i].arch()), uuid_str);
1499 }
1500 Printf("End of module map.\n");
1501}
1502
1503void CheckNoDeepBind(const char *filename, int flag) {
1504 // Do nothing.
1505}
1506
1507bool GetRandom(void *buffer, uptr length, bool blocking) {
1508 if (!buffer || !length || length > 256)
1509 return false;
1510 // arc4random never fails.
1511 REAL(arc4random_buf)(buffer, length);
1512 return true;
1513}
1514
1515u32 GetNumberOfCPUs() {
1516 return (u32)sysconf(_SC_NPROCESSORS_ONLN);
1517}
1518
1519void InitializePlatformCommonFlags(CommonFlags *cf) {}
1520
1521// Pthread introspection hook
1522//
1523// * GCD worker threads are created without a call to pthread_create(), but we
1524// still need to register these threads (with ThreadCreate/Start()).
1525// * We use the "pthread introspection hook" below to observe the creation of
1526// such threads.
1527// * GCD worker threads don't have parent threads and the CREATE event is
1528// delivered in the context of the thread itself. CREATE events for regular
1529// threads, are delivered on the parent. We use this to tell apart which
1530// threads are GCD workers with `thread == pthread_self()`.
1531//
1532static pthread_introspection_hook_t prev_pthread_introspection_hook;
1533static ThreadEventCallbacks thread_event_callbacks;
1534
1535static void sanitizer_pthread_introspection_hook(unsigned int event,
1536 pthread_t thread, void *addr,
1537 size_t size) {
1538 // create -> start -> terminate -> destroy
1539 // * create/destroy are usually (not guaranteed) delivered on the parent and
1540 // track resource allocation/reclamation
1541 // * start/terminate are guaranteed to be delivered in the context of the
1542 // thread and give hooks into "just after (before) thread starts (stops)
1543 // executing"
1544 DCHECK(event >= PTHREAD_INTROSPECTION_THREAD_CREATE &&
1545 event <= PTHREAD_INTROSPECTION_THREAD_DESTROY);
1546
1547 if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
1548 bool gcd_worker = (thread == pthread_self());
1549 if (thread_event_callbacks.create)
1550 thread_event_callbacks.create((uptr)thread, gcd_worker);
1551 } else if (event == PTHREAD_INTROSPECTION_THREAD_START) {
1552 CHECK_EQ(thread, pthread_self());
1553 if (thread_event_callbacks.start)
1554 thread_event_callbacks.start((uptr)thread);
1555 }
1556
1557 if (prev_pthread_introspection_hook)
1558 prev_pthread_introspection_hook(event, thread, addr, size);
1559
1560 if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
1561 CHECK_EQ(thread, pthread_self());
1562 if (thread_event_callbacks.terminate)
1563 thread_event_callbacks.terminate((uptr)thread);
1564 } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
1565 if (thread_event_callbacks.destroy)
1566 thread_event_callbacks.destroy((uptr)thread);
1567 }
1568}
1569
1570void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks) {
1571 thread_event_callbacks = callbacks;
1572 prev_pthread_introspection_hook =
1573 pthread_introspection_hook_install(&sanitizer_pthread_introspection_hook);
1574}
1575
1576} // namespace __sanitizer
1577
1578#endif // SANITIZER_APPLE
1579