//===-- tsan_platform_posix.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// POSIX-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX

# include <dlfcn.h>

# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_errno.h"
# include "sanitizer_common/sanitizer_libc.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "tsan_platform.h"
# include "tsan_rtl.h"

namespace __tsan {

static const char kShadowMemoryMappingWarning[] =
    "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n";
static const char kShadowMemoryMappingHint[] =
    "HINT: if %s is not supported in your environment, you may set "
    "TSAN_OPTIONS=%s=0\n";

# if !SANITIZER_GO
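// Excludes the given shadow range from core dumps (MADV_DONTDUMP), so that
// dumps do not include the huge, mostly-untouched shadow mappings. Controlled
// by the use_madv_dontdump flag; dies with a hint if the madvise call fails.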
void DontDumpShadow(uptr addr, uptr size) {
  if (common_flags()->use_madv_dontdump)
    if (!DontDumpShadowMemory(addr, size)) {
      Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
             "MADV_DONTDUMP", errno);
      Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump");
      Die();
    }
}

void InitializeShadowMemory() {
  // Map memory shadow.
  if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
                               "shadow")) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
    Die();
  }
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of stack and similarly
  // a program uses a small part of large mmap. On some programs
  // we see 20% memory usage reduction without huge pages for this range.
  DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      ShadowBeg(), ShadowEnd(),
      (ShadowEnd() - ShadowBeg()) >> 30);

  // Map meta shadow.
  const uptr meta = MetaShadowBeg();
  const uptr meta_size = MetaShadowEnd() - meta;
  if (!MmapFixedSuperNoReserve(meta, meta_size, "meta shadow")) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
    Die();
  }
  DontDumpShadow(meta, meta_size);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      meta, meta + meta_size, meta_size >> 30);

  InitializeShadowMemoryPlatform();

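  // Resolve the optional user callbacks at runtime, so that programs that do
  // not define __tsan_on_initialize/__tsan_on_finalize still link and run.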
  on_initialize = reinterpret_cast<void (*)(void)>(
      dlsym(RTLD_DEFAULT, "__tsan_on_initialize"));
  on_finalize =
      reinterpret_cast<int (*)(int)>(dlsym(RTLD_DEFAULT, "__tsan_on_finalize"));
}

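// Tries to reserve [beg, end) as an inaccessible mapping. Returns true if the
// range is empty or the no-access mapping landed at the requested address.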
static bool TryProtectRange(uptr beg, uptr end) {
  CHECK_LE(beg, end);
  if (beg == end)
    return true;
  return beg == (uptr)MmapFixedNoAccess(beg, end - beg);
}

static void ProtectRange(uptr beg, uptr end) {
  if (!TryProtectRange(beg, end)) {
    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
    Printf("FATAL: Make sure you are not using unlimited stack\n");
    Die();
  }
}

// CheckAndProtect will check if the memory layout is compatible with TSan.
// Optionally (if 'protect' is true), it will set the memory regions between
// app memory to be inaccessible.
// 'ignore_heap' means it will not consider heap memory allocations to be a
// conflict. Set this based on whether we are calling CheckAndProtect before
// or after the allocator has initialized the heap.
bool CheckAndProtect(bool protect, bool ignore_heap, bool print_warnings) {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (segment.start >= HeapMemBeg() && segment.end <= HeapEnd()) {
      if (ignore_heap) {
        continue;
      } else {
        return false;
      }
    }

    // Note: IsAppMem also returns true for heap memory, hence we must
    // put this check after the heap bounds check.
    if (IsAppMem(segment.start) && IsAppMem(segment.end - 1))
      continue;

    // Guard page after the heap end.
    if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;

    if (segment.protection == 0)  // Zero page or mprotected.
      continue;

    if (segment.start >= VdsoBeg())  // vdso
      break;

    // Debug output can break tests. Suppress this message in most cases.
    if (print_warnings)
      Printf(
          "WARNING: ThreadSanitizer: unexpected memory mapping 0x%zx-0x%zx\n",
          segment.start, segment.end);

    return false;
  }

  if (!protect)
    return true;

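  // Reserve the gaps between the app, shadow, and meta regions as inaccessible
  // so that no other mapping can be placed there.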
# if SANITIZER_IOS && !SANITIZER_IOSSIM
  ProtectRange(HeapMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
  ProtectRange(MetaShadowEnd(), HiAppMemBeg());
# else
  ProtectRange(LoAppMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
  if (MidAppMemBeg()) {
    ProtectRange(MetaShadowEnd(), MidAppMemBeg());
    ProtectRange(MidAppMemEnd(), HeapMemBeg());
  } else {
    ProtectRange(MetaShadowEnd(), HeapMemBeg());
  }
  ProtectRange(HeapEnd(), HiAppMemBeg());
# endif

# if defined(__s390x__)
  // Protect the rest of the address space.
  const uptr user_addr_max_l4 = 0x0020000000000000ull;
  const uptr user_addr_max_l5 = 0xfffffffffffff000ull;
  // All the maintained s390x kernels support at least 4-level page tables.
  ProtectRange(HiAppMemEnd(), user_addr_max_l4);
  // Older s390x kernels may not support 5-level page tables.
  TryProtectRange(user_addr_max_l4, user_addr_max_l5);
# endif

  return true;
}
# endif

} // namespace __tsan

#endif // SANITIZER_POSIX