//===-- memprof_rtl.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Main file of the MemProf run-time library.
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_interceptors.h"
#include "memprof_interface_internal.h"
#include "memprof_internal.h"
#include "memprof_mapping.h"
#include "memprof_stack.h"
#include "memprof_stats.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

#include <time.h>

uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.

// Allow the user to specify a profile output file via the binary.
SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];
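// A strong definition in the instrumented binary overrides this weak array.
// Illustrative only (the definition is normally emitted by the compiler when a
// profile path is configured at build time, not written by hand):
//   char __memprof_profile_filename[] = "memprof.profraw";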

// Share ClHistogram compiler flag with runtime.
SANITIZER_WEAK_ATTRIBUTE bool __memprof_histogram;

namespace __memprof {

static void MemprofDie() {
  static atomic_uint32_t num_calls;
  if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
    // Don't die twice - run a busy loop.
    while (1) {
      internal_sched_yield();
    }
  }
  if (common_flags()->print_module_map >= 1)
    DumpProcessMap();
  if (flags()->unmap_shadow_on_exit) {
    if (kHighShadowEnd)
      UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
  }
}

static void MemprofOnDeadlySignal(int signo, void *siginfo, void *context) {
  // We call StartReportDeadlySignal not HandleDeadlySignal so we get the
  // deadly signal message to stderr but no writing to the profile output file
  StartReportDeadlySignal();
  __memprof_profile_dump();
  Die();
}

static void CheckUnwind() {
  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
  stack.Print();
}

// -------------------------- Globals --------------------- {{{1
int memprof_inited;
bool memprof_init_is_running;
int memprof_timestamp_inited;
long memprof_init_timestamp_s;

uptr kHighMemEnd;

// -------------------------- Run-time entry ------------------- {{{1
// exported functions

#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);
#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY_HIST() \
  __memprof::RecordAccessHistogram(addr);

#define MEMPROF_MEMORY_ACCESS_CALLBACK(type) \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) { \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() \
  }

#define MEMPROF_MEMORY_ACCESS_CALLBACK_HIST(type) \
  extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_hist_##type( \
      uptr addr) { \
    MEMPROF_MEMORY_ACCESS_CALLBACK_BODY_HIST() \
  }

MEMPROF_MEMORY_ACCESS_CALLBACK_HIST(load)
MEMPROF_MEMORY_ACCESS_CALLBACK_HIST(store)

MEMPROF_MEMORY_ACCESS_CALLBACK(load)
MEMPROF_MEMORY_ACCESS_CALLBACK(store)
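
// The expansions above define the entry points that MemProf-instrumented code
// calls on each memory access: __memprof_load and __memprof_store, plus the
// __memprof_hist_* variants used when histogram collection is enabled.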

// Force the linker to keep the symbols for various MemProf interface
// functions. We want to keep those in the executable in order to let the
// instrumented dynamic libraries access the symbol even if it is not used by
// the executable itself. This should help if the build system is removing dead
// code at link time.
static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0; // prevent dead condition elimination.
  // clang-format off
  switch (fake_condition) {
    case 1: __memprof_record_access(nullptr); break;
    case 2: __memprof_record_access_range(nullptr, 0); break;
  }
  // clang-format on
}

static void memprof_atexit() {
  Printf("MemProfiler exit stats:\n");
  __memprof_print_accumulated_stats();
}

static void InitializeHighMemEnd() {
  kHighMemEnd = GetMaxUserVirtualAddress();
  // Increase kHighMemEnd to make sure it's properly
  // aligned together with kHighMemBeg:
  kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
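  // For illustration (values differ by platform): with a 4 KiB mmap
  // granularity and SHADOW_SCALE == 3, the OR above sets the low 15 bits,
  // placing kHighMemEnd at the last byte of a 32 KiB-aligned window so the
  // shadow bounds derived from it stay mmap-granularity aligned.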
}

void PrintAddressSpaceLayout() {
  if (kHighMemBeg) {
    Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemBeg,
           (void *)kHighMemEnd);
    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
           (void *)kHighShadowEnd);
  }
  Printf("|| `[%p, %p]` || ShadowGap ||\n", (void *)kShadowGapBeg,
         (void *)kShadowGapEnd);
  if (kLowShadowBeg) {
    Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowBeg,
           (void *)kLowShadowEnd);
    Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowMemBeg,
           (void *)kLowMemEnd);
  }
  Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
         (void *)MEM_TO_SHADOW(kLowShadowEnd));
  if (kHighMemBeg) {
    Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
           (void *)MEM_TO_SHADOW(kHighShadowEnd));
  }
  Printf("\n");
  Printf("malloc_context_size=%zu\n",
         (uptr)common_flags()->malloc_context_size);

  Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
  Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
  Printf("SHADOW_OFFSET: %p\n", (void *)SHADOW_OFFSET);
  CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}

static void MemprofInitInternal() {
  if (LIKELY(memprof_inited))
    return;
  SanitizerToolName = "MemProfiler";
  CHECK(!memprof_init_is_running && "MemProf init calls itself!");
  memprof_init_is_running = true;

  CacheBinaryName();

  // Initialize flags. This must be done early, because most of the
  // initialization steps look at flags().
  InitializeFlags();

  AvoidCVE_2016_2143();

  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
  __interception::DoesNotSupportStaticLinking();

  // Install tool-specific callbacks in sanitizer_common.
  AddDieCallback(MemprofDie);
  SetCheckUnwindCallback(CheckUnwind);

  // Use the profile name specified via the binary itself if it exists, and
  // hasn't been overridden by a flag at runtime.
  if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
    __sanitizer_set_report_path(__memprof_profile_filename);
  else
    __sanitizer_set_report_path(common_flags()->log_path);

  __sanitizer::InitializePlatformEarly();

  // Set up the internal allocator callback.
  SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);

  InitializeMemprofInterceptors();
  CheckASLR();

  ReplaceSystemMalloc();

  DisableCoreDumperIfNecessary();

  InitializeShadowMemory();

  TSDInit(PlatformTSDDtor);
  InstallDeadlySignalHandlers(MemprofOnDeadlySignal);

  InitializeAllocator();

  if (flags()->atexit)
    Atexit(memprof_atexit);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  // interceptors
  InitTlsSize();

  // Create main thread.
  MemprofThread *main_thread = CreateMainThread();
  CHECK_EQ(0, main_thread->tid());
  force_interface_symbols(); // no-op.
  SanitizerInitializeUnwinder();

  Symbolizer::LateInitialize();

  VReport(1, "MemProfiler Init done\n");

  memprof_init_is_running = false;
  memprof_inited = 1;
}

void MemprofInitTime() {
  if (LIKELY(memprof_timestamp_inited))
    return;
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  memprof_init_timestamp_s = ts.tv_sec;
  memprof_timestamp_inited = 1;
}

// Initialize as requested from some part of MemProf runtime library
// (interceptors, allocator, etc).
void MemprofInitFromRtl() { MemprofInitInternal(); }

#if MEMPROF_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into uninstrumented executable
// (and thus normal initializers from .preinit_array or modules haven't run).

class MemprofInitializer {
public:
  MemprofInitializer() { MemprofInitFromRtl(); }
};

static MemprofInitializer memprof_initializer;
#endif // MEMPROF_DYNAMIC

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

// Initialize as requested from instrumented application code.
void __memprof_init() {
  MemprofInitTime();
  MemprofInitInternal();
}

void __memprof_preinit() { MemprofInitInternal(); }

void __memprof_version_mismatch_check_v1() {}

void __memprof_record_access(void const volatile *addr) {
  __memprof::RecordAccess((uptr)addr);
}

void __memprof_record_access_hist(void const volatile *addr) {
  __memprof::RecordAccessHistogram((uptr)addr);
}

void __memprof_record_access_range(void const volatile *addr, uptr size) {
  for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
    __memprof::RecordAccess(a);
}

void __memprof_record_access_range_hist(void const volatile *addr, uptr size) {
  for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
    __memprof::RecordAccessHistogram(a);
}
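
// Note: the *_range variants above step through the range one word
// (kWordSize bytes) at a time; they are intended for callers such as the
// interceptors that need to record accesses spanning more than one word.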

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
__sanitizer_unaligned_load16(const uu16 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
__sanitizer_unaligned_load32(const uu32 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
__sanitizer_unaligned_load64(const uu64 *p) {
  __memprof_record_access(p);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store16(uu16 *p, u16 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store32(uu32 *p, u32 x) {
  __memprof_record_access(p);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_unaligned_store64(uu64 *p, u64 x) {
  __memprof_record_access(p);
  *p = x;
}