1 | //=-- lsan_common.cpp -----------------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is a part of LeakSanitizer. |
10 | // Implementation of common leak checking functionality. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "lsan_common.h" |
15 | |
16 | #include "sanitizer_common/sanitizer_common.h" |
17 | #include "sanitizer_common/sanitizer_flag_parser.h" |
18 | #include "sanitizer_common/sanitizer_flags.h" |
19 | #include "sanitizer_common/sanitizer_placement_new.h" |
20 | #include "sanitizer_common/sanitizer_procmaps.h" |
21 | #include "sanitizer_common/sanitizer_report_decorator.h" |
22 | #include "sanitizer_common/sanitizer_stackdepot.h" |
23 | #include "sanitizer_common/sanitizer_stacktrace.h" |
24 | #include "sanitizer_common/sanitizer_suppressions.h" |
25 | #include "sanitizer_common/sanitizer_thread_registry.h" |
26 | #include "sanitizer_common/sanitizer_tls_get_addr.h" |
27 | |
28 | #if CAN_SANITIZE_LEAKS |
29 | |
30 | # if SANITIZER_APPLE |
31 | // https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127 |
32 | # if SANITIZER_IOS && !SANITIZER_IOSSIM |
33 | # define OBJC_DATA_MASK 0x0000007ffffffff8UL |
34 | # else |
35 | # define OBJC_DATA_MASK 0x00007ffffffffff8UL |
36 | # endif |
37 | # endif |
38 | |
39 | namespace __lsan { |
40 | |
41 | // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and |
42 | // also to protect the global list of root regions. |
43 | static Mutex global_mutex; |
44 | |
45 | void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); } |
46 | void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); } |
47 | |
48 | Flags lsan_flags; |
49 | |
50 | void DisableCounterUnderflow() { |
51 | if (common_flags()->detect_leaks) { |
    Report("Unmatched call to __lsan_enable().\n");
53 | Die(); |
54 | } |
55 | } |
56 | |
57 | void Flags::SetDefaults() { |
58 | # define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; |
59 | # include "lsan_flags.inc" |
60 | # undef LSAN_FLAG |
61 | } |
62 | |
63 | void RegisterLsanFlags(FlagParser *parser, Flags *f) { |
64 | # define LSAN_FLAG(Type, Name, DefaultValue, Description) \ |
65 | RegisterFlag(parser, #Name, Description, &f->Name); |
66 | # include "lsan_flags.inc" |
67 | # undef LSAN_FLAG |
68 | } |
69 | |
70 | # define LOG_POINTERS(...) \ |
71 | do { \ |
72 | if (flags()->log_pointers) \ |
73 | Report(__VA_ARGS__); \ |
74 | } while (0) |
75 | |
76 | # define LOG_THREADS(...) \ |
77 | do { \ |
78 | if (flags()->log_threads) \ |
79 | Report(__VA_ARGS__); \ |
80 | } while (0) |
81 | |
82 | class LeakSuppressionContext { |
83 | bool parsed = false; |
84 | SuppressionContext context; |
85 | bool suppressed_stacks_sorted = true; |
86 | InternalMmapVector<u32> suppressed_stacks; |
87 | const LoadedModule *suppress_module = nullptr; |
88 | |
89 | void LazyInit(); |
90 | Suppression *GetSuppressionForAddr(uptr addr); |
91 | bool SuppressInvalid(const StackTrace &stack); |
92 | bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size); |
93 | |
94 | public: |
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}
98 | |
99 | bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size); |
100 | |
101 | const InternalMmapVector<u32> &GetSortedSuppressedStacks() { |
102 | if (!suppressed_stacks_sorted) { |
103 | suppressed_stacks_sorted = true; |
      SortAndDedup(suppressed_stacks);
105 | } |
106 | return suppressed_stacks; |
107 | } |
108 | void PrintMatchedSuppressions(); |
109 | }; |
110 | |
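// The suppression context is constructed lazily with placement new into this
// static buffer (see InitializeSuppressions below), so no global constructor
// has to run at startup.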
111 | alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)]; |
112 | static LeakSuppressionContext *suppression_ctx = nullptr; |
static const char kSuppressionLeak[] = "leak";
114 | static const char *kSuppressionTypes[] = {kSuppressionLeak}; |
115 | static const char kStdSuppressions[] = |
116 | # if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT |
117 | // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT |
118 | // definition. |
119 | "leak:*pthread_exit*\n" |
120 | # endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT |
121 | # if SANITIZER_APPLE |
122 | // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173 |
123 | "leak:*_os_trace*\n" |
124 | # endif |
125 | // TLS leak in some glibc versions, described in |
126 | // https://sourceware.org/bugzilla/show_bug.cgi?id=12650. |
    "leak:*tls_get_addr*\n";
128 | |
129 | void InitializeSuppressions() { |
130 | CHECK_EQ(nullptr, suppression_ctx); |
131 | suppression_ctx = new (suppression_placeholder) |
132 | LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); |
133 | } |
134 | |
135 | void LeakSuppressionContext::LazyInit() { |
136 | if (!parsed) { |
137 | parsed = true; |
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
142 | if (flags()->use_tls && flags()->use_ld_allocations) |
143 | suppress_module = GetLinker(); |
144 | } |
145 | } |
146 | |
147 | Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) { |
148 | Suppression *s = nullptr; |
149 | |
150 | // Suppress by module name. |
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
155 | return s; |
156 | |
157 | // Suppress by file or function name. |
  SymbolizedStackHolder symbolized_stack(
      Symbolizer::GetOrInit()->SymbolizePC(addr));
  const SymbolizedStack *frames = symbolized_stack.get();
  for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
164 | break; |
165 | } |
166 | } |
167 | return s; |
168 | } |
169 | |
170 | static uptr GetCallerPC(const StackTrace &stack) { |
171 | // The top frame is our malloc/calloc/etc. The next frame is the caller. |
172 | if (stack.size >= 2) |
173 | return stack.trace[1]; |
174 | return 0; |
175 | } |
176 | |
177 | # if SANITIZER_APPLE |
178 | // Several pointers in the Objective-C runtime (method cache and class_rw_t, |
179 | // for example) are tagged with additional bits we need to strip. |
180 | static inline void *TransformPointer(void *p) { |
181 | uptr ptr = reinterpret_cast<uptr>(p); |
182 | return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK); |
183 | } |
184 | # endif |
185 | |
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
189 | // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules. |
190 | // They are allocated with a __libc_memalign() call in allocate_and_init() |
191 | // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those |
192 | // blocks, but we can make sure they come from our own allocator by intercepting |
193 | // __libc_memalign(). On top of that, there is no easy way to reach them. Their |
194 | // addresses are stored in a dynamically allocated array (the DTV) which is |
195 | // referenced from the static TLS. Unfortunately, we can't just rely on the DTV |
196 | // being reachable from the static TLS, and the dynamic TLS being reachable from |
197 | // the DTV. This is because the initial DTV is allocated before our interception |
198 | // mechanism kicks in, and thus we don't recognize it as allocated memory. We |
199 | // can't special-case it either, since we don't know its size. |
200 | // Our solution is to include in the root set all allocations made from |
201 | // ld-linux.so (which is where allocate_and_init() is implemented). This is |
202 | // guaranteed to include all dynamic TLS blocks (and possibly other allocations |
203 | // which we don't care about). |
204 | // On all other platforms, this simply checks to ensure that the caller pc is |
205 | // valid before reporting chunks as leaked. |
206 | bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) { |
207 | uptr caller_pc = GetCallerPC(stack); |
208 | // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark |
209 | // it as reachable, as we can't properly report its allocation stack anyway. |
210 | return !caller_pc || |
211 | (suppress_module && suppress_module->containsAddress(address: caller_pc)); |
212 | } |
213 | |
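// Returns true if any frame of the allocation stack matches a "leak:"
// suppression rule; the matching suppression's weight and hit count are
// incremented by the leak's size and allocation count.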
214 | bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack, |
215 | uptr hit_count, uptr total_size) { |
216 | for (uptr i = 0; i < stack.size; i++) { |
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
222 | return true; |
223 | } |
224 | } |
225 | return false; |
226 | } |
227 | |
228 | bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count, |
229 | uptr total_size) { |
230 | LazyInit(); |
  StackTrace stack = StackDepotGet(stack_trace_id);
232 | if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size)) |
233 | return false; |
234 | suppressed_stacks_sorted = false; |
  suppressed_stacks.push_back(stack_trace_id);
236 | return true; |
237 | } |
238 | |
239 | static LeakSuppressionContext *GetSuppressionContext() { |
240 | CHECK(suppression_ctx); |
241 | return suppression_ctx; |
242 | } |
243 | |
244 | void InitCommonLsan() { |
245 | if (common_flags()->detect_leaks) { |
246 | // Initialization which can fail or print warnings should only be done if |
247 | // LSan is actually enabled. |
248 | InitializeSuppressions(); |
249 | InitializePlatformSpecificModules(); |
250 | } |
251 | } |
252 | |
253 | class Decorator : public __sanitizer::SanitizerCommonDecorator { |
254 | public: |
255 | Decorator() : SanitizerCommonDecorator() {} |
256 | const char *Error() { return Red(); } |
257 | const char *Leak() { return Blue(); } |
258 | }; |
259 | |
260 | static inline bool MaybeUserPointer(uptr p) { |
261 | // Since our heap is located in mmap-ed memory, we can assume a sensible lower |
262 | // bound on heap addresses. |
263 | const uptr kMinAddress = 4 * 4096; |
264 | if (p < kMinAddress) |
265 | return false; |
266 | # if defined(__x86_64__) |
267 | // TODO: support LAM48 and 5 level page tables. |
268 | // LAM_U57 mask format |
269 | // * top byte: 0x81 because the format is: [0] [6-bit tag] [0] |
270 | // * top-1 byte: 0xff because it should be 0 |
271 | // * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff |
272 | constexpr uptr kLAM_U57Mask = 0x81ff80; |
273 | constexpr uptr kPointerMask = kLAM_U57Mask << 40; |
274 | return ((p & kPointerMask) == 0); |
275 | # elif defined(__mips64) |
276 | return ((p >> 40) == 0); |
277 | # elif defined(__aarch64__) |
278 | // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in |
279 | // address translation and can be used to store a tag. |
280 | constexpr uptr kPointerMask = 255ULL << 48; |
281 | // Accept up to 48 bit VMA. |
282 | return ((p & kPointerMask) == 0); |
283 | # elif defined(__loongarch_lp64) |
284 | // Allow 47-bit user-space VMA at current. |
285 | return ((p >> 47) == 0); |
286 | # else |
287 | return true; |
288 | # endif |
289 | } |
290 | |
291 | // Scans the memory range, looking for byte patterns that point into allocator |
292 | // chunks. Marks those chunks with |tag| and adds them to |frontier|. |
293 | // There are two usage modes for this function: finding reachable chunks |
294 | // (|tag| = kReachable) and finding indirectly leaked chunks |
295 | // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, |
296 | // so |frontier| = 0. |
297 | void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, |
298 | const char *region_type, ChunkTag tag) { |
299 | CHECK(tag == kReachable || tag == kIndirectlyLeaked); |
300 | const uptr alignment = flags()->pointer_alignment(); |
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
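  // Start at |begin|, rounded up to the configured pointer alignment.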
303 | uptr pp = begin; |
304 | if (pp % alignment) |
305 | pp = pp + alignment - pp % alignment; |
306 | for (; pp + sizeof(void *) <= end; pp += alignment) { |
307 | void *p = *reinterpret_cast<void **>(pp); |
308 | # if SANITIZER_APPLE |
309 | p = TransformPointer(p); |
310 | # endif |
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
312 | continue; |
313 | uptr chunk = PointsIntoChunk(p); |
314 | if (!chunk) |
315 | continue; |
316 | // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. |
317 | if (chunk == begin) |
318 | continue; |
319 | LsanMetadata m(chunk); |
320 | if (m.tag() == kReachable || m.tag() == kIgnored) |
321 | continue; |
322 | |
323 | // Do this check relatively late so we can log only the interesting cases. |
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
330 | continue; |
331 | } |
332 | |
333 | m.set_tag(tag); |
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
339 | } |
340 | } |
341 | |
// Scans a global range for heap pointers, skipping the sub-range occupied by
// the allocator's own global data.
343 | void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) { |
344 | uptr allocator_begin = 0, allocator_end = 0; |
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
346 | if (begin <= allocator_begin && allocator_begin < end) { |
347 | CHECK_LE(allocator_begin, allocator_end); |
348 | CHECK_LE(allocator_end, end); |
349 | if (begin < allocator_begin) |
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
356 | } |
357 | } |
358 | |
void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
364 | } |
365 | } |
366 | |
367 | # if SANITIZER_FUCHSIA |
368 | |
369 | // Fuchsia handles all threads together with its own callback. |
370 | static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t, |
371 | uptr) {} |
372 | |
373 | # else |
374 | |
375 | # if SANITIZER_ANDROID |
376 | // FIXME: Move this out into *libcdep.cpp |
377 | extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls( |
378 | pid_t, void (*cb)(void *, void *, uptr, void *), void *); |
379 | # endif |
380 | |
381 | static void ProcessThreadRegistry(Frontier *frontier) { |
382 | InternalMmapVector<uptr> ptrs; |
  GetAdditionalThreadContextPtrsLocked(&ptrs);
384 | |
385 | for (uptr i = 0; i < ptrs.size(); ++i) { |
386 | void *ptr = reinterpret_cast<void *>(ptrs[i]); |
    uptr chunk = PointsIntoChunk(ptr);
388 | if (!chunk) |
389 | continue; |
390 | LsanMetadata m(chunk); |
391 | if (!m.allocated()) |
392 | continue; |
393 | |
394 | // Mark as reachable and add to frontier. |
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
398 | } |
399 | } |
400 | |
401 | // Scans thread data (stacks and TLS) for heap pointers. |
402 | static void ProcessThreads(SuspendedThreadsList const &suspended_threads, |
403 | Frontier *frontier, tid_t caller_tid, |
404 | uptr caller_sp) { |
405 | InternalMmapVector<uptr> registers; |
  InternalMmapVector<Range> extra_ranges;
407 | for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { |
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
410 | uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; |
411 | DTLS *dtls; |
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
415 | if (!thread_found) { |
416 | // If a thread can't be found in the thread registry, it's probably in the |
417 | // process of destruction. Log this event and move on. |
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
419 | continue; |
420 | } |
421 | uptr sp; |
422 | PtraceRegistersStatus have_registers = |
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
424 | if (have_registers != REGISTERS_AVAILABLE) { |
      Report("Unable to get registers from thread %llu.\n", os_id);
426 | // If unable to get SP, consider the entire stack to be reachable unless |
427 | // GetRegistersAndSP failed with ESRCH. |
428 | if (have_registers == REGISTERS_UNAVAILABLE_FATAL) |
429 | continue; |
430 | sp = stack_begin; |
431 | } |
    if (suspended_threads.GetThreadID(i) == caller_tid) {
433 | sp = caller_sp; |
434 | } |
435 | |
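    // Scan the suspended thread's saved register values as raw memory: any
    // register that happens to hold a pointer into a chunk keeps that chunk
    // reachable.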
436 | if (flags()->use_registers && have_registers) { |
437 | uptr registers_begin = reinterpret_cast<uptr>(registers.data()); |
438 | uptr registers_end = |
439 | reinterpret_cast<uptr>(registers.data() + registers.size()); |
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
442 | } |
443 | |
444 | if (flags()->use_stacks) { |
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
446 | (void *)stack_end, (void *)sp); |
447 | if (sp < stack_begin || sp >= stack_end) { |
448 | // SP is outside the recorded stack range (e.g. the thread is running a |
449 | // signal handler on alternate stack, or swapcontext was used). |
450 | // Again, consider the entire stack range to be reachable. |
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
460 | skipped, (void *)stack_begin, (void *)stack_end); |
461 | } else { |
462 | // Shrink the stack range to ignore out-of-scope values. |
463 | stack_begin = sp; |
464 | } |
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
470 | } |
471 | |
472 | if (flags()->use_tls) { |
473 | if (tls_begin) { |
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
475 | // If the tls and cache ranges don't overlap, scan full tls range, |
476 | // otherwise, only scan the non-overlapping portions |
477 | if (cache_begin == cache_end || tls_end < cache_begin || |
478 | tls_begin > cache_end) { |
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
487 | } |
488 | } |
489 | # if SANITIZER_ANDROID |
490 | auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/, |
491 | void *arg) -> void { |
492 | ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin), |
493 | reinterpret_cast<uptr>(dtls_end), |
                             reinterpret_cast<Frontier *>(arg), "DTLS",
495 | kReachable); |
496 | }; |
497 | |
498 | // FIXME: There might be a race-condition here (and in Bionic) if the |
499 | // thread is suspended in the middle of updating its DTLS. IOWs, we |
500 | // could scan already freed memory. (probably fine for now) |
501 | __libc_iterate_dynamic_tls(os_id, cb, frontier); |
502 | # else |
503 | if (dtls && !DTLSInDestruction(dtls)) { |
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
512 | } |
513 | }); |
514 | } else { |
515 | // We are handling a thread with DTLS under destruction. Log about |
516 | // this and continue. |
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
518 | } |
519 | # endif |
520 | } |
521 | } |
522 | |
523 | // Add pointers reachable from ThreadContexts |
524 | ProcessThreadRegistry(frontier); |
525 | } |
526 | |
527 | # endif // SANITIZER_FUCHSIA |
528 | |
// A map from [region_begin, region_end) pairs to the number of times the
// region has been registered.
530 | using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>; |
531 | |
532 | static RootRegions &GetRootRegionsLocked() { |
533 | global_mutex.CheckLocked(); |
534 | static RootRegions *regions = nullptr; |
535 | alignas(RootRegions) static char placeholder[sizeof(RootRegions)]; |
536 | if (!regions) |
537 | regions = new (placeholder) RootRegions(); |
538 | return *regions; |
539 | } |
540 | |
541 | bool HasRootRegions() { return !GetRootRegionsLocked().empty(); } |
542 | |
543 | void ScanRootRegions(Frontier *frontier, |
544 | const InternalMmapVectorNoCtor<Region> &mapped_regions) { |
545 | if (!flags()->use_root_regions) |
546 | return; |
547 | |
548 | InternalMmapVector<Region> regions; |
  GetRootRegionsLocked().forEach([&](const auto &kv) {
    regions.push_back({kv.first.first, kv.first.second});
551 | return true; |
552 | }); |
553 | |
554 | InternalMmapVector<Region> intersection; |
  Intersect(mapped_regions, regions, intersection);
556 | |
557 | for (const Region &r : intersection) { |
    LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
                 (void *)r.begin, (void *)r.end);
    ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
561 | } |
562 | } |
563 | |
564 | // Scans root regions for heap pointers. |
565 | static void ProcessRootRegions(Frontier *frontier) { |
566 | if (!flags()->use_root_regions || !HasRootRegions()) |
567 | return; |
568 | MemoryMappingLayout proc_maps(/*cache_enabled*/ true); |
569 | MemoryMappedSegment segment; |
570 | InternalMmapVector<Region> mapped_regions; |
  while (proc_maps.Next(&segment))
    if (segment.IsReadable())
      mapped_regions.push_back({segment.start, segment.end});
574 | ScanRootRegions(frontier, mapped_regions); |
575 | } |
576 | |
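// Drains the frontier: pops each chunk and scans its payload for pointers into
// other chunks; ScanRangeForPointers tags newly discovered chunks and pushes
// them back onto the frontier until no work remains.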
577 | static void FloodFillTag(Frontier *frontier, ChunkTag tag) { |
578 | while (frontier->size()) { |
579 | uptr next_chunk = frontier->back(); |
580 | frontier->pop_back(); |
581 | LsanMetadata m(next_chunk); |
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
584 | } |
585 | } |
586 | |
587 | // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks |
588 | // which are reachable from it as indirectly leaked. |
589 | static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { |
590 | chunk = GetUserBegin(chunk); |
591 | LsanMetadata m(chunk); |
592 | if (m.allocated() && m.tag() != kReachable) { |
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
595 | } |
596 | } |
597 | |
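// ForEachChunk callback. Marks a chunk as ignored if its allocation stack id
// appears in the sorted list of suppressed stack ids passed via |arg|.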
598 | static void IgnoredSuppressedCb(uptr chunk, void *arg) { |
599 | CHECK(arg); |
600 | chunk = GetUserBegin(chunk); |
601 | LsanMetadata m(chunk); |
602 | if (!m.allocated() || m.tag() == kIgnored) |
603 | return; |
604 | |
605 | const InternalMmapVector<u32> &suppressed = |
606 | *static_cast<const InternalMmapVector<u32> *>(arg); |
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
608 | if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx]) |
609 | return; |
610 | |
  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
612 | (void *)(chunk + m.requested_size()), m.requested_size()); |
613 | m.set_tag(kIgnored); |
614 | } |
615 | |
616 | // ForEachChunk callback. If chunk is marked as ignored, adds its address to |
617 | // frontier. |
618 | static void CollectIgnoredCb(uptr chunk, void *arg) { |
619 | CHECK(arg); |
620 | chunk = GetUserBegin(chunk); |
621 | LsanMetadata m(chunk); |
622 | if (m.allocated() && m.tag() == kIgnored) { |
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
626 | } |
627 | } |
628 | |
629 | // Sets the appropriate tag on each chunk. |
630 | static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, |
631 | Frontier *frontier, tid_t caller_tid, |
632 | uptr caller_sp) { |
633 | const InternalMmapVector<u32> &suppressed_stacks = |
634 | GetSuppressionContext()->GetSortedSuppressedStacks(); |
635 | if (!suppressed_stacks.empty()) { |
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
638 | } |
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);
644 | |
645 | // The check here is relatively expensive, so we do this in a separate flood |
646 | // fill. That way we can skip the check for chunks that are reachable |
647 | // otherwise. |
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);
651 | |
652 | // Iterate over leaked chunks and mark those that are reachable from other |
653 | // leaked chunks. |
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
656 | } |
657 | |
658 | // ForEachChunk callback. Resets the tags to pre-leak-check state. |
659 | static void ResetTagsCb(uptr chunk, void *arg) { |
660 | (void)arg; |
661 | chunk = GetUserBegin(chunk); |
662 | LsanMetadata m(chunk); |
663 | if (m.allocated() && m.tag() != kIgnored) |
664 | m.set_tag(kDirectlyLeaked); |
665 | } |
666 | |
667 | // ForEachChunk callback. Aggregates information about unreachable chunks into |
668 | // a LeakReport. |
669 | static void CollectLeaksCb(uptr chunk, void *arg) { |
670 | CHECK(arg); |
671 | LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg); |
672 | chunk = GetUserBegin(chunk); |
673 | LsanMetadata m(chunk); |
674 | if (!m.allocated()) |
675 | return; |
676 | if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) |
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
678 | } |
679 | |
680 | void LeakSuppressionContext::PrintMatchedSuppressions() { |
681 | InternalMmapVector<Suppression *> matched; |
  context.GetMatched(&matched);
683 | if (!matched.size()) |
684 | return; |
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
695 | } |
696 | |
697 | # if SANITIZER_FUCHSIA |
698 | |
699 | // Fuchsia provides a libc interface that guarantees all threads are |
700 | // covered, and SuspendedThreadList is never really used. |
701 | static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {} |
702 | |
703 | # else // !SANITIZER_FUCHSIA |
704 | |
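// Compares the set of threads that were actually suspended against the set of
// running threads recorded in the thread registry, and warns about any running
// thread that was not suspended, since its stack will not be scanned.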
705 | static void ReportUnsuspendedThreads( |
706 | const SuspendedThreadsList &suspended_threads) { |
707 | InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount()); |
708 | for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i) |
    threads[i] = suspended_threads.GetThreadID(i);
710 | |
  Sort(threads.data(), threads.size());
712 | |
713 | InternalMmapVector<tid_t> unsuspended; |
  GetRunningThreadsLocked(&unsuspended);
715 | |
716 | for (auto os_id : unsuspended) { |
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
722 | } |
723 | } |
724 | |
725 | # endif // !SANITIZER_FUCHSIA |
726 | |
727 | static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, |
728 | void *arg) { |
729 | CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); |
730 | CHECK(param); |
731 | CHECK(!param->success); |
732 | ReportUnsuspendedThreads(suspended_threads); |
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
739 | param->success = true; |
740 | } |
741 | |
742 | static bool PrintResults(LeakReport &report) { |
743 | uptr unsuppressed_count = report.UnsuppressedLeakCount(); |
744 | if (unsuppressed_count) { |
745 | Decorator d; |
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
754 | } |
755 | if (common_flags()->print_suppressions) |
756 | GetSuppressionContext()->PrintMatchedSuppressions(); |
757 | if (unsuppressed_count > 0) { |
758 | report.PrintSummary(); |
759 | return true; |
760 | } |
761 | return false; |
762 | } |
763 | |
764 | static bool CheckForLeaks() { |
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
770 | // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match |
771 | // suppressions. However if a stack id was previously suppressed, it should be |
772 | // suppressed in future checks as well. |
773 | for (int i = 0;; ++i) { |
774 | EnsureMainThreadIDIsCorrect(); |
775 | CheckForLeaksParam param; |
776 | // Capture calling thread's stack pointer early, to avoid false negatives. |
777 | // Old frame with dead pointers might be overlapped by new frame inside |
778 | // CheckForLeaks which does not use bytes with pointers before the |
779 | // threads are suspended and stack pointers captured. |
780 | param.caller_tid = GetTid(); |
781 | param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0)); |
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
783 | if (!param.success) { |
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
791 | Die(); |
792 | } |
793 | LeakReport leak_report; |
    leak_report.AddLeakedChunks(param.leaks);
795 | |
796 | // No new suppressions stacks, so rerun will not help and we can report. |
797 | if (!leak_report.ApplySuppressions()) |
      return PrintResults(leak_report);
799 | |
800 | // No indirect leaks to report, so we are done here. |
801 | if (!leak_report.IndirectUnsuppressedLeakCount()) |
      return PrintResults(leak_report);
803 | |
804 | if (i >= 8) { |
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
807 | } |
808 | |
809 | // We found a new previously unseen suppressed call stack. Rerun to make |
810 | // sure it does not hold indirect leaks. |
    VReport(1, "Rerun with %zu suppressed stacks.",
812 | GetSuppressionContext()->GetSortedSuppressedStacks().size()); |
813 | } |
814 | } |
815 | |
816 | static bool has_reported_leaks = false; |
817 | bool HasReportedLeaks() { return has_reported_leaks; } |
818 | |
819 | void DoLeakCheck() { |
820 | Lock l(&global_mutex); |
821 | static bool already_done; |
822 | if (already_done) |
823 | return; |
824 | already_done = true; |
825 | has_reported_leaks = CheckForLeaks(); |
826 | if (has_reported_leaks) |
827 | HandleLeaks(); |
828 | } |
829 | |
830 | static int DoRecoverableLeakCheck() { |
831 | Lock l(&global_mutex); |
832 | bool have_leaks = CheckForLeaks(); |
833 | return have_leaks ? 1 : 0; |
834 | } |
835 | |
836 | void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); } |
837 | |
838 | ///// LeakReport implementation. ///// |
839 | |
840 | // A hard limit on the number of distinct leaks, to avoid quadratic complexity |
841 | // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks |
842 | // in real-world applications. |
843 | // FIXME: Get rid of this limit by moving logic into DedupLeaks. |
844 | const uptr kMaxLeaksConsidered = 5000; |
845 | |
846 | void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) { |
847 | for (const LeakedChunk &leak : chunks) { |
848 | uptr chunk = leak.chunk; |
849 | u32 stack_trace_id = leak.stack_trace_id; |
850 | uptr leaked_size = leak.leaked_size; |
851 | ChunkTag tag = leak.tag; |
852 | CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); |
853 | |
854 | if (u32 resolution = flags()->resolution) { |
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
857 | stack_trace_id = StackDepotPut(stack); |
858 | } |
859 | |
860 | bool is_directly_leaked = (tag == kDirectlyLeaked); |
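    // Merge with an existing leak that has the same (possibly truncated)
    // allocation stack and the same direct/indirect kind; otherwise record a
    // new leak entry.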
861 | uptr i; |
862 | for (i = 0; i < leaks_.size(); i++) { |
863 | if (leaks_[i].stack_trace_id == stack_trace_id && |
864 | leaks_[i].is_directly_leaked == is_directly_leaked) { |
865 | leaks_[i].hit_count++; |
866 | leaks_[i].total_size += leaked_size; |
867 | break; |
868 | } |
869 | } |
870 | if (i == leaks_.size()) { |
871 | if (leaks_.size() == kMaxLeaksConsidered) |
872 | return; |
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
881 | } |
882 | } |
883 | } |
884 | |
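// Sort order: direct leaks before indirect ones; within each class, larger
// total size first.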
885 | static bool LeakComparator(const Leak &leak1, const Leak &leak2) { |
886 | if (leak1.is_directly_leaked == leak2.is_directly_leaked) |
887 | return leak1.total_size > leak2.total_size; |
888 | else |
889 | return leak1.is_directly_leaked; |
890 | } |
891 | |
892 | void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { |
893 | CHECK(leaks_.size() <= kMaxLeaksConsidered); |
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);
900 | |
901 | uptr unsuppressed_count = UnsuppressedLeakCount(); |
902 | if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) |
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
905 | uptr leaks_reported = 0; |
906 | for (uptr i = 0; i < leaks_.size(); i++) { |
907 | if (leaks_[i].is_suppressed) |
908 | continue; |
    PrintReportForLeak(i);
910 | leaks_reported++; |
911 | if (leaks_reported == num_leaks_to_report) |
912 | break; |
913 | } |
914 | if (leaks_reported < unsuppressed_count) { |
915 | uptr remaining = unsuppressed_count - leaks_reported; |
    Printf("Omitting %zu more leak(s).\n", remaining);
917 | } |
918 | } |
919 | |
920 | void LeakReport::PrintReportForLeak(uptr index) { |
921 | Decorator d; |
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());
927 | |
928 | CHECK(leaks_[index].stack_trace_id); |
  StackDepotGet(leaks_[index].stack_trace_id).Print();
930 | |
931 | if (flags()->report_objects) { |
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
935 | } |
936 | } |
937 | |
938 | void LeakReport::PrintLeakedObjectsForLeak(uptr index) { |
939 | u32 leak_id = leaks_[index].id; |
940 | for (uptr j = 0; j < leaked_objects_.size(); j++) { |
941 | if (leaked_objects_[j].leak_id == leak_id) |
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
943 | leaked_objects_[j].size); |
944 | } |
945 | } |
946 | |
947 | void LeakReport::PrintSummary() { |
948 | CHECK(leaks_.size() <= kMaxLeaksConsidered); |
949 | uptr bytes = 0, allocations = 0; |
950 | for (uptr i = 0; i < leaks_.size(); i++) { |
951 | if (leaks_[i].is_suppressed) |
952 | continue; |
953 | bytes += leaks_[i].total_size; |
954 | allocations += leaks_[i].hit_count; |
955 | } |
956 | InternalScopedString summary; |
  summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
                  allocations);
  ReportErrorSummary(summary.data());
960 | } |
961 | |
962 | uptr LeakReport::ApplySuppressions() { |
963 | LeakSuppressionContext *suppressions = GetSuppressionContext(); |
964 | uptr new_suppressions = 0; |
965 | for (uptr i = 0; i < leaks_.size(); i++) { |
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
968 | leaks_[i].is_suppressed = true; |
969 | ++new_suppressions; |
970 | } |
971 | } |
972 | return new_suppressions; |
973 | } |
974 | |
975 | uptr LeakReport::UnsuppressedLeakCount() { |
976 | uptr result = 0; |
977 | for (uptr i = 0; i < leaks_.size(); i++) |
978 | if (!leaks_[i].is_suppressed) |
979 | result++; |
980 | return result; |
981 | } |
982 | |
983 | uptr LeakReport::IndirectUnsuppressedLeakCount() { |
984 | uptr result = 0; |
985 | for (uptr i = 0; i < leaks_.size(); i++) |
986 | if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked) |
987 | result++; |
988 | return result; |
989 | } |
990 | |
991 | } // namespace __lsan |
992 | #else // CAN_SANITIZE_LEAKS |
993 | namespace __lsan { |
994 | void InitCommonLsan() {} |
995 | void DoLeakCheck() {} |
996 | void DoRecoverableLeakCheckVoid() {} |
997 | void DisableInThisThread() {} |
998 | void EnableInThisThread() {} |
999 | } // namespace __lsan |
1000 | #endif // CAN_SANITIZE_LEAKS |
1001 | |
1002 | using namespace __lsan; |
1003 | |
1004 | extern "C" { |
1005 | SANITIZER_INTERFACE_ATTRIBUTE |
1006 | void __lsan_ignore_object(const void *p) { |
1007 | #if CAN_SANITIZE_LEAKS |
1008 | if (!common_flags()->detect_leaks) |
1009 | return; |
1010 | // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not |
1011 | // locked. |
1012 | Lock l(&global_mutex); |
1013 | IgnoreObjectResult res = IgnoreObject(p); |
1014 | if (res == kIgnoreObjectInvalid) |
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
1023 | #endif // CAN_SANITIZE_LEAKS |
1024 | } |
1025 | |
1026 | SANITIZER_INTERFACE_ATTRIBUTE |
1027 | void __lsan_register_root_region(const void *begin, uptr size) { |
1028 | #if CAN_SANITIZE_LEAKS |
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
1030 | uptr b = reinterpret_cast<uptr>(begin); |
1031 | uptr e = b + size; |
1032 | CHECK_LT(b, e); |
1033 | |
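  // The same region may be registered several times; the map value counts
  // registrations, so each one must be matched by an unregister call.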
1034 | Lock l(&global_mutex); |
1035 | ++GetRootRegionsLocked()[{b, e}]; |
1036 | #endif // CAN_SANITIZE_LEAKS |
1037 | } |
1038 | |
1039 | SANITIZER_INTERFACE_ATTRIBUTE |
1040 | void __lsan_unregister_root_region(const void *begin, uptr size) { |
1041 | #if CAN_SANITIZE_LEAKS |
1042 | uptr b = reinterpret_cast<uptr>(begin); |
1043 | uptr e = b + size; |
1044 | CHECK_LT(b, e); |
  VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
1046 | |
1047 | { |
1048 | Lock l(&global_mutex); |
    if (auto *f = GetRootRegionsLocked().find({b, e})) {
      if (--(f->second) == 0)
        GetRootRegionsLocked().erase(f);
1052 | return; |
1053 | } |
1054 | } |
  Report(
      "__lsan_unregister_root_region(): region at %p of size %zu has not "
      "been registered.\n",
      begin, size);
1059 | Die(); |
1060 | #endif // CAN_SANITIZE_LEAKS |
1061 | } |
1062 | |
1063 | SANITIZER_INTERFACE_ATTRIBUTE |
1064 | void __lsan_disable() { |
1065 | #if CAN_SANITIZE_LEAKS |
1066 | __lsan::DisableInThisThread(); |
1067 | #endif |
1068 | } |
1069 | |
1070 | SANITIZER_INTERFACE_ATTRIBUTE |
1071 | void __lsan_enable() { |
1072 | #if CAN_SANITIZE_LEAKS |
1073 | __lsan::EnableInThisThread(); |
1074 | #endif |
1075 | } |
1076 | |
1077 | SANITIZER_INTERFACE_ATTRIBUTE |
1078 | void __lsan_do_leak_check() { |
1079 | #if CAN_SANITIZE_LEAKS |
1080 | if (common_flags()->detect_leaks) |
1081 | __lsan::DoLeakCheck(); |
1082 | #endif // CAN_SANITIZE_LEAKS |
1083 | } |
1084 | |
1085 | SANITIZER_INTERFACE_ATTRIBUTE |
1086 | int __lsan_do_recoverable_leak_check() { |
1087 | #if CAN_SANITIZE_LEAKS |
1088 | if (common_flags()->detect_leaks) |
1089 | return __lsan::DoRecoverableLeakCheck(); |
1090 | #endif // CAN_SANITIZE_LEAKS |
1091 | return 0; |
1092 | } |
1093 | |
1094 | SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) { |
1095 | return "" ; |
1096 | } |
1097 | |
1098 | #if !SANITIZER_SUPPORTS_WEAK_HOOKS |
1099 | SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) { |
1100 | return 0; |
1101 | } |
1102 | |
1103 | SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) { |
1104 | return "" ; |
1105 | } |
1106 | #endif |
1107 | } // extern "C" |
1108 | |