//===-- asan_report.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_report.h"

#include "asan_descriptions.h"
#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
using ErrorMessageBuffer = InternalMmapVectorNoCtor<char, true>;
alignas(
    alignof(ErrorMessageBuffer)) static char error_message_buffer_placeholder
    [sizeof(ErrorMessageBuffer)];
static ErrorMessageBuffer *error_message_buffer = nullptr;
static Mutex error_message_buf_mutex;
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

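// Appends |buffer| to the error message accumulated for the current report.
// The buffer itself is constructed lazily, via placement new into statically
// allocated storage, so no dynamic initialization is required.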
void AppendToErrorMessageBuffer(const char *buffer) {
  Lock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
        new (error_message_buffer_placeholder) ErrorMessageBuffer();
    error_message_buffer->Initialize(kErrorMessageBufferSize);
  }
  uptr error_message_buffer_len = error_message_buffer->size();
  uptr buffer_len = internal_strlen(buffer);
  error_message_buffer->resize(error_message_buffer_len + buffer_len);
  internal_memcpy(error_message_buffer->data() + error_message_buffer_len,
                  buffer, buffer_len);
}

// ---------------------- Helper functions ----------------------- {{{1

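// Prints a single byte as two hex digits, wrapped in |before|/|after| and
// colored as either a shadow byte or an application memory byte.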
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->AppendF("%s%s%x%x%s%s", before,
               in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
               byte & 15, d.Default(), after);
}

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", (void *)ptr,
             (void *)zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
             (void *)ptr, (void *)zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", (void *)ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1

bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  //   "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
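  // For example (illustrative only), "2 16 4 1 x 48 8 4 y:42" describes two
  // variables: "x" at frame offset 16 of size 4, and "y" (declared on line 42)
  // at offset 48 of size 8.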
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
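// The typical pattern, used by the Report* functions below, is:
//   ScopedInErrorReport in_report(/*fatal*/ ...);
//   ErrorSomething error(GetCurrentTidOrInvalid(), ...);
//   in_report.ReportError(error);
// The report itself is printed from the ScopedInErrorReport destructor.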
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Deadlock Prevention Between ASan and LSan
    //
    // Background:
    // - The `dl_iterate_phdr` function requires holding libdl's internal lock
    //   (Lock A).
    // - LSan acquires the ASan thread registry lock (Lock B) *after* calling
    //   `dl_iterate_phdr`.
    //
    // Problem Scenario:
    // When ASan attempts to call `dl_iterate_phdr` while holding Lock B (e.g.,
    // during error reporting via `ErrorDescription::Print`), a circular lock
    // dependency may occur:
    //   1. Thread 1: Holds Lock B → Requests Lock A (via dl_iterate_phdr)
    //   2. Thread 2: Holds Lock A → Requests Lock B (via LSan operations)
    //
    // Solution:
    // Proactively load all required modules before acquiring Lock B.
    // This ensures:
    //   1. Any `dl_iterate_phdr` calls during module loading complete before
    //      locking.
    //   2. Subsequent error reporting avoids nested lock acquisition patterns.
    //   3. Eliminates the lock order inversion risk between libdl and ASan's
    //      thread registry.
#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
    Symbolizer::GetOrInit()->GetRefreshedListOfModules();
#endif

    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2)
      DumpProcessMap();

    // Copy the message buffer so that we could start logging without holding a
    // lock that gets acquired during printing.
    InternalScopedString buffer_copy;
    {
      Lock l(&error_message_buf_mutex);
      error_message_buffer->push_back('\0');
      buffer_copy.Append(error_message_buffer->data());
      // Clear error_message_buffer so that if we find other errors
      // we don't re-log this error.
      error_message_buffer->clear();
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(&current_error_, &description, sizeof(current_error_));
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportReallocArrayOverflow(uptr count, uptr size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
    uptr storage_beg, uptr storage_end, uptr old_container_beg,
    uptr old_container_end, uptr new_container_beg, uptr new_container_end,
    BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateDoubleEndedContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, storage_beg, storage_end,
      old_container_beg, old_container_end, new_container_beg,
      new_container_end);
  in_report.ReportError(error);
}

void ReportBadParamsToCopyContiguousContainerAnnotations(
    uptr old_storage_beg, uptr old_storage_end, uptr new_storage_beg,
    uptr new_storage_end, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToCopyContiguousContainerAnnotations error(
      GetCurrentTidOrInvalid(), stack, old_storage_beg, old_storage_end,
      new_storage_beg, new_storage_end);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // Offsets up to 2048 bytes correspond to at most 256 shadow bytes (with the
  // default 8-byte shadow granularity), which can be iterated quite fast.
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // check whether left is a stack memory pointer
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // check whether left is a heap memory address
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
           hdesc2.chunk_access.access_type != kAccessTypeInside ||
           hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // check whether left is an address of a global variable
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
           !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about both a1 and a2 addresses.
  return false;
}

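// detect_invalid_pointer_pairs: 0 - never check (the default), 1 - check only
// if both pointers are non-null, 2 and above - always check.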
static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0:
      return;
    case 1:
      if (p1 == nullptr || p2 == nullptr)
        return;
      break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}
// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf(
      "mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
      "This is an unrecoverable problem, exiting now.\n",
      (void *)addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports in ASan recover mode.
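// Each reporting PC is remembered in a small fixed-size pool; a report is
// suppressed if its PC is already present. If the pool overflows, we give up
// and Die() instead of reporting an unbounded number of errors.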
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
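  // In test-only mode, just record the faulting address instead of producing
  // a full report.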
  if (__asan_test_only_reported_buggy_pointer) {
    *__asan_test_only_reported_buggy_pointer = addr;
    return;
  }
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, the compiler can emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
  // and pass a mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

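// Registers a user callback that receives the complete text of each error
// report. A caller might, for example (illustrative only), forward reports to
// its own logging:
//   static void MyReportHandler(const char *report) { LogToMySink(report); }
//   __asan_set_error_report_callback(MyReportHandler);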
void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  Lock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
}  // extern "C"

// Provide a default implementation of __asan_on_error that does nothing and
// may be overridden by the user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}