1//===-- asan_report.cpp ---------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address sanity checker.
10//
11// This file contains error reporting code.
12//===----------------------------------------------------------------------===//
13
14#include "asan_report.h"
15
16#include "asan_descriptions.h"
17#include "asan_errors.h"
18#include "asan_flags.h"
19#include "asan_internal.h"
20#include "asan_mapping.h"
21#include "asan_scariness_score.h"
22#include "asan_stack.h"
23#include "asan_thread.h"
24#include "lsan/lsan_common.h"
25#include "sanitizer_common/sanitizer_common.h"
26#include "sanitizer_common/sanitizer_flags.h"
27#include "sanitizer_common/sanitizer_interface_internal.h"
28#include "sanitizer_common/sanitizer_placement_new.h"
29#include "sanitizer_common/sanitizer_report_decorator.h"
30#include "sanitizer_common/sanitizer_stackdepot.h"
31#include "sanitizer_common/sanitizer_symbolizer.h"
32
33namespace __asan {
34
// -------------------- User-specified callbacks ----------------- {{{1
// Optional user callback that receives the full text of each error report.
static void (*error_report_callback)(const char*);
// Buffer accumulating the report text. It is constructed lazily (see
// AppendToErrorMessageBuffer) via placement-new into static storage, so no
// static constructor runs at startup.
using ErrorMessageBuffer = InternalMmapVectorNoCtor<char, true>;
alignas(
    alignof(ErrorMessageBuffer)) static char error_message_buffer_placeholder
    [sizeof(ErrorMessageBuffer)];
static ErrorMessageBuffer *error_message_buffer = nullptr;
// Guards error_message_buffer and error_report_callback.
static Mutex error_message_buf_mutex;
// Fixed-size pool of PCs that already produced a report; used by
// SuppressErrorReport to deduplicate reports in recover mode.
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
45
46void AppendToErrorMessageBuffer(const char *buffer) {
47 Lock l(&error_message_buf_mutex);
48 if (!error_message_buffer) {
49 error_message_buffer =
50 new (error_message_buffer_placeholder) ErrorMessageBuffer();
51 error_message_buffer->Initialize(initial_capacity: kErrorMessageBufferSize);
52 }
53 uptr error_message_buffer_len = error_message_buffer->size();
54 uptr buffer_len = internal_strlen(s: buffer);
55 error_message_buffer->resize(new_size: error_message_buffer_len + buffer_len);
56 internal_memcpy(dest: error_message_buffer->data() + error_message_buffer_len,
57 src: buffer, n: buffer_len);
58}
59
60// ---------------------- Helper functions ----------------------- {{{1
61
62void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
63 bool in_shadow, const char *after) {
64 Decorator d;
65 str->AppendF(format: "%s%s%x%x%s%s", before,
66 in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
67 byte & 15, d.Default(), after);
68}
69
70static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
71 const char *zone_name) {
72 if (zone_ptr) {
73 if (zone_name) {
74 Printf(format: "malloc_zone_from_ptr(%p) = %p, which is %s\n", (void *)ptr,
75 (void *)zone_ptr, zone_name);
76 } else {
77 Printf(format: "malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
78 (void *)ptr, (void *)zone_ptr);
79 }
80 } else {
81 Printf(format: "malloc_zone_from_ptr(%p) = 0\n", (void *)ptr);
82 }
83}
84
85// ---------------------- Address Descriptions ------------------- {{{1
86
// Parses the compiler-generated frame description string into a vector of
// stack variable descriptors. Returns false if the string is malformed or
// describes zero objects; in that case |vars| may hold a partial result.
bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  // "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
  uptr n_objects = (uptr)internal_simple_strtoll(nptr: frame_descr, endptr: &p, base: 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg = (uptr)internal_simple_strtoll(nptr: p, endptr: &p, base: 10);
    uptr size = (uptr)internal_simple_strtoll(nptr: p, endptr: &p, base: 10);
    uptr len = (uptr)internal_simple_strtoll(nptr: p, endptr: &p, base: 10);
    // A zero offset/size or a missing space separator means the
    // description is corrupt.
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    // The object name may carry an optional ":line" suffix inside its
    // |len| bytes.
    char *colon_pos = internal_strchr(s: p, c: ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(nptr: colon_pos + 1, endptr: nullptr, base: 10);
    }
    StackVarDescr var = {.beg: beg, .size: size, .name_pos: p, .name_len: name_len, .line: line};
    vars->push_back(element: var);
    p += len;
  }

  return true;
}
121
122// -------------------- Different kinds of reports ----------------- {{{1
123
124// Use ScopedInErrorReport to run common actions just before and
125// immediately after printing error report.
class ScopedInErrorReport {
 public:
  // Acquires the report and thread-registry locks and prints the report
  // header. If |fatal| (or halt_on_error) is set, the destructor dies after
  // printing the report.
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Deadlock Prevention Between ASan and LSan
    //
    // Background:
    // - The `dl_iterate_phdr` function requires holding libdl's internal lock
    // (Lock A).
    // - LSan acquires the ASan thread registry lock (Lock B) *after* calling
    // `dl_iterate_phdr`.
    //
    // Problem Scenario:
    // When ASan attempts to call `dl_iterate_phdr` while holding Lock B (e.g.,
    // during error reporting via `ErrorDescription::Print`), a circular lock
    // dependency may occur:
    // 1. Thread 1: Holds Lock B → Requests Lock A (via dl_iterate_phdr)
    // 2. Thread 2: Holds Lock A → Requests Lock B (via LSan operations)
    //
    // Solution:
    // Proactively load all required modules before acquiring Lock B.
    // This ensures:
    // 1. Any `dl_iterate_phdr` calls during module loading complete before
    // locking.
    // 2. Subsequent error reporting avoids nested lock acquisition patterns.
    // 3. Eliminates the lock order inversion risk between libdl and ASan's
    // thread registry.
#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
    Symbolizer::GetOrInit()->GetRefreshedListOfModules();
#endif

    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        format: "=================================================================\n");
  }

  // Prints the recorded error, logs/announces it, then either dies
  // (halt_on_error) or resets state so further errors can be reported.
  ~ScopedInErrorReport() {
    // If this report is fatal but another thread already acquired the crash
    // state, back out and let that thread finish its report.
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(t: GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2)
      DumpProcessMap();

    // Copy the message buffer so that we could start logging without holding a
    // lock that gets acquired during printing.
    InternalScopedString buffer_copy;
    {
      Lock l(&error_message_buf_mutex);
      error_message_buffer->push_back(element: '\0');
      buffer_copy.Append(str: error_message_buffer->data());
      // Clear error_message_buffer so that if we find other errors
      // we don't re-log this error.
      error_message_buffer->clear();
    }

    LogFullErrorReport(buffer: buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(s: &current_error_, c: 0, n: sizeof(current_error_));

    if (halt_on_error_) {
      Report(format: "ABORTING\n");
      Die();
    }
  }

  // Records the error to be printed by the destructor. Only one error may be
  // reported per ScopedInErrorReport instance.
  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(dest: &current_error_, src: &description, n: sizeof(current_error_));
  }

  // Accessor for the error currently (or last) being reported; used by the
  // __asan_get_report_* interface functions below.
  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  // Whether the process should die once the report has been printed.
  bool halt_on_error_;
};
242
// Storage for the error being reported; LINKER_INITIALIZED avoids running a
// static constructor at startup.
ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);
244
245void ReportDeadlySignal(const SignalContext &sig) {
246 ScopedInErrorReport in_report(/*fatal*/ true);
247 ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
248 in_report.ReportError(description: error);
249}
250
251void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
252 ScopedInErrorReport in_report;
253 ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
254 in_report.ReportError(description: error);
255}
256
257void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
258 uptr delete_alignment,
259 BufferedStackTrace *free_stack) {
260 ScopedInErrorReport in_report;
261 ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
262 delete_size, delete_alignment);
263 in_report.ReportError(description: error);
264}
265
266void ReportFreeSizeMismatch(uptr addr, uptr delete_size, uptr delete_alignment,
267 BufferedStackTrace* free_stack) {
268 ScopedInErrorReport in_report;
269 ErrorFreeSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
270 delete_size, delete_alignment);
271 in_report.ReportError(description: error);
272}
273
274void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
275 ScopedInErrorReport in_report;
276 ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
277 in_report.ReportError(description: error);
278}
279
280void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
281 AllocType alloc_type,
282 AllocType dealloc_type) {
283 ScopedInErrorReport in_report;
284 ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
285 alloc_type, dealloc_type);
286 in_report.ReportError(description: error);
287}
288
289void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
290 ScopedInErrorReport in_report;
291 ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
292 in_report.ReportError(description: error);
293}
294
295void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
296 BufferedStackTrace *stack) {
297 ScopedInErrorReport in_report;
298 ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
299 addr);
300 in_report.ReportError(description: error);
301}
302
303void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
304 ScopedInErrorReport in_report(/*fatal*/ true);
305 ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
306 in_report.ReportError(description: error);
307}
308
309void ReportReallocArrayOverflow(uptr count, uptr size,
310 BufferedStackTrace *stack) {
311 ScopedInErrorReport in_report(/*fatal*/ true);
312 ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
313 in_report.ReportError(description: error);
314}
315
316void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
317 ScopedInErrorReport in_report(/*fatal*/ true);
318 ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
319 in_report.ReportError(description: error);
320}
321
322void ReportInvalidAllocationAlignment(uptr alignment,
323 BufferedStackTrace *stack) {
324 ScopedInErrorReport in_report(/*fatal*/ true);
325 ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
326 alignment);
327 in_report.ReportError(description: error);
328}
329
330void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
331 BufferedStackTrace *stack) {
332 ScopedInErrorReport in_report(/*fatal*/ true);
333 ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
334 size, alignment);
335 in_report.ReportError(description: error);
336}
337
338void ReportInvalidPosixMemalignAlignment(uptr alignment,
339 BufferedStackTrace *stack) {
340 ScopedInErrorReport in_report(/*fatal*/ true);
341 ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
342 alignment);
343 in_report.ReportError(description: error);
344}
345
346void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
347 BufferedStackTrace *stack) {
348 ScopedInErrorReport in_report(/*fatal*/ true);
349 ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
350 total_size, max_size);
351 in_report.ReportError(description: error);
352}
353
354void ReportRssLimitExceeded(BufferedStackTrace *stack) {
355 ScopedInErrorReport in_report(/*fatal*/ true);
356 ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
357 in_report.ReportError(description: error);
358}
359
360void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
361 ScopedInErrorReport in_report(/*fatal*/ true);
362 ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
363 in_report.ReportError(description: error);
364}
365
366void ReportStringFunctionMemoryRangesOverlap(const char *function,
367 const char *offset1, uptr length1,
368 const char *offset2, uptr length2,
369 BufferedStackTrace *stack) {
370 ScopedInErrorReport in_report;
371 ErrorStringFunctionMemoryRangesOverlap error(
372 GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
373 length2, function);
374 in_report.ReportError(description: error);
375}
376
377void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
378 BufferedStackTrace *stack) {
379 ScopedInErrorReport in_report;
380 ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
381 size);
382 in_report.ReportError(description: error);
383}
384
385void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
386 uptr old_mid, uptr new_mid,
387 BufferedStackTrace *stack) {
388 ScopedInErrorReport in_report;
389 ErrorBadParamsToAnnotateContiguousContainer error(
390 GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
391 in_report.ReportError(description: error);
392}
393
394void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
395 uptr storage_beg, uptr storage_end, uptr old_container_beg,
396 uptr old_container_end, uptr new_container_beg, uptr new_container_end,
397 BufferedStackTrace *stack) {
398 ScopedInErrorReport in_report;
399 ErrorBadParamsToAnnotateDoubleEndedContiguousContainer error(
400 GetCurrentTidOrInvalid(), stack, storage_beg, storage_end,
401 old_container_beg, old_container_end, new_container_beg,
402 new_container_end);
403 in_report.ReportError(description: error);
404}
405
406void ReportBadParamsToCopyContiguousContainerAnnotations(
407 uptr old_storage_beg, uptr old_storage_end, uptr new_storage_beg,
408 uptr new_storage_end, BufferedStackTrace *stack) {
409 ScopedInErrorReport in_report;
410 ErrorBadParamsToCopyContiguousContainerAnnotations error(
411 GetCurrentTidOrInvalid(), stack, old_storage_beg, old_storage_end,
412 new_storage_beg, new_storage_end);
413 in_report.ReportError(description: error);
414}
415
416void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
417 const __asan_global *g2, u32 stack_id2) {
418 ScopedInErrorReport in_report;
419 ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
420 stack_id2);
421 in_report.ReportError(description: error);
422}
423
424// ----------------------- CheckForInvalidPointerPair ----------- {{{1
425static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
426 uptr a1, uptr a2) {
427 ScopedInErrorReport in_report;
428 ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
429 in_report.ReportError(description: error);
430}
431
// Heuristically decides whether |a1| and |a2| point into different memory
// objects, which would make comparing or subtracting them invalid.
static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // 256B in shadow memory can be iterated quite fast
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
  uptr offset = right - left;
  // Nearby pointers: the pair is invalid iff the range between them crosses
  // poisoned memory (i.e. an object boundary/redzone).
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(beg: left, size: offset);

  AsanThread *t = GetCurrentThread();

  // check whether left is a stack memory pointer
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(addr: left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(addr: right);
    // Valid only if both addresses belong to the same stack variable.
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // check whether left is a heap memory address
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(addr: left, access_size: 0, descr: &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    // Valid only if both addresses lie inside the same heap chunk.
    return !GetHeapAddressInformation(addr: right, access_size: 0, descr: &hdesc2) ||
           hdesc2.chunk_access.access_type != kAccessTypeInside ||
           hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // check whether left is an address of a global variable
  GlobalAddressDescription gdesc1, gdesc2;
  // NOTE(review): |right| - 1 appears intended so that a one-past-the-end
  // pointer is still attributed to the same global — confirm.
  if (GetGlobalAddressInformation(addr: left, access_size: 0, descr: &gdesc1))
    return !GetGlobalAddressInformation(addr: right - 1, access_size: 0, descr: &gdesc2) ||
           !gdesc1.PointsInsideTheSameVariable(other: gdesc2);

  // If only |right| belongs to a known region (stack/heap/global) while
  // |left| does not, the pair mixes unrelated memory and is invalid.
  if (t->GetStackVariableShadowStart(addr: right) ||
      GetHeapAddressInformation(addr: right, access_size: 0, descr: &hdesc2) ||
      GetGlobalAddressInformation(addr: right - 1, access_size: 0, descr: &gdesc2))
    return true;

  // At this point we know nothing about both a1 and a2 addresses.
  return false;
}
475
476static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
477 switch (flags()->detect_invalid_pointer_pairs) {
478 case 0:
479 return;
480 case 1:
481 if (p1 == nullptr || p2 == nullptr)
482 return;
483 break;
484 }
485
486 uptr a1 = reinterpret_cast<uptr>(p1);
487 uptr a2 = reinterpret_cast<uptr>(p2);
488
489 if (IsInvalidPointerPair(a1, a2)) {
490 GET_CALLER_PC_BP_SP;
491 ReportInvalidPointerPair(pc, bp, sp, a1, a2);
492 }
493}
494// ----------------------- Mac-specific reports ----------------- {{{1
495
496void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
497 BufferedStackTrace *stack) {
498 ScopedInErrorReport in_report;
499 Printf(
500 format: "mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
501 "This is an unrecoverable problem, exiting now.\n",
502 (void *)addr);
503 PrintZoneForPointer(ptr: addr, zone_ptr, zone_name);
504 stack->Print();
505 DescribeAddressIfHeap(addr);
506}
507
508// -------------- SuppressErrorReport -------------- {{{1
509// Avoid error reports duplicating for ASan recover mode.
// Returns true if an error at |pc| was already reported (recover mode).
// Unseen PCs are recorded in a fixed-size pool; if the pool overflows,
// the process dies rather than emit unbounded duplicate reports.
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(a: &AsanBuggyPcPool[i]);
    // Try to claim an empty slot for |pc|; success means first report.
    // On CAS failure |cmp| is updated, so the equality check below still
    // catches a concurrent writer that stored the same PC.
    if (cmp == 0 && atomic_compare_exchange_strong(a: &AsanBuggyPcPool[i], cmp: &cmp,
                                                   xchg: pc, mo: memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  // Pool exhausted: too many distinct buggy PCs to track.
  Die();
}
521
// Central entry point for invalid-access reports. pc/bp/sp identify the
// faulting frame, |addr| the bad address; |fatal| forces halting even when
// halt_on_error is off.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  // Test-only hook: record the address instead of producing a report.
  if (__asan_test_only_reported_buggy_pointer) {
    *__asan_test_only_reported_buggy_pointer = addr;
    return;
  }
  // In recover mode, skip PCs that were already reported.
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, compiler can emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass
  // mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(description: error);
}
545
546} // namespace __asan
547
548// --------------------------- Interface --------------------- {{{1
549using namespace __asan;
550
551void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
552 uptr access_size, u32 exp) {
553 ENABLE_FRAME_POINTER;
554 bool fatal = flags()->halt_on_error;
555 ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
556}
557
558void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
559 Lock l(&error_message_buf_mutex);
560 error_report_callback = callback;
561}
562
// Prints a description of |addr| (its owning region and surroundings)
// without producing a full error report.
void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, access_size: 1, bug_type: "");
  asanThreadRegistry().Unlock();
}
569
570int __asan_report_present() {
571 return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
572}
573
574uptr __asan_get_report_pc() {
575 if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
576 return ScopedInErrorReport::CurrentError().Generic.pc;
577 return 0;
578}
579
580uptr __asan_get_report_bp() {
581 if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
582 return ScopedInErrorReport::CurrentError().Generic.bp;
583 return 0;
584}
585
586uptr __asan_get_report_sp() {
587 if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
588 return ScopedInErrorReport::CurrentError().Generic.sp;
589 return 0;
590}
591
592uptr __asan_get_report_address() {
593 ErrorDescription &err = ScopedInErrorReport::CurrentError();
594 if (err.kind == kErrorKindGeneric)
595 return err.Generic.addr_description.Address();
596 else if (err.kind == kErrorKindDoubleFree)
597 return err.DoubleFree.addr_description.addr;
598 return 0;
599}
600
601int __asan_get_report_access_type() {
602 if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
603 return ScopedInErrorReport::CurrentError().Generic.is_write;
604 return 0;
605}
606
607uptr __asan_get_report_access_size() {
608 if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
609 return ScopedInErrorReport::CurrentError().Generic.access_size;
610 return 0;
611}
612
613const char *__asan_get_report_description() {
614 if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
615 return ScopedInErrorReport::CurrentError().Generic.bug_descr;
616 return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
617}
618
619extern "C" {
620SANITIZER_INTERFACE_ATTRIBUTE
621void __sanitizer_ptr_sub(void *a, void *b) {
622 CheckForInvalidPointerPair(p1: a, p2: b);
623}
624SANITIZER_INTERFACE_ATTRIBUTE
625void __sanitizer_ptr_cmp(void *a, void *b) {
626 CheckForInvalidPointerPair(p1: a, p2: b);
627}
628} // extern "C"
629
// Provide default implementation of __asan_on_error that does nothing
// and may be overridden by user. It is invoked from the error-reporting
// path (see ASAN_ON_ERROR in ~ScopedInErrorReport).
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
633