//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID && !defined(__aarch64__)
// 32-bit Android only has 4k pages.
inline uptr GetPageSize() { return 4096; }
inline uptr GetPageSizeCached() { return 4096; }
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
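// Example (illustrative; `size` stands for a caller-supplied byte count):
// round a request up to whole pages before mapping, using RoundUpTo, which
// is defined later in this header.
//
//   uptr rounded = RoundUpTo(size, GetPageSizeCached());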

uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
ThreadID GetTid();
int TgKill(pid_t pid, ThreadID tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
                          uptr *tls_begin, uptr *tls_end);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);

inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size, bool raw_report = false);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);
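// Illustrative sketch combining the primitives above: allocate a buffer
// followed by an inaccessible guard page (the size and name are made up):
//
//   uptr page = GetPageSizeCached();
//   char *buf = (char *)MmapOrDie(2 * page, "buffer with guard");
//   CHECK(MprotectNoAccess((uptr)buf + page, page));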

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. The
// address will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped with no
// access. high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end,
                      uptr granularity);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);
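// Layout sketch (illustrative only), with A = the returned address and S as
// defined in the comment above:
//
//   [A - ring_buffer_size, A)  ring buffer
//   [A,     A + S)             shadow
//   [A + S, A + 2 * S)         num_aliases regions of alias_size bytes each,
//                              all backed by the same physical pages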

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) as requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end) address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check that the VMA size the binary was built for matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
int RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);
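// Illustrative sketch of a tool-specific callback (the callback name and the
// one-slot stats layout are hypothetical):
//
//   static void AccumulateRss(uptr start, uptr rss, bool file,
//                             /*out*/ uptr *stats) {
//     stats[0] += rss;  // Invoked per mapping; `file` marks file-backed ones.
//   }
//   ...
//   uptr stats[1] = {};
//   GetMemoryProfile(AccumulateRss, stats);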

// Simple low-level (mmap-based) allocator for internal use. It doesn't have
// a constructor, so all instances of LowLevelAllocator should be
// linker initialized.
//
// NOTE: Users should use the singleton provided via
// `GetGlobalLowLevelAllocator()` rather than create a new allocator. This
// reduces the number of mmap fragments, as allocations share the singleton's
// contiguous mmap region.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Registers a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

LowLevelAllocator &GetGlobalLowLevelAllocator();
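// Illustrative use (`MyNode` is a hypothetical POD type): the placement
// operator new declared at the bottom of this header allocates from a
// LowLevelAllocator. Callers must provide their own locking around
// Allocate().
//
//   MyNode *node = new (GetGlobalLowLevelAllocator()) MyNode();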

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                     \
  do {                                          \
    if (UNLIKELY((uptr)Verbosity() >= (level))) \
      Report(__VA_ARGS__);                      \
  } while (0)
#define VPrintf(level, ...)                     \
  do {                                          \
    if (UNLIKELY((uptr)Verbosity() >= (level))) \
      Printf(__VA_ARGS__);                      \
  } while (0)
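// Example (`shadow_beg` is a hypothetical variable): with the runtime flag
// verbosity=1 the line below prints; at the default verbosity of 0 it
// reduces to a cheap predicted-false branch.
//
//   VReport(1, "Shadow memory mapped at %p\n", (void *)shadow_beg);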

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Returns true if the entire range can be read.
bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Attempts to copy `n` bytes from memory range starting at `src` to `dest`.
// Returns true if the entire range can be read. Returns `false` if any part of
// the source range cannot be read, in which case the contents of `dest` are
// undefined.
bool TryMemCpy(void *dest, const void *src, uptr n);
// Copies accessible memory and zero-fills inaccessible ranges.
void MemCpyAccessible(void *dest, const void *src, uptr n);
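// Illustrative sketch (`addr` is a hypothetical address value): probe a
// possibly unmapped word before printing it.
//
//   u64 word;
//   if (TryMemCpy(&word, (const void *)addr, sizeof(word)))
//     Printf("value at %p: 0x%llx\n", (void *)addr, word);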

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN CheckFailed(const char *file, int line, const char *cond, u64 v1,
                          u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// in this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
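// Example (the byte count is made up): the call below reports
// "ERROR: <SanitizerToolName>: out of memory: requested 0x400 bytes".
//
//   ERROR_OOM("requested 0x%zx bytes\n", (uptr)0x400);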

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly encouraged to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

bool IsSignalHandlerFromSanitizer(int signum);
bool SetSignalHandlerFromSanitizer(int signum, bool new_state);

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but constructs error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to users.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
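// Example: for x == 12 (binary 1100), MostSignificantSetBitIndex(12) == 3
// and LeastSignificantSetBitIndex(12) == 2.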

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}
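// Example: with the power-of-two boundary 16, RoundUpTo(18, 16) == 32,
// RoundDownTo(18, 16) == 16, and IsAligned(32, 16) is true.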

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

inline bool IntervalsAreSeparate(uptr start1, uptr end1, uptr start2,
                                 uptr end2) {
  CHECK_LE(start1, end1);
  CHECK_LE(start2, end2);
  return (end1 < start2) || (end2 < start1);
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') || (c == '\f') ||
         (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) { return (c >= '0') && (c <= '9'); }
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T, bool raw_report = false>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_, raw_report); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const { return size_; }
  const T *data() const { return data_; }
  T *data() { return data_; }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const { return data(); }
  T *begin() { return data(); }
  const T *end() const { return data() + size(); }
  T *end() { return data() + size(); }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data =
        (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector", raw_report);
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_, raw_report);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
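// Illustrative use of the no-ctor variant as a linker-initialized global that
// is set up explicitly at runtime (`g_addrs` is a hypothetical name):
//
//   static InternalMmapVectorNoCtor<uptr> g_addrs;
//   ...
//   g_addrs.Initialize(/*initial_capacity=*/16);
//   g_addrs.push_back(0x1000);
//   g_addrs.Destroy();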

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
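// Illustrative use (`tid` is a hypothetical thread id):
//
//   InternalScopedString str;
//   str.AppendF("thread T%u", tid);
//   str.Append(" created");
//   Printf("%s\n", str.data());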

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
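// Example (`v` is a hypothetical InternalMmapVector<uptr>): heap sort is not
// stable, which is fine for POD contents. A custom comparator sorts
// descending:
//
//   Sort(v.data(), v.size(), [](uptr a, uptr b) { return a > b; });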

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
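// Example: `v` must already be sorted under `comp`. On a sorted
// InternalMmapVector<uptr>, the call below returns the index of the first
// element >= 42, or v.size() if every element is smaller:
//
//   uptr idx = InternalLowerBound(v, (uptr)42);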

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchARM64E,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchARM64E:
      return "arm64e";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. The OS-dependent implementation is responsible for
// filling in this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r"(arg) : "memory");
#endif
}
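// Illustrative use, following the comment above (`p` and `size` are
// hypothetical): calling the barrier inside a hand-rolled zeroing loop keeps
// the compiler from turning the loop into a memset call.
//
//   for (uptr i = 0; i < size; i++) {
//     p[i] = 0;
//     SanitizerBreakOptimization(&p[i]);
//   }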

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field makes it possible to distinguish between
  // those cases and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag;
// it indicates that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::usize size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H