//===-- sanitizer_allocator.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

alignas(64) static char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

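// Returns the process-wide internal allocator, lazily initializing it in the
// static placeholder storage above via double-checked locking: a fast
// acquire-load on the flag, then a re-check under internal_alloc_init_mu.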
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

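// Raw helpers: when the caller does not supply a per-thread cache, they fall
// back to the shared internal_allocator_cache, guarded by
// internal_allocator_cache_mu.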
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  constexpr usize alignment = Max<usize>(8, sizeof(void *));
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n",
         SanitizerToolName, requested_size);
  Die();
}

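// Public entry points. Allocation failures are fatal: they set the OOM flag
// and call Die() instead of returning null to internal callers.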
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  void *p = RawInternalAlloc(size, cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalReallocArray(void *addr, uptr count, uptr size,
                           InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report(
        "FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
        "cannot be represented in type size_t\n",
        SanitizerToolName, count, size);
    Die();
  }
  return InternalRealloc(addr, count * size, cache);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n",
           SanitizerToolName, count, size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  RawInternalFree(addr, cache);
}
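
// Illustrative usage sketch (comment only, not part of the interface; argument
// defaults live in the internal header, so all arguments are spelled out):
//   InternalAllocatorCache *cache = nullptr;  // null => shared cache + lock
//   void *buf = InternalAlloc(256, cache, /*alignment=*/0);
//   buf = InternalRealloc(buf, 512, cache);
//   InternalFree(buf, cache);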
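
// Acquire/release every lock the internal allocator depends on, in a fixed
// order (shared-cache mutex first, then the allocator's own locks), e.g. so
// allocator state stays consistent across fork() in multithreaded processes.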
void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator_cache_mu.Lock();
  internal_allocator()->ForceLock();
}

void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator()->ForceUnlock();
  internal_allocator_cache_mu.Unlock();
}

// LowLevelAllocator
constexpr usize kLowLevelAllocatorDefaultAlignment =
    Max<usize>(8, sizeof(void *));
constexpr uptr kMinNumPagesRounded = 16;
constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

static LowLevelAllocator Alloc;
LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }

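// Bump-pointer allocation: requests are carved out of the current mmap'ed
// chunk, and a fresh chunk is mapped when the request does not fit (any
// leftover tail of the old chunk is abandoned). Memory handed out by this
// allocator is never freed.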
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = RoundUpTo(
        size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
    allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
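
// Illustrative sketch: a tool that wants to observe every chunk the low-level
// allocator maps can register a callback with the (uptr ptr, uptr size)
// signature used above; OnLowLevelAllocate is a hypothetical example name.
//   static void OnLowLevelAllocate(uptr ptr, uptr size) {
//     // e.g. record or mark [ptr, ptr + size) in the tool's metadata.
//   }
//   ...
//   SetLowLevelAllocateCallback(OnLowLevelAllocate);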

// Allocator's OOM and other errors handling support.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

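// RSS limit flag. It is set from outside this file (e.g. by the background
// thread that monitors RSS against the configured soft RSS limit) and queried
// by allocators so they can start failing allocations once the limit is hit.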
static atomic_uint8_t rss_limit_exceeded;

bool IsRssLimitExceeded() {
  return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}

void SetRssLimitExceeded(bool limit_exceeded) {
  atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}

}  // namespace __sanitizer