//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"
#include "abort_message.h"

#include <__thread/support.h>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <__memory/aligned_alloc.h>
#include <__assert>
#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <cstdio> // for std::printf in the debug/instrumentation paths
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool and supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four-byte header
// for each chunk. Per-chunk overhead is kept low by storing pointers
// as two-byte offsets within the heap, rather than (4- or 8-byte) pointers.
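//
// For example (assuming sizeof(heap_node) == 4), a request for 30 bytes is
// rounded up to eight payload units plus one header unit, i.e. 36 bytes of
// pool space in total; see alloc_size() below.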

namespace {

// When POSIX threads are not available, make the mutex operations no-ops
#ifndef _LIBCXXABI_HAS_NO_THREADS
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif

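// RAII guard for heap_mutex: locks in the constructor and unlocks in the
// destructor. fallback_malloc() and fallback_free() each construct one
// before touching the free list.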
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

// On both 64 and 32 bit targets heap_node should have the following properties
// Size: 4
// Alignment: 2
struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};
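
// Because 'next_node' and 'len' are expressed in units of sizeof(heap_node)
// (4 bytes), these 16-bit fields could address a pool of up to 256 KiB; the
// 512-byte HEAP_SIZE above is well within that limit.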

// All pointers returned by fallback_malloc must be aligned at least as
// strictly as RequiredAlignment. Note that RequiredAlignment can be greater
// than alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
struct FallbackMaxAlignType {
} __attribute__((aligned));
const size_t RequiredAlignment = alignof(FallbackMaxAlignType);

static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
              "The required alignment must be evenly divisible by sizeof(heap_node)");

// The number of heap_node's that can fit in a chunk of memory with the size
// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

// Return a pointer to the first address, 'A', in `heap` that can actually be
// used to represent a heap_node. 'A' must be aligned so that
// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
// address should be 12 bytes after the first 16 byte boundary.
heap_node* getFirstAlignedNodeInHeap() {
  heap_node* node = (heap_node*)heap;
  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
  size_t NElemOffset = requiredOffset / sizeof(heap_node);
  return node + NElemOffset;
}
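
// Worked example (assuming RequiredAlignment == 16 and that the heap array
// itself starts on a 16-byte boundary): boundaryOffset == 0,
// requiredOffset == 12, NElemOffset == 3, so the first node begins 12 bytes
// into the heap and the payload that follows it (node + 1) lands exactly on
// the next 16-byte boundary.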

void init_heap() {
  freelist = getFirstAlignedNodeInHeap();
  freelist->next_node = offset_from_node(list_end);
  freelist->len = static_cast<heap_size>(list_end - freelist);
}

// How big a chunk we allocate: the number of heap_node units needed to hold
// 'len' bytes, plus one extra unit for the chunk header itself.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
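
// E.g. alloc_size(1) == 2 (one payload unit plus the header) and
// alloc_size(30) == 9, i.e. 36 bytes of pool space for a 30-byte request.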

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    // Check the invariant that all heap_node pointers 'p' are aligned
    // so that 'p + 1' has an alignment of at least RequiredAlignment
    _LIBCXXABI_ASSERT(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0, "");

    // Calculate the number of extra padding elements needed in order
    // to split 'p' and create a properly aligned heap_node from the tail
    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
    // will be a multiple of NodesPerAlignment.
    size_t aligned_nelems = nelems;
    if (p->len > nelems) {
      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
      aligned_nelems += remaining_len % NodesPerAlignment;
    }
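
    // Worked example (assuming NodesPerAlignment == 4): with nelems == 3 and
    // p->len == 10, remaining_len == 7 and aligned_nelems == 3 + (7 % 4) == 6,
    // leaving p->len - aligned_nelems == 4, a multiple of NodesPerAlignment,
    // so the tail node handed out below stays properly aligned.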

    // The chunk is larger than requested and we can create a properly aligned
    // heap_node from the tail. In this case we shorten 'p' and return the tail.
    if (p->len > aligned_nelems) {
      heap_node* q;
      p->len = static_cast<heap_size>(p->len - aligned_nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(aligned_nelems);
      void* ptr = q + 1;
      _LIBCXXABI_ASSERT(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0, "");
      return ptr;
    }

    // The chunk is the exact size or the chunk is larger but not large
    // enough to split due to alignment constraints.
    if (p->len >= nelems) {
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      void* ptr = p + 1;
      _LIBCXXABI_ASSERT(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0, "");
      return ptr;
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf(" p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
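    // The freed chunk sits immediately after 'p': absorb it into 'p'.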
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf(" Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf(" Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      // Splice 'cp' into the list in place of 'p'. 'cp' must take over 'p's
      // next_node in both cases; otherwise the list would be left pointing
      // at a stale offset.
      cp->next_node = p->next_node;
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf(" Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

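// Try the regular allocator first; if it fails (e.g. under memory exhaustion
// while allocating an exception object), hand out memory from the small
// emergency pool above.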
void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif !_LIBCPP_HAS_LIBRARY_ALIGNED_ALLOCATION
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if !_LIBCPP_HAS_LIBRARY_ALIGNED_ALLOCATION
    ::free(ptr);
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1
