//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

using namespace __msan;

namespace {
struct Metadata {
  uptr requested_size;
};

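// Callbacks invoked by the sanitizer allocator when it maps or unmaps a
// region. On unmap, the shadow (and, if enabled, origin) pages backing the
// region can be returned to the OS, since the application memory they
// describe is gone.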
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = SANITIZER_MMAP_BEGIN;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = __sanitizer::CompactSizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
};
using PrimaryAllocator = SizeClassAllocator32<AP32>;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
const uptr kAllocatorSpace = 0x700000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kMaxAllowedMallocSize = 1ULL << 40;

struct AP64 { // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000; // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;

#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 { // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000; // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;

#elif defined(__powerpc64__)
const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G

struct AP64 { // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000; // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__s390x__)
const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G

struct AP64 { // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000; // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif SANITIZER_LINUX && defined(__hexagon__)
const uptr kMaxAllowedMallocSize = 1UL << 30; // 1G

struct AP32 {
  static const uptr kSpaceBeg = 0x10000000;
  static const u64 kSpaceSize = 0x10000000; // 256MB
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = __sanitizer::CompactSizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
};
using PrimaryAllocator = SizeClassAllocator32<AP32>;
#elif defined(__aarch64__)
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000; // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
using PrimaryAllocator = SizeClassAllocator64<AP64>;
#endif
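// CombinedAllocator dispatches small requests to the size-class-based primary
// allocator configured above and large requests to an mmap-based secondary
// allocator.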
using Allocator = CombinedAllocator<PrimaryAllocator>;
using AllocatorCache = Allocator::AllocatorCache;
} // namespace

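// Threads normally allocate through the cache embedded in their
// MsanThreadLocalMallocStorage; the fallback cache serves calls made before a
// current thread exists and is guarded by fallback_mutex.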
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

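// Called once at startup: configures the allocator from common flags and
// computes max_malloc_size, the max_allocation_size_mb flag (if set) clamped
// to the platform-specific kMaxAllowedMallocSize.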
void __msan::MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void __msan::LockAllocator() { allocator.ForceLock(); }

void __msan::UnlockAllocator() { allocator.ForceUnlock(); }

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

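// Common allocation path: enforce the size and RSS limits, carve memory out
// of the per-thread (or fallback) cache, record the requested size in the
// chunk metadata, then poison the new memory and tag it with a heap origin
// according to the scheme described below.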
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zero) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(allocated);
  void *padding_start = reinterpret_cast<char *>(allocated) + size;
  uptr padding_size = actually_allocated_size - size;

  // - With calloc(7,1), we can set the ideal tagging:
  //     bytes 0-6: initialized, origin not set (and irrelevant)
  //     byte 7: uninitialized, origin TAG_ALLOC_PADDING
  //     bytes 8-15: uninitialized, origin TAG_ALLOC_PADDING
  // - If we have malloc(7) and __msan_get_track_origins() > 1, the 4-byte
  //   origin granularity only allows the slightly suboptimal tagging:
  //     bytes 0-6: uninitialized, origin TAG_ALLOC
  //     byte 7: uninitialized, origin TAG_ALLOC (suboptimal)
  //     bytes 8-15: uninitialized, origin TAG_ALLOC_PADDING
  // - If we have malloc(7) and __msan_get_track_origins() == 1, we use a
  //   single origin value to reduce overhead:
  //     bytes 0-6: uninitialized, origin TAG_ALLOC
  //     byte 7: uninitialized, origin TAG_ALLOC (suboptimal)
  //     bytes 8-15: uninitialized, origin TAG_ALLOC (suboptimal)
  if (__msan_get_track_origins() && flags()->poison_in_malloc &&
      (zero || (__msan_get_track_origins() > 1))) {
    stack->tag = STACK_TRACE_TAG_ALLOC_PADDING;
    Origin o2 = Origin::CreateHeapOrigin(stack);
    __msan_set_origin(padding_start, padding_size, o2.raw_id());
  }

  if (zero) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size); // Mem is already zeroed.

    if (flags()->poison_in_malloc)
      __msan_poison(padding_start, padding_size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, actually_allocated_size);

    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(
          allocated,
          __msan_get_track_origins() == 1 ? actually_allocated_size : size,
          o.raw_id());
    }
  }

  UnpoisonParam(2);
  RunMallocHooks(allocated, size);
  return allocated;
}

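// Free path: re-poison primary-allocator chunks and, when origin tracking is
// on, tag them with the deallocation stack so that later reads of the freed
// memory are reported against the free site.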
void __msan::MsanDeallocate(BufferedStackTrace *stack, void *p) {
  DCHECK(p);
  UnpoisonParam(1);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. Secondary-allocator chunks are unmapped and unpoisoned by
  // MsanMapUnmapCallback, so there is no need to poison them here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(p);
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, actually_allocated_size, o.raw_id());
    }
  }
  if (MsanThread *t = GetCurrentThread()) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

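// realloc: shrink or grow in place when the chunk is already large enough,
// poisoning any newly exposed bytes; otherwise allocate a new chunk, copy
// Min(new_size, old_size) bytes together with their shadow and origin, and
// free the old chunk.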
static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

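// Returns the beginning of the allocation that contains p, or null if p does
// not point into a live heap chunk.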
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  auto *b = reinterpret_cast<Metadata *>(allocator.GetMetaData(beg));
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return beg;
}

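// Fast-path size query; assumes p is the exact start of a live allocation
// (the __sanitizer_get_allocated_size_fast wrapper below DCHECKs this).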
static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  if (allocator.GetBlockBegin(p) != p)
    return 0;
  return AllocationSizeFast(p);
}

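// Entry points called from the libc interceptors; they follow the usual libc
// semantics, including setting errno on failure.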
void *__msan::msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *__msan::msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *__msan::msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *__msan::msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                                BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *__msan::msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *__msan::msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *__msan::msan_aligned_alloc(uptr alignment, uptr size,
                                 BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *__msan::msan_memalign(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int __msan::msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                                BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

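// Public __sanitizer_* introspection entry points, exposed to users via
// <sanitizer/allocator_interface.h>. Illustrative use (from code linked
// against MSan):
//
//   void *p = malloc(100);
//   assert(__sanitizer_get_ownership(p));              // heap pointer
//   assert(__sanitizer_get_allocated_size(p) == 100);  // requested size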
466extern "C" {
467uptr __sanitizer_get_current_allocated_bytes() {
468 uptr stats[AllocatorStatCount];
469 allocator.GetStats(s: stats);
470 return stats[AllocatorStatAllocated];
471}
472
473uptr __sanitizer_get_heap_size() {
474 uptr stats[AllocatorStatCount];
475 allocator.GetStats(s: stats);
476 return stats[AllocatorStatMapped];
477}
478
479uptr __sanitizer_get_free_bytes() { return 1; }
480
481uptr __sanitizer_get_unmapped_bytes() { return 1; }
482
483uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
484
485int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
486
487const void *__sanitizer_get_allocated_begin(const void *p) {
488 return AllocationBegin(p);
489}
490
491uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
492
493uptr __sanitizer_get_allocated_size_fast(const void *p) {
494 DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
495 uptr ret = AllocationSizeFast(p);
496 DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
497 return ret;
498}
499
500void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
501}
502