//===-- combined.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_COMBINED_H_
#define SCUDO_COMBINED_H_

#include "allocator_config_wrapper.h"
#include "atomic_helpers.h"
#include "chunk.h"
#include "common.h"
#include "flags.h"
#include "flags_parser.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
#include "size_class_allocator.h"
#include "stack_depot.h"
#include "string_utils.h"
#include "tracing.h"
#include "tsd.h"

#include "scudo/interface.h"

#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS

extern "C" inline void EmptyCallback() {}

#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
// This function is not part of the NDK so it does not appear in any public
// header files. We only declare/use it when targeting the platform.
extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
                                                     size_t num_entries);
#endif

namespace scudo {

template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
  using AllocatorConfig = BaseConfig<Config>;
  using PrimaryT =
      typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
  using SecondaryT =
      typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
  using SizeClassAllocatorT = typename PrimaryT::SizeClassAllocatorT;
  typedef Allocator<Config, PostInitCallback> ThisT;
  typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;

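  // Runs PostInitCallback at most once for this Allocator instance: the
  // pthread_once_t nonce below is a member rather than a global.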
  void callPostInitCallback() {
    pthread_once(&PostInitNonce, PostInitCallback);
  }

  struct QuarantineCallback {
    explicit QuarantineCallback(ThisT &Instance,
                                SizeClassAllocatorT &SizeClassAllocator)
        : Allocator(Instance), SizeClassAllocator(SizeClassAllocator) {}

    // Chunk recycling function, returns a quarantined chunk to the backend,
    // first making sure it hasn't been tampered with.
    void recycle(void *Ptr) {
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
        Ptr = untagPointer(Ptr);
      void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
      SizeClassAllocator.deallocate(Header.ClassId, BlockBegin);
    }

    // We take a shortcut when allocating a quarantine batch by working with the
    // appropriate class ID instead of using Size. The compiler should optimize
    // the class ID computation and work with the associated cache directly.
    void *allocate(UNUSED uptr Size) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      void *Ptr = SizeClassAllocator.allocate(QuarantineClassId);
      // Quarantine batch allocation failure is fatal.
      if (UNLIKELY(!Ptr))
        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));

      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
                                     Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header = {};
      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
      Header.State = Chunk::State::Quarantined;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);

      // Reset tag to 0 as this chunk may have been previously used for a tagged
      // user allocation.
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
              Allocator.Primary.Options.load())))
        storeTags(reinterpret_cast<uptr>(Ptr),
                  reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));

      return Ptr;
    }

    void deallocate(void *Ptr) {
      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
      Chunk::UnpackedHeader Header;
      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);

      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
      DCHECK_EQ(Header.ClassId, QuarantineClassId);
      DCHECK_EQ(Header.Offset, 0);
      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));

      Header.State = Chunk::State::Available;
      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
      SizeClassAllocator.deallocate(
          QuarantineClassId,
          reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                   Chunk::getHeaderSize()));
    }

  private:
    ThisT &Allocator;
    SizeClassAllocatorT &SizeClassAllocator;
  };

  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
  typedef typename QuarantineT::CacheT QuarantineCacheT;

  void init() {
    // Make sure that the page size is initialized if it's not a constant.
    CHECK_NE(getPageSizeCached(), 0U);

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      HashAlgorithm = Checksum::HardwareCRC32;

    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
      Cookie = static_cast<u32>(getMonotonicTime() ^
                                (reinterpret_cast<uptr>(this) >> 4));

    initFlags();
    reportUnrecognizedFlags();

    // Store some flags locally.
    if (getFlags()->may_return_null)
      Primary.Options.set(OptionBit::MayReturnNull);
    if (getFlags()->zero_contents)
      Primary.Options.setFillContentsMode(ZeroFill);
    else if (getFlags()->pattern_fill_contents)
      Primary.Options.setFillContentsMode(PatternOrZeroFill);
    if (getFlags()->dealloc_type_mismatch)
      Primary.Options.set(OptionBit::DeallocTypeMismatch);
    if (getFlags()->delete_size_mismatch)
      Primary.Options.set(OptionBit::DeleteSizeMismatch);
    if (systemSupportsMemoryTagging())
      Primary.Options.set(OptionBit::UseMemoryTagging);

    QuarantineMaxChunkSize =
        static_cast<u32>(getFlags()->quarantine_max_chunk_size);
#if SCUDO_FUCHSIA
    ZeroOnDeallocMaxSize =
        static_cast<u32>(getFlags()->zero_on_dealloc_max_size);
#endif

    Stats.init();
    // TODO(chiahungduan): Given that we support setting the default value in
    // the PrimaryConfig and CacheConfig, consider deprecating the
    // `release_to_os_interval_ms` flag.
    const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
    Primary.init(ReleaseToOsIntervalMs);
    Secondary.init(&Stats, ReleaseToOsIntervalMs);
    if (!AllocatorConfig::getQuarantineDisabled()) {
      Quarantine.init(
          static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
          static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
    }
  }

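  // The ring buffer must not be (re)initialized while the allocator is
  // disabled (e.g. across the atfork handlers): disableRingBuffer() takes
  // RingBufferInitLock and enableRingBuffer() releases it.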
  void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->enable();
    RingBufferInitLock.unlock();
  }

  void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
    RingBufferInitLock.lock();
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB)
      RB->Depot->disable();
  }

  // Initialize the embedded GWP-ASan instance. Requires the main allocator to
  // be functional, best called from PostInitCallback.
  void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
    gwp_asan::options::Options Opt;
    Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
    Opt.MaxSimultaneousAllocations =
        getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
    Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
    Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
    Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
    // Embedded GWP-ASan is locked through the Scudo atfork handler (via
    // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
    // handler.
    Opt.InstallForkHandlers = false;
    Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
    GuardedAlloc.init(Opt);

    if (Opt.InstallSignalHandlers)
      gwp_asan::segv_handler::installSignalHandlers(
          &GuardedAlloc, Printf,
          gwp_asan::backtrace::getPrintBacktraceFunction(),
          gwp_asan::backtrace::getSegvBacktraceFunction(), Opt.Recoverable);

    GuardedAllocSlotSize =
        GuardedAlloc.getAllocatorState()->maximumAllocationSize();
    Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
                            GuardedAllocSlotSize);
#endif // GWP_ASAN_HOOKS
  }

#ifdef GWP_ASAN_HOOKS
  const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
    return GuardedAlloc.getMetadataRegion();
  }

  const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
    return GuardedAlloc.getAllocatorState();
  }
#endif // GWP_ASAN_HOOKS

  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
    TSDRegistry.initThreadMaybe(this, MinimalInit);
  }

  void unmapTestOnly() {
    unmapRingBuffer();
    TSDRegistry.unmapTestOnly(this);
    Primary.unmapTestOnly();
    Secondary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
    if (getFlags()->GWP_ASAN_InstallSignalHandlers)
      gwp_asan::segv_handler::uninstallSignalHandlers();
    GuardedAlloc.uninitTestOnly();
#endif // GWP_ASAN_HOOKS
  }

  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  QuarantineT *getQuarantine() { return &Quarantine; }

  // The Cache must be provided zero-initialized.
  void initAllocator(SizeClassAllocatorT *SizeClassAllocator) {
    SizeClassAllocator->init(&Stats, &Primary);
  }

  // Release the resources used by a TSD, which involves:
  // - draining the local quarantine cache to the global quarantine;
  // - releasing the cached pointers back to the Primary;
  // - unlinking the local stats from the global ones (destroying the cache does
  //   the last two items).
  void commitBack(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    if (!AllocatorConfig::getQuarantineDisabled()) {
      Quarantine.drain(&TSD->getQuarantineCache(),
                       QuarantineCallback(*this, TSD->getSizeClassAllocator()));
    }
    TSD->getSizeClassAllocator().destroy(&Stats);
  }

  void drainCache(TSD<ThisT> *TSD) {
    TSD->assertLocked(/*BypassCheck=*/true);
    if (!AllocatorConfig::getQuarantineDisabled()) {
      Quarantine.drainAndRecycle(
          &TSD->getQuarantineCache(),
          QuarantineCallback(*this, TSD->getSizeClassAllocator()));
    }
    TSD->getSizeClassAllocator().drain();
  }
  void drainCaches() { TSDRegistry.drainCaches(this); }

  ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    auto UntaggedPtr = untagPointer(Ptr);
    if (UntaggedPtr != Ptr)
      return UntaggedPtr;
    // Secondary, or pointer allocated while memory tagging is unsupported or
    // disabled. The tag mismatch is okay in the latter case because tags will
    // not be checked.
    return addHeaderTag(Ptr);
  }

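  // Headers of Secondary blocks and of untagged Primary blocks carry the fixed
  // tag 2; iterateOverChunks() below relies on a chunk header having either
  // this tag or a zero tag when locating chunks by block address.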
  ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
      return Ptr;
    return addFixedTag(Ptr, 2);
  }

  ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
    return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
  }

  NOINLINE u32 collectStackTrace(UNUSED StackDepot *Depot) {
#ifdef HAVE_ANDROID_UNSAFE_FRAME_POINTER_CHASE
    // Discard collectStackTrace() frame and allocator function frame.
    constexpr uptr DiscardFrames = 2;
    uptr Stack[MaxTraceSize + DiscardFrames];
    uptr Size =
        android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
    Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
    return Depot->insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
#else
    return 0;
#endif
  }

  uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
                                         uptr ClassId) {
    if (!Options.get(OptionBit::UseOddEvenTags))
      return 0;

    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
    // that bit will have the pattern 010101... for consecutive blocks, which we
    // can use to determine which tag mask to use.
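    // For example, a block whose selected bit is 0 gets the exclusion mask
    // 0x5555 (even tags excluded, so the chunk receives an odd tag), while its
    // neighbor gets 0xAAAA and receives an even tag.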
    return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
  }

  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(Alignment > MaxAlignment)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAlignmentTooBig(Alignment, MaxAlignment);
    }
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.shouldSample())) {
      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
        Stats.lock();
        Stats.add(StatAllocated, GuardedAllocSlotSize);
        Stats.sub(StatFree, GuardedAllocSlotSize);
        Stats.unlock();
        return Ptr;
      }
    }
#endif // GWP_ASAN_HOOKS

    const FillContentsMode FillContents = ZeroContents ? ZeroFill
                                          : TSDRegistry.getDisableMemInit()
                                              ? NoFill
                                              : Options.getFillContentsMode();

    // If the requested size happens to be 0 (more common than you might think),
    // allocate MinAlignment bytes on top of the header. Then add the extra
    // bytes required to fulfill the alignment requirements: we allocate enough
    // to be sure that there will be an address in the block that will satisfy
    // the alignment.
    const uptr NeededSize =
        roundUp(Size, MinAlignment) +
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
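    // For example, assuming MinAlignment is 16 and the default alignment is
    // requested, a 20-byte request needs 32 bytes plus the header; requesting
    // a larger alignment instead adds the full Alignment as slack.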
    // Takes care of extravagantly large sizes as well as integer overflows.
    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
    }
    DCHECK_LE(Size, NeededSize);

    void *Block = nullptr;
    uptr ClassId = 0;
    uptr SecondaryBlockEnd = 0;
    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
      DCHECK_NE(ClassId, 0U);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Block = TSD->getSizeClassAllocator().allocate(ClassId);
      // If the allocation failed, retry in each successively larger class until
      // it fits. If it fails to fit in the largest class, fall back to the
      // Secondary.
      if (UNLIKELY(!Block)) {
        while (ClassId < SizeClassMap::LargestClassId && !Block)
          Block = TSD->getSizeClassAllocator().allocate(++ClassId);
        if (!Block)
          ClassId = 0;
      }
    }
    if (UNLIKELY(ClassId == 0)) {
      Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
                                 FillContents);
    }

    if (UNLIKELY(!Block)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      printStats();
      reportOutOfMemory(NeededSize);
    }

    const uptr UserPtr = roundUp(
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize(), Alignment);
    const uptr SizeOrUnusedBytes =
        ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size);

    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
      return initChunk(ClassId, Origin, Block, UserPtr, SizeOrUnusedBytes,
                       FillContents);
    }

    return initChunkWithMemoryTagging(ClassId, Origin, Block, UserPtr, Size,
                                      SizeOrUnusedBytes, FillContents);
  }

  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
                           UNUSED uptr Alignment = MinAlignment) {
    if (UNLIKELY(!Ptr))
      return;

    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
      GuardedAlloc.deallocate(Ptr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return;
    }
#endif // GWP_ASAN_HOOKS

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);

    void *TaggedPtr = Ptr;
    Ptr = getHeaderTaggedPointer(Ptr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);

    const Options Options = Primary.Options.load();
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
            Origin != Chunk::Origin::Malloc)
          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
                                    Header.OriginOrWasZeroed, Origin);
      }
    }

    const uptr Size = getSize(Ptr, &Header);
    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
    }

    quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
  }

  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
    initThreadMaybe();

    const Options Options = Primary.Options.load();
    if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
      if (Options.get(OptionBit::MayReturnNull))
        return nullptr;
      reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
    }

    // The following cases are handled by the C wrappers.
    DCHECK_NE(OldPtr, nullptr);
    DCHECK_NE(NewSize, 0);

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
      uptr OldSize = GuardedAlloc.getSize(OldPtr);
      void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
      if (NewPtr)
        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
      GuardedAlloc.deallocate(OldPtr);
      Stats.lock();
      Stats.add(StatFree, GuardedAllocSlotSize);
      Stats.sub(StatAllocated, GuardedAllocSlotSize);
      Stats.unlock();
      return NewPtr;
    }
#endif // GWP_ASAN_HOOKS

    void *OldTaggedPtr = OldPtr;
    OldPtr = getHeaderTaggedPointer(OldPtr);

    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);

    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, OldPtr, &Header);

    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);

    // Pointer has to be allocated with a malloc-type function. Some
    // applications think that it is OK to realloc a memalign'ed pointer, which
    // will trigger this check. It really isn't.
    if (Options.get(OptionBit::DeallocTypeMismatch)) {
      if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                  Header.OriginOrWasZeroed,
                                  Chunk::Origin::Malloc);
    }

    void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
    uptr BlockEnd;
    uptr OldSize;
    const uptr ClassId = Header.ClassId;
    if (LIKELY(ClassId)) {
      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
                 SizeClassMap::getSizeByClassId(ClassId);
      OldSize = Header.SizeOrUnusedBytes;
    } else {
      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
      OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
                            Header.SizeOrUnusedBytes);
    }
    // If the new chunk still fits in the previously allocated block (with a
    // reasonable delta), we just keep the old block, and update the chunk
    // header to reflect the size change.
    if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
      if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
        // If we have reduced the size, set the extra bytes to the fill value
        // so that we are ready to grow it again in the future.
        if (NewSize < OldSize) {
          const FillContentsMode FillContents =
              TSDRegistry.getDisableMemInit() ? NoFill
                                              : Options.getFillContentsMode();
          if (FillContents != NoFill) {
            memset(reinterpret_cast<char *>(OldTaggedPtr) + NewSize,
                   FillContents == ZeroFill ? 0 : PatternFillByte,
                   OldSize - NewSize);
          }
        }

        Header.SizeOrUnusedBytes =
            (ClassId ? NewSize
                     : BlockEnd -
                           (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
            Chunk::SizeOrUnusedBytesMask;
        Chunk::storeHeader(Cookie, OldPtr, &Header);
        if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
          if (ClassId) {
            resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                              reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                              NewSize, untagPointer(BlockEnd));
            storePrimaryAllocationStackMaybe(Options, OldPtr);
          } else {
            storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
          }
        }
        return OldTaggedPtr;
      }
    }

    // Otherwise we allocate a new one, and deallocate the old one. Some
    // allocators will allocate an even larger chunk (by a fixed factor) to
    // allow for potential further in-place realloc. The gains of such a trick
    // are currently unclear.
    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
    if (LIKELY(NewPtr)) {
      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
    }
    return NewPtr;
  }

  // TODO(kostyak): disable() is currently best-effort. There are some small
  // windows of time when an allocation could still succeed after
  // this function finishes. We will revisit that later.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.disable();
#endif
    TSDRegistry.disable();
    Stats.disable();
    if (!AllocatorConfig::getQuarantineDisabled())
      Quarantine.disable();
    Primary.disable();
    Secondary.disable();
    disableRingBuffer();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    initThreadMaybe();
    enableRingBuffer();
    Secondary.enable();
    Primary.enable();
    if (!AllocatorConfig::getQuarantineDisabled())
      Quarantine.enable();
    Stats.enable();
    TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.enable();
#endif
  }

  // The function returns the number of bytes required to store the statistics,
  // which might be larger than the number of bytes provided. Note that the
  // statistics buffer is not necessarily constant between calls to this
  // function. This can be called with a null buffer or zero size for buffer
  // sizing purposes.
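  // Typical usage: call getStats(nullptr, 0) to learn the required length,
  // allocate a buffer of at least that size, then call it again to copy the
  // NUL-terminated statistics into the buffer.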
  uptr getStats(char *Buffer, uptr Size) {
    ScopedString Str;
    const uptr Length = getStats(&Str) + 1;
    if (Length < Size)
      Size = Length;
    if (Buffer && Size) {
      memcpy(Buffer, Str.data(), Size);
      Buffer[Size - 1] = '\0';
    }
    return Length;
  }

  void printStats() {
    ScopedString Str;
    getStats(&Str);
    Str.output();
  }

  void printFragmentationInfo() {
    ScopedString Str;
    Primary.getFragmentationInfo(&Str);
    // Secondary allocator dumps the fragmentation data in getStats().
    Str.output();
  }

  void releaseToOS(ReleaseToOS ReleaseType) {
    initThreadMaybe();
    SCUDO_SCOPED_TRACE(GetReleaseToOSTraceName(ReleaseType));
    if (ReleaseType == ReleaseToOS::ForceAll)
      drainCaches();
    Primary.releaseToOS(ReleaseType);
    Secondary.releaseToOS(ReleaseType);
  }

  // Iterate over all chunks and call a callback for all busy chunks located
  // within the provided memory range. Said callback must not use this allocator
  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
                         void *Arg) {
    initThreadMaybe();
    if (archSupportsMemoryTagging())
      Base = untagPointer(Base);
    const uptr From = Base;
    const uptr To = Base + Size;
    const Options Options = Primary.Options.load();
    bool MayHaveTaggedPrimary = useMemoryTagging<AllocatorConfig>(Options);
    auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                   Arg](uptr Block) {
      if (Block < From || Block >= To)
        return;
      uptr Chunk;
      Chunk::UnpackedHeader Header;
      if (UNLIKELY(MayHaveTaggedPrimary)) {
        // A chunk header can either have a zero tag (tagged primary) or the
        // header tag (secondary, or untagged primary). We don't know which so
        // try both.
        ScopedDisableMemoryTagChecks x;
        if (!getChunkFromBlock(Block, &Chunk, &Header) &&
            !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
          return;
      } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) {
        return;
      }

      if (Header.State != Chunk::State::Allocated)
        return;

      uptr TaggedChunk = Chunk;
      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
        TaggedChunk = untagPointer(TaggedChunk);
      uptr Size;
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
        TaggedChunk = loadTag(Chunk);
        Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
      } else if (AllocatorConfig::getExactUsableSize()) {
        Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
      } else {
        Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
      }
      Callback(TaggedChunk, Size, Arg);
    };
    Primary.iterateOverBlocks(Lambda);
    Secondary.iterateOverBlocks(Lambda);
#ifdef GWP_ASAN_HOOKS
    GuardedAlloc.iterate(reinterpret_cast<void *>(Base), Size, Callback, Arg);
#endif
  }

  bool canReturnNull() {
    initThreadMaybe();
    return Primary.Options.load().get(OptionBit::MayReturnNull);
  }

  bool setOption(Option O, sptr Value) {
    initThreadMaybe();
    if (O == Option::MemtagTuning) {
      // Enabling odd/even tags involves a tradeoff between use-after-free
      // detection and buffer overflow detection. Odd/even tags make it more
      // likely for buffer overflows to be detected by increasing the size of
      // the guaranteed "red zone" around the allocation, but on the other hand
      // use-after-free is less likely to be detected because the tag space for
      // any particular chunk is cut in half. Therefore we use this tuning
      // setting to control whether odd/even tags are enabled.
      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
        Primary.Options.set(OptionBit::UseOddEvenTags);
      else if (Value == M_MEMTAG_TUNING_UAF)
        Primary.Options.clear(OptionBit::UseOddEvenTags);
      return true;
    } else {
      // We leave it to the various sub-components to decide whether or not they
      // want to handle the option, but we do not want to short-circuit
      // execution if one of the setOption calls were to return false.
      const bool PrimaryResult = Primary.setOption(O, Value);
      const bool SecondaryResult = Secondary.setOption(O, Value);
      const bool RegistryResult = TSDRegistry.setOption(O, Value);
      return PrimaryResult && SecondaryResult && RegistryResult;
    }
    return false;
  }

  ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
                                   Chunk::UnpackedHeader *Header) {
    void *BlockBegin = getBlockBegin(Ptr, Header);
    if (LIKELY(Header->ClassId)) {
      return SizeClassMap::getSizeByClassId(Header->ClassId) -
             (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin));
    }

    uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
      UntaggedPtr = untagPointer(UntaggedPtr);
      BlockBegin = untagPointer(BlockBegin);
    }
    return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
  }

  // Return the usable size for a given chunk. If MTE is enabled or if the
  // ExactUsableSize config parameter is true, we report the exact size of the
  // original allocation. Otherwise, we return the total actual usable size.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!Ptr))
      return 0;

    if (AllocatorConfig::getExactUsableSize() ||
        UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
      return getAllocSize(Ptr);

    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, Ptr);

    return getUsableSize(Ptr, &Header);
  }

  uptr getAllocSize(const void *Ptr) {
    initThreadMaybe();

#ifdef GWP_ASAN_HOOKS
    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
      return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS

    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);

    // Getting the alloc size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != Chunk::State::Allocated))
      reportInvalidChunkState(AllocatorAction::Sizing, Ptr);

    return getSize(Ptr, &Header);
  }

  void getStats(StatCounters S) {
    initThreadMaybe();
    Stats.get(S);
  }

  // Returns true if the pointer provided was allocated by the current
  // allocator instance, which is compliant with tcmalloc's ownership concept.
  // A corrupted chunk will not be reported as owned, which is WAI.
  bool isOwned(const void *Ptr) {
    initThreadMaybe();
    // If the allocation is not owned, the tags could be wrong.
    ScopedDisableMemoryTagChecks x(
        useMemoryTagging<AllocatorConfig>(Primary.Options.load()));
#ifdef GWP_ASAN_HOOKS
    if (GuardedAlloc.pointerIsMine(Ptr))
      return true;
#endif // GWP_ASAN_HOOKS
    if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
      return false;
    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
    Chunk::UnpackedHeader Header;
    return Chunk::isValid(Cookie, Ptr, &Header) &&
           Header.State == Chunk::State::Allocated;
  }

  bool useMemoryTaggingTestOnly() const {
    return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
  }
  void disableMemoryTagging() {
    // If we haven't been initialized yet, we need to initialize now in order to
    // prevent a future call to initThreadMaybe() from enabling memory tagging
    // based on feature detection. But don't call initThreadMaybe() because it
    // may end up calling the allocator (via pthread_atfork, via the post-init
    // callback), which may cause mappings to be created with memory tagging
    // enabled.
    TSDRegistry.initOnceMaybe(this);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
      Secondary.disableMemoryTagging();
      Primary.Options.clear(OptionBit::UseMemoryTagging);
    }
  }

  void setTrackAllocationStacks(bool Track) {
    initThreadMaybe();
    if (getFlags()->allocation_ring_buffer_size <= 0) {
      DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
      return;
    }

    if (Track) {
      initRingBufferMaybe();
      Primary.Options.set(OptionBit::TrackAllocationStacks);
    } else
      Primary.Options.clear(OptionBit::TrackAllocationStacks);
  }

  void setFillContents(FillContentsMode FillContents) {
    initThreadMaybe();
    Primary.Options.setFillContentsMode(FillContents);
  }

  void setAddLargeAllocationSlack(bool AddSlack) {
    initThreadMaybe();
    if (AddSlack)
      Primary.Options.set(OptionBit::AddLargeAllocationSlack);
    else
      Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
  }

  const char *getStackDepotAddress() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? reinterpret_cast<char *>(RB->Depot) : nullptr;
  }

  uptr getStackDepotSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB ? RB->StackDepotSize : 0;
  }

  const char *getRegionInfoArrayAddress() const {
    return Primary.getRegionInfoArrayAddress();
  }

  static uptr getRegionInfoArraySize() {
    return PrimaryT::getRegionInfoArraySize();
  }

  const char *getRingBufferAddress() {
    initThreadMaybe();
    return reinterpret_cast<char *>(getRingBuffer());
  }

  uptr getRingBufferSize() {
    initThreadMaybe();
    AllocationRingBuffer *RB = getRingBuffer();
    return RB && RB->RingBufferElements
               ? ringBufferSizeInBytes(RB->RingBufferElements)
               : 0;
  }

  static const uptr MaxTraceSize = 64;

  static void collectTraceMaybe(const StackDepot *Depot,
                                uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
    uptr RingPos, Size;
    if (!Depot->find(Hash, &RingPos, &Size))
      return;
    for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
      Trace[I] = static_cast<uintptr_t>(Depot->at(RingPos + I));
  }

  static void getErrorInfo(struct scudo_error_info *ErrorInfo,
                           uintptr_t FaultAddr, const char *DepotPtr,
                           size_t DepotSize, const char *RegionInfoPtr,
                           const char *RingBufferPtr, size_t RingBufferSize,
                           const char *Memory, const char *MemoryTags,
                           uintptr_t MemoryAddr, size_t MemorySize) {
    // N.B. we need to support corrupted data in any of the buffers here. We get
    // this information from an external process (the crashing process) that
    // should not be able to crash the crash dumper (crash_dump on Android).
    // See also the get_error_info_fuzzer.
    *ErrorInfo = {};
    if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
        MemoryAddr + MemorySize < MemoryAddr)
      return;

    const StackDepot *Depot = nullptr;
    if (DepotPtr) {
      // Check for a corrupted StackDepot. First we need to check whether we can
      // read the metadata, then whether the metadata matches the size.
      if (DepotSize < sizeof(*Depot))
        return;
      Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
      if (!Depot->isValid(DepotSize))
        return;
    }

    size_t NextErrorReport = 0;

    // Check for OOB in the current block and the two surrounding blocks. Beyond
    // that, UAF is more likely.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 0, 2);

    // Check the ring buffer. For primary allocations this will only find UAF;
    // for secondary allocations we can find either UAF or OOB.
    getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                           RingBufferPtr, RingBufferSize);

    // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
    // Beyond that we are likely to hit false positives.
    if (extractTag(FaultAddr) != 0)
      getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
                         RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
                         MemorySize, 2, 16);
  }

  uptr getBlockBeginTestOnly(const void *Ptr) {
    Chunk::UnpackedHeader Header;
    Chunk::loadHeader(Cookie, Ptr, &Header);
    DCHECK(Header.State == Chunk::State::Allocated);

    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
      Ptr = untagPointer(const_cast<void *>(Ptr));
    void *Begin = getBlockBegin(Ptr, &Header);
    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
      Begin = untagPointer(Begin);
    return reinterpret_cast<uptr>(Begin);
  }

private:
  typedef typename PrimaryT::SizeClassMap SizeClassMap;

  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
  static const uptr MinAlignment = 1UL << MinAlignmentLog;
  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
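  // That is, individual allocations are capped at 2 GiB on 32-bit platforms
  // and at 1 TiB on 64-bit platforms.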

  static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                "Minimal alignment must at least cover a chunk header.");
  static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
                    MinAlignment >= archMemoryTagGranuleSize(),
                "");

  static const u32 BlockMarker = 0x44554353U;

  // These are indexes into an "array" of 32-bit values that store information
  // inline with a chunk that is relevant to diagnosing memory tag faults, where
  // 0 corresponds to the address of the user memory. This means that only
  // negative indexes may be used. The smallest index that may be used is -2,
  // which corresponds to 8 bytes before the user memory, because the chunk
  // header size is 8 bytes and in allocators that support memory tagging the
  // minimum alignment is at least the tag granule size (16 on aarch64).
  static const sptr MemTagAllocationTraceIndex = -2;
  static const sptr MemTagAllocationTidIndex = -1;

  u32 Cookie = 0;
  u32 QuarantineMaxChunkSize = 0;
#if SCUDO_FUCHSIA
  u32 ZeroOnDeallocMaxSize = 0;
#endif

  GlobalStats Stats;
  PrimaryT Primary;
  SecondaryT Secondary;
  QuarantineT Quarantine;
  TSDRegistryT TSDRegistry;
  pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;

#ifdef GWP_ASAN_HOOKS
  gwp_asan::GuardedPoolAllocator GuardedAlloc;
  uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS

  struct AllocationRingBuffer {
    struct Entry {
      atomic_uptr Ptr;
      atomic_uptr AllocationSize;
      atomic_u32 AllocationTrace;
      atomic_u32 AllocationTid;
      atomic_u32 DeallocationTrace;
      atomic_u32 DeallocationTid;
    };
    StackDepot *Depot = nullptr;
    uptr StackDepotSize = 0;
    MemMapT RawRingBufferMap;
    MemMapT RawStackDepotMap;
    u32 RingBufferElements = 0;
    atomic_uptr Pos;
    // An array of Size (at least one) elements of type Entry immediately
    // follows this struct.
  };
  static_assert(sizeof(AllocationRingBuffer) %
                        alignof(typename AllocationRingBuffer::Entry) ==
                    0,
                "invalid alignment");

  // Lock to initialize the RingBuffer
  HybridMutex RingBufferInitLock;

  // Pointer to memory mapped area starting with AllocationRingBuffer struct,
  // and immediately followed by Size elements of type Entry.
  atomic_uptr RingBufferAddress = {};

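  // The acquire load below presumably pairs with a release store of
  // RingBufferAddress when the ring buffer is installed (see
  // initRingBufferMaybe()), so readers observe a fully initialized buffer.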
  AllocationRingBuffer *getRingBuffer() {
    return reinterpret_cast<AllocationRingBuffer *>(
        atomic_load(&RingBufferAddress, memory_order_acquire));
  }

  // The following might get optimized out by the compiler.
  NOINLINE void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be small. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    Chunk::UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
                                         SizeClassMap::MaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
    Header.Offset = MaxOffset & Chunk::OffsetMask;
    if (UNLIKELY(Header.Offset != MaxOffset))
      reportSanityCheckError("offset");

    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
      reportSanityCheckError("size (or unused bytes)");

    const uptr LargestClassId = SizeClassMap::LargestClassId;
    Header.ClassId = LargestClassId;
    if (UNLIKELY(Header.ClassId != LargestClassId))
      reportSanityCheckError("class ID");
  }

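  // Header->Offset is stored in units of MinAlignment (see initChunk()), so
  // this reverses the offset computation done at allocation time.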
  static inline void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
  }

  // Return the size of a chunk as requested during its allocation.
  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (LIKELY(Header->ClassId))
      return SizeOrUnusedBytes;
    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
      Ptr = untagPointer(const_cast<void *>(Ptr));
    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
  }

  ALWAYS_INLINE void *initChunk(const uptr ClassId, const Chunk::Origin Origin,
                                void *Block, const uptr UserPtr,
                                const uptr SizeOrUnusedBytes,
                                const FillContentsMode FillContents) {
    // Compute the default pointer before adding the header tag.
    const uptr DefaultAlignedPtr =
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();

    Block = addHeaderTag(Block);
    // Only do content fill when it's from the primary allocator because the
    // secondary allocator has already filled the content.
    if (ClassId != 0 && UNLIKELY(FillContents != NoFill)) {
      // This condition is not necessarily unlikely, but since memset is
      // costly, we might as well mark it as such.
      memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
             PrimaryT::getSizeByClassId(ClassId));
    }

    Chunk::UnpackedHeader Header = {};

    if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
      const uptr Offset = UserPtr - DefaultAlignedPtr;
      DCHECK_GE(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }

    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
    Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
    Chunk::storeHeader(Cookie, reinterpret_cast<void *>(addHeaderTag(UserPtr)),
                       &Header);

    return reinterpret_cast<void *>(UserPtr);
  }

  NOINLINE void *
  initChunkWithMemoryTagging(const uptr ClassId, const Chunk::Origin Origin,
                             void *Block, const uptr UserPtr, const uptr Size,
                             const uptr SizeOrUnusedBytes,
                             const FillContentsMode FillContents) {
    const Options Options = Primary.Options.load();
    DCHECK(useMemoryTagging<AllocatorConfig>(Options));

    // Compute the default pointer before adding the header tag.
    const uptr DefaultAlignedPtr =
        reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();

    void *Ptr = reinterpret_cast<void *>(UserPtr);
    void *TaggedPtr = Ptr;

    if (LIKELY(ClassId)) {
      // Init the primary chunk.
      //
      // We only need to zero or tag the contents for Primary backed
      // allocations. We only set tags for primary allocations in order to avoid
      // faulting potentially large numbers of pages for large secondary
      // allocations. We assume that guard pages are enough to protect these
      // allocations.
      //
      // FIXME: When the kernel provides a way to set the background tag of a
      // mapping, we should be able to tag secondary allocations as well.
      //
      // When memory tagging is enabled, zeroing the contents is done as part of
      // setting the tag.

      Chunk::UnpackedHeader Header;
      const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
      const uptr BlockUptr = reinterpret_cast<uptr>(Block);
      const uptr BlockEnd = BlockUptr + BlockSize;
      // If possible, try to reuse the UAF tag that was set by deallocate().
      // For simplicity, only reuse tags if we have the same start address as
      // the previous allocation. This handles the majority of cases since
      // most allocations will not be more aligned than the minimum alignment.
      //
      // We need to handle situations involving reclaimed chunks, and retag
      // the reclaimed portions if necessary. In the case where the chunk is
      // fully reclaimed, the chunk's header will be zero, which will trigger
      // the code path for new mappings and invalid chunks that prepares the
      // chunk from scratch. There are three possibilities for partial
      // reclaiming:
      //
      // (1) Header was reclaimed, data was partially reclaimed.
      // (2) Header was not reclaimed, all data was reclaimed (e.g. because
      //     data started on a page boundary).
      // (3) Header was not reclaimed, data was partially reclaimed.
      //
      // Case (1) will be handled in the same way as for full reclaiming,
      // since the header will be zero.
      //
      // We can detect case (2) by loading the tag from the start
      // of the chunk. If it is zero, it means that either all data was
      // reclaimed (since we never use zero as the chunk tag), or that the
      // previous allocation was of size zero. Either way, we need to prepare
      // a new chunk from scratch.
      //
      // We can detect case (3) by moving to the next page (if covered by the
      // chunk) and loading the tag of its first granule. If it is zero, it
      // means that all following pages may need to be retagged. On the other
      // hand, if it is nonzero, we can assume that all following pages are
      // still tagged, according to the logic that if any of the pages
      // following the next page were reclaimed, the next page would have been
      // reclaimed as well.
      uptr TaggedUserPtr;
      uptr PrevUserPtr;
      if (getChunkFromBlock(BlockUptr, &PrevUserPtr, &Header) &&
          PrevUserPtr == UserPtr &&
          (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
        uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
        const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
        if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
          PrevEnd = NextPage;
        TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
        resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
        if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
          // If an allocation needs to be zeroed (i.e. calloc) we can normally
          // avoid zeroing the memory now since we can rely on memory having
          // been zeroed on free, as this is normally done while setting the
          // UAF tag. But if tagging was disabled per-thread when the memory
          // was freed, it would not have been retagged and thus zeroed, and
          // therefore it needs to be zeroed now.
          memset(TaggedPtr, 0,
                 Min(Size, roundUp(PrevEnd - TaggedUserPtr,
                                   archMemoryTagGranuleSize())));
        } else if (Size) {
          // Clear any stack metadata that may have previously been stored in
          // the chunk data.
          memset(TaggedPtr, 0, archMemoryTagGranuleSize());
        }
      } else {
        const uptr OddEvenMask =
            computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
        TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
      }
      storePrimaryAllocationStackMaybe(Options, Ptr);
    } else {
      // Init the secondary chunk.

      Block = addHeaderTag(Block);
      Ptr = addHeaderTag(Ptr);
      storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
      storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
    }

    Chunk::UnpackedHeader Header = {};

    if (UNLIKELY(DefaultAlignedPtr != UserPtr)) {
      const uptr Offset = UserPtr - DefaultAlignedPtr;
      DCHECK_GE(Offset, 2 * sizeof(u32));
      // The BlockMarker has no security purpose, but is specifically meant for
      // the chunk iteration function that can be used in debugging situations.
      // It is the only situation where we have to locate the start of a chunk
      // based on its block address.
      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
    }

    Header.ClassId = ClassId & Chunk::ClassIdMask;
    Header.State = Chunk::State::Allocated;
    Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
    Header.SizeOrUnusedBytes = SizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
    Chunk::storeHeader(Cookie, Ptr, &Header);

    return TaggedPtr;
  }

  void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
                                   Chunk::UnpackedHeader *Header,
                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
    void *Ptr = getHeaderTaggedPointer(TaggedPtr);
    // If the quarantine is disabled, or the actual size of a chunk is 0 or
    // larger than the maximum allowed, we return the chunk directly to the
    // backend. This purposefully underflows for Size == 0.
    const bool BypassQuarantine = AllocatorConfig::getQuarantineDisabled() ||
                                  !Quarantine.getCacheSize() ||
                                  ((Size - 1) >= QuarantineMaxChunkSize) ||
                                  !Header->ClassId;
    if (BypassQuarantine)
      Header->State = Chunk::State::Available;
    else
      Header->State = Chunk::State::Quarantined;

    if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options)))
      Header->OriginOrWasZeroed = 0U;
    else {
      Header->OriginOrWasZeroed =
          Header->ClassId && !TSDRegistry.getDisableMemInit();
    }

    Chunk::storeHeader(Cookie, Ptr, Header);

    if (BypassQuarantine) {
      void *BlockBegin;
      if (LIKELY(!useMemoryTagging<AllocatorConfig>(Options))) {
        // Must do this after storeHeader because loadHeader uses a tagged ptr.
        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
          Ptr = untagPointer(Ptr);
        BlockBegin = getBlockBegin(Ptr, Header);
      } else {
        BlockBegin = retagBlock(Options, TaggedPtr, Ptr, Header, Size, true);
      }

#if SCUDO_FUCHSIA
      if (AllocatorConfig::getEnableZeroOnDealloc()) {
        // Clearing the header is incompatible with quarantine and tagging.
        // Hence, it is fine to implement it only when quarantine is bypassed.
        DCHECK(!useMemoryTagging<AllocatorConfig>(Options));
        uptr length = reinterpret_cast<uptr>(Ptr) + Size -
                      reinterpret_cast<uptr>(BlockBegin);
        if (length <= ZeroOnDeallocMaxSize)
          memset(BlockBegin, 0, length);
      }
#endif // SCUDO_FUCHSIA

      const uptr ClassId = Header->ClassId;
      if (LIKELY(ClassId)) {
        bool CacheDrained;
        {
          typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
          CacheDrained =
              TSD->getSizeClassAllocator().deallocate(ClassId, BlockBegin);
        }
        // When we have drained some blocks back to the Primary from TSD, that
        // implies that we may have the chance to release some pages as well.
        // Note that in order not to block other threads' access to the TSD,
        // release the TSD first then attempt the page release.
        if (CacheDrained)
          Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
      } else {
        Secondary.deallocate(Options, BlockBegin);
      }
    } else {
      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options)))
        retagBlock(Options, TaggedPtr, Ptr, Header, Size, false);
      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
      Quarantine.put(&TSD->getQuarantineCache(),
                     QuarantineCallback(*this, TSD->getSizeClassAllocator()),
                     Ptr, Size);
    }
  }

  NOINLINE void *retagBlock(const Options &Options, void *TaggedPtr, void *&Ptr,
                            Chunk::UnpackedHeader *Header, const uptr Size,
                            bool BypassQuarantine) {
    DCHECK(useMemoryTagging<AllocatorConfig>(Options));

    const u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
    storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
    if (Header->ClassId && !TSDRegistry.getDisableMemInit()) {
      uptr TaggedBegin, TaggedEnd;
      const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
          Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
          Header->ClassId);
      // Exclude the previous tag so that immediate use after free is
      // detected 100% of the time.
      setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
                   &TaggedEnd);
    }

    Ptr = untagPointer(Ptr);
    void *BlockBegin = getBlockBegin(Ptr, Header);
    if (BypassQuarantine && !Header->ClassId) {
      storeTags(reinterpret_cast<uptr>(BlockBegin),
                reinterpret_cast<uptr>(Ptr));
    }

    return BlockBegin;
  }

  bool getChunkFromBlock(uptr Block, uptr *Chunk,
                         Chunk::UnpackedHeader *Header) {
    *Chunk =
        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
    return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
  }

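  // Reads back the BlockMarker/Offset pair written by initChunk() and
  // initChunkWithMemoryTagging() for over-aligned chunks; blocks without the
  // marker start directly with the header, hence the bare header-size offset.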
1427 static uptr getChunkOffsetFromBlock(const char *Block) {
1428 u32 Offset = 0;
1429 if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
1430 Offset = reinterpret_cast<const u32 *>(Block)[1];
1431 return Offset + Chunk::getHeaderSize();
1432 }
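
  // Sketch of the block layout assumed by the two helpers above (offsets are
  // illustrative; the marker/offset pair is only present when the chunk does
  // not start immediately after the beginning of the block):
  //
  //   Block --> [BlockMarker u32][Offset u32][ ... ]
  //             [chunk header at Block + Offset][user chunk = *Chunk]
  //
  // so getChunkOffsetFromBlock() returns Offset + Chunk::getHeaderSize(), or
  // just the header size when no marker is stored at the start of the block.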

  // Set the tag of the granule past the end of the allocation to 0, to catch
  // linear overflows even if a previous larger allocation used the same block
  // and tag. Only do this if the granule past the end is in our block, because
  // this would otherwise lead to a SEGV if the allocation covers the entire
  // block and our block is at the end of a mapping. The tag of the next
  // block's header granule will be set to 0, so it will serve the purpose of
  // catching linear overflows in this case.
  //
  // For allocations of size 0 we do not end up storing the address tag to the
  // memory tag space, which getInlineErrorInfo() normally relies on to match
  // address tags against chunks. To allow matching in this case we store the
  // address tag in the first byte of the chunk.
  void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
    DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
    uptr UntaggedEnd = untagPointer(End);
    if (UntaggedEnd != BlockEnd) {
      storeTag(UntaggedEnd);
      if (Size == 0)
        *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
    }
  }
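
  // Worked example (assuming 16-byte tag granules): for a 48-byte allocation
  // whose tagged region is [Begin, Begin + 48), End is Begin + 48 and the
  // granule [Begin + 48, Begin + 64) is retagged to 0, so a linear overflow
  // into it faults even if a stale pointer happens to carry the block's tag.
  // For Size == 0 the pointer tag is additionally stashed in the first byte
  // of the chunk, as the comment above describes.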

  void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
                           uptr BlockEnd) {
    // Prepare the granule before the chunk to store the chunk header by
    // setting its tag to 0. Normally its tag will already be 0, but in the
    // case where a chunk holding a low alignment allocation is reused for a
    // higher alignment allocation, the chunk may already have a non-zero tag
    // from the previous allocation.
    storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());

    uptr TaggedBegin, TaggedEnd;
    setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);

    storeEndMarker(TaggedEnd, Size, BlockEnd);
    return reinterpret_cast<void *>(TaggedBegin);
  }
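
  // In other words, a freshly tagged chunk goes through three steps: zero-tag
  // the header granule just before Ptr, randomly tag [Ptr, Ptr + Size)
  // avoiding ExcludeMask, then zero-tag the granule past the end via
  // storeEndMarker(). A hypothetical caller would hand the returned
  // TaggedBegin back to the application as the usable pointer.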

  void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                         uptr BlockEnd) {
    uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
    uptr RoundNewPtr;
    if (RoundOldPtr >= NewPtr) {
      // If the allocation is shrinking we just need to set the tag past the
      // end of the allocation to 0. See explanation in storeEndMarker() above.
      RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
    } else {
      // Set the memory tag of the region
      // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
      // to the pointer tag stored in OldPtr.
      RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
    }
    storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
  }
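
  // Worked example (16-byte granules assumed): growing an allocation whose old
  // end is Base + 20 to a new end of Base + 40 gives RoundOldPtr = Base + 32,
  // so storeTags() retags the granules covering [Base + 32, Base + 40) with
  // OldPtr's tag and returns the granule-aligned end (Base + 48), which
  // storeEndMarker() then zero-tags if it is still inside the block.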

  void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace(RB->Depot);
    Ptr32[MemTagAllocationTidIndex] = getThreadID();
  }

  void storeRingBufferEntry(AllocationRingBuffer *RB, void *Ptr,
                            u32 AllocationTrace, u32 AllocationTid,
                            uptr AllocationSize, u32 DeallocationTrace,
                            u32 DeallocationTid) {
    uptr Pos = atomic_fetch_add(&RB->Pos, 1, memory_order_relaxed);
    typename AllocationRingBuffer::Entry *Entry =
        getRingBufferEntry(RB, Pos % RB->RingBufferElements);

    // First invalidate our entry so that we don't attempt to interpret a
    // partially written state in getRingBufferErrorInfo(). The fences below
    // ensure that the compiler does not move the stores to Ptr in between the
    // stores to the other fields.
    atomic_store_relaxed(&Entry->Ptr, 0);

    __atomic_signal_fence(__ATOMIC_SEQ_CST);
    atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
    atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
    atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
    atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
    atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
    __atomic_signal_fence(__ATOMIC_SEQ_CST);

    atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
  }
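
  // A minimal sketch of the matching reader side (see getRingBufferErrorInfo()
  // below): an entry is only consumed once Entry->Ptr reads back non-zero,
  //   uptr P = atomic_load_relaxed(&Entry->Ptr);
  //   if (P != 0) { /* remaining fields describe a complete record */ }
  // and the zero / fence / store-fields / fence / store-Ptr ordering above is
  // what is meant to keep a non-zero Ptr from being paired with a partially
  // written record.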

  void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
                                          uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    u32 Trace = collectStackTrace(RB->Depot);
    u32 Tid = getThreadID();

    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    Ptr32[MemTagAllocationTraceIndex] = Trace;
    Ptr32[MemTagAllocationTidIndex] = Tid;

    storeRingBufferEntry(RB, untagPointer(Ptr), Trace, Tid, Size, 0, 0);
  }

  void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
                                   u8 PrevTag, uptr Size) {
    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
      return;
    AllocationRingBuffer *RB = getRingBuffer();
    if (!RB)
      return;
    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
    u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
    u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];

    u32 DeallocationTrace = collectStackTrace(RB->Depot);
    u32 DeallocationTid = getThreadID();

    storeRingBufferEntry(RB, addFixedTag(untagPointer(Ptr), PrevTag),
                         AllocationTrace, AllocationTid, Size,
                         DeallocationTrace, DeallocationTid);
  }

  static const size_t NumErrorReports =
      sizeof(((scudo_error_info *)nullptr)->reports) /
      sizeof(((scudo_error_info *)nullptr)->reports[0]);
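
  // NumErrorReports above is simply the capacity of the
  // scudo_error_info::reports array, derived with the usual
  // sizeof(array) / sizeof(array[0]) idiom so it stays in sync with the
  // interface header.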

  static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
                                 size_t &NextErrorReport, uintptr_t FaultAddr,
                                 const StackDepot *Depot,
                                 const char *RegionInfoPtr, const char *Memory,
                                 const char *MemoryTags, uintptr_t MemoryAddr,
                                 size_t MemorySize, size_t MinDistance,
                                 size_t MaxDistance) {
    uptr UntaggedFaultAddr = untagPointer(FaultAddr);
    u8 FaultAddrTag = extractTag(FaultAddr);
    BlockInfo Info =
        PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);

    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
      if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
          Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
        return false;
      *Data = &Memory[Addr - MemoryAddr];
      *Tag = static_cast<u8>(
          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
      return true;
    };

    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
                         Chunk::UnpackedHeader *Header, const u32 **Data,
                         u8 *Tag) {
      const char *BlockBegin;
      u8 BlockBeginTag;
      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
        return false;
      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
      *ChunkAddr = Addr + ChunkOffset;

      const char *ChunkBegin;
      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
        return false;
      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
          ChunkBegin - Chunk::getHeaderSize());
      *Data = reinterpret_cast<const u32 *>(ChunkBegin);

      // Allocations of size 0 will have stashed the tag in the first byte of
      // the chunk, see storeEndMarker().
      if (Header->SizeOrUnusedBytes == 0)
        *Tag = static_cast<u8>(*ChunkBegin);

      return true;
    };

    if (NextErrorReport == NumErrorReports)
      return;

    auto CheckOOB = [&](uptr BlockAddr) {
      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
        return false;

      uptr ChunkAddr;
      Chunk::UnpackedHeader Header;
      const u32 *Data;
      uint8_t Tag;
      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
          Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
        return false;

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      R->error_type =
          UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
      R->allocation_address = ChunkAddr;
      R->allocation_size = Header.SizeOrUnusedBytes;
      if (Depot) {
        collectTraceMaybe(Depot, R->allocation_trace,
                          Data[MemTagAllocationTraceIndex]);
      }
      R->allocation_tid = Data[MemTagAllocationTidIndex];
      return NextErrorReport == NumErrorReports;
    };

    if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
      return;

    for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
        return;
  }
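
  // Scan-order example (values illustrative): with MinDistance == 0 and
  // MaxDistance == 2, CheckOOB() is tried on the faulting block first and
  // then on its immediate neighbours at BlockBegin +/- 1 * BlockSize,
  // stopping early only once the report array is full (the lambda returns
  // true only in that case).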

  static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
                                     size_t &NextErrorReport,
                                     uintptr_t FaultAddr,
                                     const StackDepot *Depot,
                                     const char *RingBufferPtr,
                                     size_t RingBufferSize) {
    auto *RingBuffer =
        reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
    size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
    if (!RingBuffer || RingBufferElements == 0 || !Depot)
      return;
    uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);

    for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
                           NextErrorReport != NumErrorReports;
         --I) {
      auto *Entry = getRingBufferEntry(RingBuffer, I % RingBufferElements);
      uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
      if (!EntryPtr)
        continue;

      uptr UntaggedEntryPtr = untagPointer(EntryPtr);
      uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
      u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
      u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
      u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
      u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);

      if (DeallocationTid) {
        // For UAF we only consider in-bounds fault addresses because
        // out-of-bounds UAF is rare and attempting to detect it is very
        // likely to result in false positives.
        if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
          continue;
      } else {
        // Ring buffer OOB is only possible with secondary allocations. In
        // this case we are guaranteed a guard region of at least a page on
        // either side of the allocation (guard page on the right, guard page
        // + tagged region on the left), so ignore any faults outside of that
        // range.
        if (FaultAddr < EntryPtr - getPageSizeCached() ||
            FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
          continue;

        // For UAF the ring buffer will contain two entries, one for the
        // allocation and another for the deallocation. Don't report buffer
        // overflow/underflow using the allocation entry if we have already
        // collected a report from the deallocation entry.
        bool Found = false;
        for (uptr J = 0; J != NextErrorReport; ++J) {
          if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
            Found = true;
            break;
          }
        }
        if (Found)
          continue;
      }

      auto *R = &ErrorInfo->reports[NextErrorReport++];
      if (DeallocationTid)
        R->error_type = USE_AFTER_FREE;
      else if (FaultAddr < EntryPtr)
        R->error_type = BUFFER_UNDERFLOW;
      else
        R->error_type = BUFFER_OVERFLOW;

      R->allocation_address = UntaggedEntryPtr;
      R->allocation_size = EntrySize;
      collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
      R->allocation_tid = AllocationTid;
      collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
      R->deallocation_tid = DeallocationTid;
    }
  }
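
  // Iteration example: Pos only grows, so the loop above visits entries from
  // the most recently written slot backwards over at most RingBufferElements
  // positions; e.g. with 4 elements and Pos == 10 the slots are visited in
  // the order 9 % 4, 8 % 4, 7 % 4, 6 % 4, i.e. 1, 0, 3, 2.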

  uptr getStats(ScopedString *Str) {
    Primary.getStats(Str);
    Secondary.getStats(Str);
    if (!AllocatorConfig::getQuarantineDisabled())
      Quarantine.getStats(Str);
    TSDRegistry.getStats(Str);
    return Str->length();
  }

  static typename AllocationRingBuffer::Entry *
  getRingBufferEntry(AllocationRingBuffer *RB, uptr N) {
    char *RBEntryStart =
        &reinterpret_cast<char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }
  static const typename AllocationRingBuffer::Entry *
  getRingBufferEntry(const AllocationRingBuffer *RB, uptr N) {
    const char *RBEntryStart =
        &reinterpret_cast<const char *>(RB)[sizeof(AllocationRingBuffer)];
    return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
        RBEntryStart)[N];
  }
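
  // Both overloads rely on the entries being laid out directly after the
  // AllocationRingBuffer header within the same mapping:
  //   [AllocationRingBuffer][Entry 0][Entry 1] ... [Entry N-1]
  // which is why indexing starts at RB plus sizeof(AllocationRingBuffer).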

  void initRingBufferMaybe() {
    ScopedLock L(RingBufferInitLock);
    if (getRingBuffer() != nullptr)
      return;

    int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
    if (ring_buffer_size <= 0)
      return;

    u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);

    // We store alloc and free stacks for each entry.
    constexpr u32 kStacksPerRingBufferEntry = 2;
    constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
    static_assert(isPowerOfTwo(kMaxU32Pow2));
    // On Android we always have 3 frames at the bottom: __start_main,
    // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
    // Allocator::allocate. This leaves 10 frames for the user app. The next
    // smallest power of two (8) would only leave 2, which is clearly too
    // little.
    constexpr u32 kFramesPerStack = 16;
    static_assert(isPowerOfTwo(kFramesPerStack));

    if (AllocationRingBufferSize > kMaxU32Pow2 / kStacksPerRingBufferEntry)
      return;
    u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
                                                     AllocationRingBufferSize));
    if (TabSize > UINT32_MAX / kFramesPerStack)
      return;
    u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);

    uptr StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
                          sizeof(atomic_u32) * TabSize;
    MemMapT DepotMap;
    DepotMap.map(
        /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
        "scudo:stack_depot");
    auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
    Depot->init(RingSize, TabSize);

    MemMapT MemMap;
    MemMap.map(
        /*Addr=*/0U,
        roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
                getPageSizeCached()),
        "scudo:ring_buffer");
    auto *RB = reinterpret_cast<AllocationRingBuffer *>(MemMap.getBase());
    RB->RawRingBufferMap = MemMap;
    RB->RingBufferElements = AllocationRingBufferSize;
    RB->Depot = Depot;
    RB->StackDepotSize = StackDepotSize;
    RB->RawStackDepotMap = DepotMap;

    atomic_store(&RingBufferAddress, reinterpret_cast<uptr>(RB),
                 memory_order_release);
  }
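
  // Worked sizing example (hypothetical flag value): with
  // allocation_ring_buffer_size == 32768, the code above computes
  //   TabSize  = roundUpPowerOfTwo(2 * 32768) = 65536
  //   RingSize = 65536 * 16                   = 1048576
  // so the depot mapping holds 1048576 atomic_u64 frame slots plus 65536
  // atomic_u32 table slots (plus the StackDepot header), and the ring buffer
  // mapping holds 32768 Entry records after the AllocationRingBuffer header.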

  void unmapRingBuffer() {
    AllocationRingBuffer *RB = getRingBuffer();
    if (RB == nullptr)
      return;
    // N.B. RawStackDepotMap is stored inside the pages mapped by
    // RawRingBufferMap, so it must be unmapped first; the order is very
    // important.
    RB->RawStackDepotMap.unmap();
    // Note that `RB->RawRingBufferMap` itself is stored on the pages it
    // manages. Copy it to a local and take over ownership before calling
    // unmap() so that nothing touches those pages once they become
    // inaccessible.
    MemMapT RawRingBufferMap = RB->RawRingBufferMap;
    RawRingBufferMap.unmap();
    atomic_store(&RingBufferAddress, 0, memory_order_release);
  }

  static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
    return sizeof(AllocationRingBuffer) +
           RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
  }

  static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
    if (Bytes < sizeof(AllocationRingBuffer)) {
      return 0;
    }
    return (Bytes - sizeof(AllocationRingBuffer)) /
           sizeof(typename AllocationRingBuffer::Entry);
  }
};

} // namespace scudo

#endif // SCUDO_COMBINED_H_