1//===-- primary32.h ---------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_PRIMARY32_H_
10#define SCUDO_PRIMARY32_H_
11
12#include "allocator_common.h"
13#include "bytemap.h"
14#include "common.h"
15#include "list.h"
16#include "local_cache.h"
17#include "options.h"
18#include "release.h"
19#include "report.h"
20#include "stats.h"
21#include "string_utils.h"
22#include "thread_annotations.h"
23
24namespace scudo {
25
// SizeClassAllocator32 is an allocator for 32 or 64-bit address space.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class each mapped region is associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches, which
// are used to transfer arrays of pointers from the global size class freelist
// to the thread specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
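//
// Illustrative sketch (not part of the allocator; values are hypothetical):
// with Config::getRegionSizeLog() == 20 (1 MiB regions), an address maps to
// its region index and owning class roughly as follows:
//
//   uptr Ptr = 0x12345678;
//   uptr RegionIndex = Ptr >> 20;            // 0x123, i.e. region 291
//   u8 Entry = PossibleRegions[RegionIndex]; // 0 if unused, else ClassId + 1
//   uptr ClassId = Entry - 1U;
//
// The actual region size log comes from the Config and may differ.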
43
44template <typename Config> class SizeClassAllocator32 {
45public:
46 typedef typename Config::CompactPtrT CompactPtrT;
47 typedef typename Config::SizeClassMap SizeClassMap;
48 static const uptr GroupSizeLog = Config::getGroupSizeLog();
49 // The bytemap can only track UINT8_MAX - 1 classes.
50 static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
51 // Regions should be large enough to hold the largest Block.
52 static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
53 "");
54 typedef SizeClassAllocator32<Config> ThisT;
55 typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
56 typedef TransferBatch<ThisT> TransferBatchT;
57 typedef BatchGroup<ThisT> BatchGroupT;
58
59 static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
60 "BatchGroupT uses the same class size as TransferBatchT");
61
62 static uptr getSizeByClassId(uptr ClassId) {
63 return (ClassId == SizeClassMap::BatchClassId)
64 ? sizeof(TransferBatchT)
65 : SizeClassMap::getSizeByClassId(ClassId);
66 }
67
68 static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
69
  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }

    // The default value in the primary config takes precedence.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
97
98 void unmapTestOnly() {
99 {
100 ScopedLock L(RegionsStashMutex);
101 while (NumberOfStashedRegions > 0) {
102 unmap(Addr: reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
103 Size: RegionSize);
104 }
105 }
106
107 uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
108 for (uptr I = 0; I < NumClasses; I++) {
109 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
110 ScopedLock L(Sci->Mutex);
111 if (Sci->MinRegionIndex < MinRegionIndex)
112 MinRegionIndex = Sci->MinRegionIndex;
113 if (Sci->MaxRegionIndex > MaxRegionIndex)
114 MaxRegionIndex = Sci->MaxRegionIndex;
115 *Sci = {};
116 }
117
118 ScopedLock L(ByteMapMutex);
119 for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
120 if (PossibleRegions[I])
121 unmap(Addr: reinterpret_cast<void *>(I * RegionSize), Size: RegionSize);
122 PossibleRegions.unmapTestOnly();
123 }
124
  // When all blocks are freed, the bytes tracked in the freelists have to add
  // up to `AllocatedUser`.
126 void verifyAllBlocksAreReleasedTestOnly() {
127 // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
128 uptr BatchClassUsedInFreeLists = 0;
129 for (uptr I = 0; I < NumClasses; I++) {
130 // We have to count BatchClassUsedInFreeLists in other regions first.
131 if (I == SizeClassMap::BatchClassId)
132 continue;
133 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
134 ScopedLock L1(Sci->Mutex);
135 uptr TotalBlocks = 0;
136 for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
137 // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
138 BatchClassUsedInFreeLists += BG.Batches.size() + 1;
139 for (const auto &It : BG.Batches)
140 TotalBlocks += It.getCount();
141 }
142
143 const uptr BlockSize = getSizeByClassId(ClassId: I);
144 DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
145 DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
146 }
147
148 SizeClassInfo *Sci = getSizeClassInfo(ClassId: SizeClassMap::BatchClassId);
149 ScopedLock L1(Sci->Mutex);
150 uptr TotalBlocks = 0;
151 for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
152 if (LIKELY(!BG.Batches.empty())) {
153 for (const auto &It : BG.Batches)
154 TotalBlocks += It.getCount();
155 } else {
        // A `BatchGroup` with an empty freelist has no `TransferBatch`
        // recording its own block, so count the `BatchGroup` block itself.
158 ++TotalBlocks;
159 }
160 }
161
162 const uptr BlockSize = getSizeByClassId(ClassId: SizeClassMap::BatchClassId);
163 DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
164 Sci->AllocatedUser / BlockSize);
165 const uptr BlocksInUse =
166 Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
167 DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
168 }
169
170 CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
171 return static_cast<CompactPtrT>(Ptr);
172 }
173
174 void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
175 return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
176 }
177
178 uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
179 const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
180 return CompactPtr & ~Mask;
181 }
182
183 uptr decompactGroupBase(uptr CompactPtrGroupBase) {
184 return CompactPtrGroupBase;
185 }
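
  // Illustrative example (hypothetical values): with GroupSizeLog == 20, a
  // compact pointer 0x12345678 has its low 20 bits masked off, so
  // compactPtrGroupBase() returns 0x12300000. On this primary a compact
  // pointer is the address itself, so decompactGroupBase() is a no-op.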
186
187 ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
188 const uptr PageSize = getPageSizeCached();
189 return BlockSize < PageSize / 16U;
190 }
191
192 ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
193 const uptr PageSize = getPageSizeCached();
194 return BlockSize > PageSize;
195 }
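
  // For example, with a cached page size of 4096 bytes, blocks smaller than
  // 256 bytes are "small" and blocks larger than 4096 bytes are "large";
  // sizes in between fall in neither category. (Example only; the page size
  // is whatever getPageSizeCached() reports at runtime.)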
196
197 u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
198 const u16 MaxBlockCount) {
199 DCHECK_LT(ClassId, NumClasses);
200 SizeClassInfo *Sci = getSizeClassInfo(ClassId);
201 ScopedLock L(Sci->Mutex);
202
203 u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
204 if (UNLIKELY(PopCount == 0)) {
205 if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
206 return 0U;
207 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
208 DCHECK_NE(PopCount, 0U);
209 }
210
211 return PopCount;
212 }
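
  // A minimal usage sketch (illustrative only; `Primary`, `Cache` and
  // `ClassId` are assumed to exist and be valid):
  //
  //   CompactPtrT Blocks[8];
  //   const u16 N = Primary.popBlocks(&Cache, ClassId, Blocks, 8);
  //   if (N != 0) { // N == 0 only if no region could be mapped for the class.
  //     // ... use the N compact pointers ...
  //     Primary.pushBlocks(&Cache, ClassId, Blocks, N);
  //   }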
213
214 // Push the array of free blocks to the designated batch group.
215 void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
216 DCHECK_LT(ClassId, NumClasses);
217 DCHECK_GT(Size, 0);
218
219 SizeClassInfo *Sci = getSizeClassInfo(ClassId);
220 if (ClassId == SizeClassMap::BatchClassId) {
221 ScopedLock L(Sci->Mutex);
222 pushBatchClassBlocks(Sci, Array, Size);
223 return;
224 }
225
226 // TODO(chiahungduan): Consider not doing grouping if the group size is not
227 // greater than the block size with a certain scale.
228
229 // Sort the blocks so that blocks belonging to the same group can be pushed
230 // together.
231 bool SameGroup = true;
232 for (u32 I = 1; I < Size; ++I) {
233 if (compactPtrGroupBase(CompactPtr: Array[I - 1]) != compactPtrGroupBase(CompactPtr: Array[I]))
234 SameGroup = false;
235 CompactPtrT Cur = Array[I];
236 u32 J = I;
237 while (J > 0 &&
238 compactPtrGroupBase(CompactPtr: Cur) < compactPtrGroupBase(CompactPtr: Array[J - 1])) {
239 Array[J] = Array[J - 1];
240 --J;
241 }
242 Array[J] = Cur;
243 }
244
245 ScopedLock L(Sci->Mutex);
246 pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
247 }
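
  // Illustrative example of the grouping above (hypothetical 1 MiB groups):
  // pushing {0x20001000, 0x10002000, 0x10001000} first sorts the array by
  // group base into {0x10002000, 0x10001000, 0x20001000}; the two blocks of
  // group 0x10000000 and the single block of group 0x20000000 are then
  // appended to their respective BatchGroups.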
248
249 void disable() NO_THREAD_SAFETY_ANALYSIS {
250 // The BatchClassId must be locked last since other classes can use it.
251 for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
252 if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
253 continue;
254 getSizeClassInfo(ClassId: static_cast<uptr>(I))->Mutex.lock();
255 }
256 getSizeClassInfo(ClassId: SizeClassMap::BatchClassId)->Mutex.lock();
257 RegionsStashMutex.lock();
258 ByteMapMutex.lock();
259 }
260
261 void enable() NO_THREAD_SAFETY_ANALYSIS {
262 ByteMapMutex.unlock();
263 RegionsStashMutex.unlock();
264 getSizeClassInfo(ClassId: SizeClassMap::BatchClassId)->Mutex.unlock();
265 for (uptr I = 0; I < NumClasses; I++) {
266 if (I == SizeClassMap::BatchClassId)
267 continue;
268 getSizeClassInfo(ClassId: I)->Mutex.unlock();
269 }
270 }
271
272 template <typename F> void iterateOverBlocks(F Callback) {
273 uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
274 for (uptr I = 0; I < NumClasses; I++) {
275 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
276 // TODO: The call of `iterateOverBlocks` requires disabling
277 // SizeClassAllocator32. We may consider locking each region on demand
278 // only.
279 Sci->Mutex.assertHeld();
280 if (Sci->MinRegionIndex < MinRegionIndex)
281 MinRegionIndex = Sci->MinRegionIndex;
282 if (Sci->MaxRegionIndex > MaxRegionIndex)
283 MaxRegionIndex = Sci->MaxRegionIndex;
284 }
285
286 // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
287 ByteMapMutex.assertHeld();
288
289 for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
290 if (PossibleRegions[I] &&
291 (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
292 const uptr BlockSize = getSizeByClassId(ClassId: PossibleRegions[I] - 1U);
293 const uptr From = I * RegionSize;
294 const uptr To = From + (RegionSize / BlockSize) * BlockSize;
295 for (uptr Block = From; Block < To; Block += BlockSize)
296 Callback(Block);
297 }
298 }
299 }
300
301 void getStats(ScopedString *Str) {
302 // TODO(kostyak): get the RSS per region.
303 uptr TotalMapped = 0;
304 uptr PoppedBlocks = 0;
305 uptr PushedBlocks = 0;
306 for (uptr I = 0; I < NumClasses; I++) {
307 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
308 ScopedLock L(Sci->Mutex);
309 TotalMapped += Sci->AllocatedUser;
310 PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
311 PushedBlocks += Sci->FreeListInfo.PushedBlocks;
312 }
313 Str->append(Format: "Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
314 "remains %zu\n",
315 TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
316 for (uptr I = 0; I < NumClasses; I++) {
317 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
318 ScopedLock L(Sci->Mutex);
319 getStats(Str, I, Sci);
320 }
321 }
322
323 void getFragmentationInfo(ScopedString *Str) {
324 Str->append(
325 Format: "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
326 getPageSizeCached());
327
328 for (uptr I = 1; I < NumClasses; I++) {
329 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
330 ScopedLock L(Sci->Mutex);
331 getSizeClassFragmentationInfo(Sci, ClassId: I, Str);
332 }
333 }
334
335 bool setOption(Option O, sptr Value) {
336 if (O == Option::ReleaseInterval) {
337 const s32 Interval = Max(
338 Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
339 Config::getMinReleaseToOsIntervalMs());
340 atomic_store_relaxed(A: &ReleaseToOsIntervalMs, V: Interval);
341 return true;
342 }
343 // Not supported by the Primary, but not an error either.
344 return true;
345 }
346
347 uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
348 SizeClassInfo *Sci = getSizeClassInfo(ClassId);
349 // TODO: Once we have separate locks like primary64, we may consider using
350 // tryLock() as well.
351 ScopedLock L(Sci->Mutex);
352 return releaseToOSMaybe(Sci, ClassId, ReleaseType);
353 }
354
355 uptr releaseToOS(ReleaseToOS ReleaseType) {
356 uptr TotalReleasedBytes = 0;
357 for (uptr I = 0; I < NumClasses; I++) {
358 if (I == SizeClassMap::BatchClassId)
359 continue;
360 SizeClassInfo *Sci = getSizeClassInfo(ClassId: I);
361 ScopedLock L(Sci->Mutex);
362 TotalReleasedBytes += releaseToOSMaybe(Sci, ClassId: I, ReleaseType);
363 }
364 return TotalReleasedBytes;
365 }
366
367 const char *getRegionInfoArrayAddress() const { return nullptr; }
368 static uptr getRegionInfoArraySize() { return 0; }
369
370 static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
371 UNUSED uptr Ptr) {
372 return {};
373 }
374
375 AtomicOptions Options;
376
377private:
378 static const uptr NumClasses = SizeClassMap::NumClasses;
379 static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
380 static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >>
381 Config::getRegionSizeLog();
382 static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
383 typedef FlatByteMap<NumRegions> ByteMap;
384
385 struct ReleaseToOsInfo {
386 uptr BytesInFreeListAtLastCheckpoint;
387 uptr RangesReleased;
388 uptr LastReleasedBytes;
389 u64 LastReleaseAtNs;
390 };
391
392 struct BlocksInfo {
393 SinglyLinkedList<BatchGroupT> BlockList = {};
394 uptr PoppedBlocks = 0;
395 uptr PushedBlocks = 0;
396 };
397
398 struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
399 HybridMutex Mutex;
400 BlocksInfo FreeListInfo GUARDED_BY(Mutex);
401 uptr CurrentRegion GUARDED_BY(Mutex);
402 uptr CurrentRegionAllocated GUARDED_BY(Mutex);
403 u32 RandState;
404 uptr AllocatedUser GUARDED_BY(Mutex);
405 // Lowest & highest region index allocated for this size class, to avoid
406 // looping through the whole NumRegions.
407 uptr MinRegionIndex GUARDED_BY(Mutex);
408 uptr MaxRegionIndex GUARDED_BY(Mutex);
409 ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
410 };
411 static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
412
413 uptr computeRegionId(uptr Mem) {
414 const uptr Id = Mem >> Config::getRegionSizeLog();
415 CHECK_LT(Id, NumRegions);
416 return Id;
417 }
418
419 uptr allocateRegionSlow() {
420 uptr MapSize = 2 * RegionSize;
421 const uptr MapBase = reinterpret_cast<uptr>(
422 map(Addr: nullptr, Size: MapSize, Name: "scudo:primary", MAP_ALLOWNOMEM));
423 if (!MapBase)
424 return 0;
425 const uptr MapEnd = MapBase + MapSize;
426 uptr Region = MapBase;
427 if (isAligned(X: Region, Alignment: RegionSize)) {
428 ScopedLock L(RegionsStashMutex);
429 if (NumberOfStashedRegions < MaxStashedRegions)
430 RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
431 else
432 MapSize = RegionSize;
433 } else {
434 Region = roundUp(X: MapBase, Boundary: RegionSize);
435 unmap(Addr: reinterpret_cast<void *>(MapBase), Size: Region - MapBase);
436 MapSize = RegionSize;
437 }
438 const uptr End = Region + MapSize;
439 if (End != MapEnd)
440 unmap(Addr: reinterpret_cast<void *>(End), Size: MapEnd - End);
441
442 DCHECK_EQ(Region % RegionSize, 0U);
443 static_assert(Config::getRegionSizeLog() == GroupSizeLog,
444 "Memory group should be the same size as Region");
445
446 return Region;
447 }
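
  // For example (hypothetical addresses, 1 MiB regions): a 2 MiB map returning
  // 0x10080000 is not region-aligned, so the region starts at the rounded-up
  // 0x10100000 and the leading/trailing slack is unmapped. A map returning
  // 0x10100000 is already aligned, so [0x10100000, 0x10200000) becomes the
  // region and, if the stash has room, [0x10200000, 0x10300000) is stashed for
  // a later request.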
448
449 uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
450 DCHECK_LT(ClassId, NumClasses);
451 uptr Region = 0;
452 {
453 ScopedLock L(RegionsStashMutex);
454 if (NumberOfStashedRegions > 0)
455 Region = RegionsStash[--NumberOfStashedRegions];
456 }
457 if (!Region)
458 Region = allocateRegionSlow();
459 if (LIKELY(Region)) {
460 // Sci->Mutex is held by the caller, updating the Min/Max is safe.
461 const uptr RegionIndex = computeRegionId(Mem: Region);
462 if (RegionIndex < Sci->MinRegionIndex)
463 Sci->MinRegionIndex = RegionIndex;
464 if (RegionIndex > Sci->MaxRegionIndex)
465 Sci->MaxRegionIndex = RegionIndex;
466 ScopedLock L(ByteMapMutex);
467 PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
468 }
469 return Region;
470 }
471
472 SizeClassInfo *getSizeClassInfo(uptr ClassId) {
473 DCHECK_LT(ClassId, NumClasses);
474 return &SizeClassInfoArray[ClassId];
475 }
476
477 void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
478 REQUIRES(Sci->Mutex) {
479 DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
480
    // Free blocks are recorded by TransferBatch in the freelist for all
    // size-classes. In addition, TransferBatch is allocated from BatchClassId.
    // To avoid using an additional block to record the free blocks in
    // BatchClassId, they are self-contained, i.e., a TransferBatch records the
    // block address of itself. See the figure below:
    //
    // TransferBatch at 0xABCD
    // +----------------------------+
    // | Free blocks' addr          |
    // | +------+------+------+     |
    // | |0xABCD|...   |...   |     |
    // | +------+------+------+     |
    // +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block
    // used by the TransferBatch itself is also free for use. We don't need to
    // recycle the TransferBatch. Correctness is maintained by the invariant:
    //
    //   Each popBlocks() request returns the entire TransferBatch. Returning
    //   part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that the TransferBatch won't leak its own address while
    // it's still holding other valid data.
    //
    // Besides, BatchGroup is also allocated from BatchClassId and has its
    // address recorded in a TransferBatch too. To maintain correctness,
    //
    //   The address of the BatchGroup is always recorded in the last
    //   TransferBatch in the freelist (which also implies that the freelist
    //   should only be updated with push_front). Once the last TransferBatch
    //   is popped, the block used by the BatchGroup is also free for use.
    //
    // With this approach, the blocks used by BatchGroup and TransferBatch are
    // reusable and no additional space is needed for them.
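    //
    // As an illustration (hypothetical addresses, assuming a TransferBatch can
    // hold at least three entries): after pushing the batch-class blocks
    // {0xA000, 0xB000, 0xC000} into an empty freelist,
    //
    //   BG (constructed on block 0xC000)
    //     Batches: TB (constructed on block 0xB000)
    //                records 0xB000, 0xC000 (the BG block), 0xA000
    //
    // Popping that TransferBatch hands out all three recorded blocks, at which
    // point the blocks backing the TB and the BG are themselves given out.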
515
516 Sci->FreeListInfo.PushedBlocks += Size;
517 BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
518
519 if (BG == nullptr) {
520 // Construct `BatchGroup` on the last element.
521 BG = reinterpret_cast<BatchGroupT *>(
522 decompactPtr(ClassId: SizeClassMap::BatchClassId, CompactPtr: Array[Size - 1]));
523 --Size;
524 BG->Batches.clear();
525 // BatchClass hasn't enabled memory group. Use `0` to indicate there's no
526 // memory group here.
527 BG->CompactPtrGroupBase = 0;
528 // `BG` is also the block of BatchClassId. Note that this is different
529 // from `CreateGroup` in `pushBlocksImpl`
530 BG->PushedBlocks = 1;
531 BG->BytesInBGAtLastCheckpoint = 0;
532 BG->MaxCachedPerBatch =
533 CacheT::getMaxCached(getSizeByClassId(ClassId: SizeClassMap::BatchClassId));
534
535 Sci->FreeListInfo.BlockList.push_front(BG);
536 }
537
538 if (UNLIKELY(Size == 0))
539 return;
540
    // This happens in two cases:
    //  1. A new `BatchGroup` was just allocated.
    //  2. Only one block was pushed while the freelist was empty.
544 if (BG->Batches.empty()) {
545 // Construct the `TransferBatch` on the last element.
546 TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
547 decompactPtr(ClassId: SizeClassMap::BatchClassId, CompactPtr: Array[Size - 1]));
548 TB->clear();
549 // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
550 // recorded in the TransferBatch.
551 TB->add(Array[Size - 1]);
552 TB->add(
553 compactPtr(ClassId: SizeClassMap::BatchClassId, Ptr: reinterpret_cast<uptr>(BG)));
554 --Size;
555 DCHECK_EQ(BG->PushedBlocks, 1U);
556 // `TB` is also the block of BatchClassId.
557 BG->PushedBlocks += 1;
558 BG->Batches.push_front(TB);
559 }
560
561 TransferBatchT *CurBatch = BG->Batches.front();
562 DCHECK_NE(CurBatch, nullptr);
563
564 for (u32 I = 0; I < Size;) {
565 u16 UnusedSlots =
566 static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
567 if (UnusedSlots == 0) {
568 CurBatch = reinterpret_cast<TransferBatchT *>(
569 decompactPtr(ClassId: SizeClassMap::BatchClassId, CompactPtr: Array[I]));
570 CurBatch->clear();
571 // Self-contained
572 CurBatch->add(Array[I]);
573 ++I;
574 // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
575 // BatchClassId.
576 BG->Batches.push_front(CurBatch);
577 UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
578 }
      // `UnusedSlots` is u16 so the result will also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
581 CurBatch->appendFromArray(&Array[I], AppendSize);
582 I += AppendSize;
583 }
584
585 BG->PushedBlocks += Size;
586 }
  // Push the blocks to their batch group. The layout will be like:
  //
  //   FreeListInfo.BlockList -> BG -> BG -> BG
  //                             |     |     |
  //                             v     v     v
  //                             TB    TB    TB
  //                             |
  //                             v
  //                             TB
  //
  // Each BatchGroup (BG) is associated with a unique group id and the free
  // blocks are managed by a list of TransferBatches (TB). To reduce the time
  // spent on inserting blocks, both the BGs and the input `Array` are expected
  // to be sorted so that maintaining the sorted order is cheaper.
  // Use `SameGroup=true` to indicate that all blocks in the array are from the
  // same group; the group id check for each block is then skipped.
  //
  // The region mutex needs to be held while calling this method.
605 void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
606 CompactPtrT *Array, u32 Size, bool SameGroup = false)
607 REQUIRES(Sci->Mutex) {
608 DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
609 DCHECK_GT(Size, 0U);
610
611 auto CreateGroup = [&](uptr CompactPtrGroupBase) {
612 BatchGroupT *BG =
613 reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
614 BG->Batches.clear();
615 TransferBatchT *TB =
616 reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
617 TB->clear();
618
619 BG->CompactPtrGroupBase = CompactPtrGroupBase;
620 BG->Batches.push_front(TB);
621 BG->PushedBlocks = 0;
622 BG->BytesInBGAtLastCheckpoint = 0;
623 BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
624
625 return BG;
626 };
627
628 auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
629 SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
630 TransferBatchT *CurBatch = Batches.front();
631 DCHECK_NE(CurBatch, nullptr);
632
633 for (u32 I = 0; I < Size;) {
634 DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
635 u16 UnusedSlots =
636 static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
637 if (UnusedSlots == 0) {
638 CurBatch =
639 reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
640 CurBatch->clear();
641 Batches.push_front(CurBatch);
642 UnusedSlots = BG->MaxCachedPerBatch;
643 }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
646 CurBatch->appendFromArray(&Array[I], AppendSize);
647 I += AppendSize;
648 }
649
650 BG->PushedBlocks += Size;
651 };
652
653 Sci->FreeListInfo.PushedBlocks += Size;
654 BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
655
656 // In the following, `Cur` always points to the BatchGroup for blocks that
657 // will be pushed next. `Prev` is the element right before `Cur`.
658 BatchGroupT *Prev = nullptr;
659
660 while (Cur != nullptr &&
661 compactPtrGroupBase(CompactPtr: Array[0]) > Cur->CompactPtrGroupBase) {
662 Prev = Cur;
663 Cur = Cur->Next;
664 }
665
666 if (Cur == nullptr ||
667 compactPtrGroupBase(CompactPtr: Array[0]) != Cur->CompactPtrGroupBase) {
668 Cur = CreateGroup(compactPtrGroupBase(CompactPtr: Array[0]));
669 if (Prev == nullptr)
670 Sci->FreeListInfo.BlockList.push_front(Cur);
671 else
672 Sci->FreeListInfo.BlockList.insert(Prev, Cur);
673 }
674
675 // All the blocks are from the same group, just push without checking group
676 // id.
677 if (SameGroup) {
678 for (u32 I = 0; I < Size; ++I)
679 DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
680
681 InsertBlocks(Cur, Array, Size);
682 return;
683 }
684
    // The blocks are sorted by group id. Determine each segment of blocks that
    // belongs to the same group and push the segment to its group together.
687 u32 Count = 1;
688 for (u32 I = 1; I < Size; ++I) {
689 if (compactPtrGroupBase(CompactPtr: Array[I - 1]) != compactPtrGroupBase(CompactPtr: Array[I])) {
690 DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
691 InsertBlocks(Cur, Array + I - Count, Count);
692
693 while (Cur != nullptr &&
694 compactPtrGroupBase(CompactPtr: Array[I]) > Cur->CompactPtrGroupBase) {
695 Prev = Cur;
696 Cur = Cur->Next;
697 }
698
699 if (Cur == nullptr ||
700 compactPtrGroupBase(CompactPtr: Array[I]) != Cur->CompactPtrGroupBase) {
701 Cur = CreateGroup(compactPtrGroupBase(CompactPtr: Array[I]));
702 DCHECK_NE(Prev, nullptr);
703 Sci->FreeListInfo.BlockList.insert(Prev, Cur);
704 }
705
706 Count = 1;
707 } else {
708 ++Count;
709 }
710 }
711
712 InsertBlocks(Cur, Array + Size - Count, Count);
713 }
714
715 u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
716 CompactPtrT *ToArray, const u16 MaxBlockCount)
717 REQUIRES(Sci->Mutex) {
718 if (Sci->FreeListInfo.BlockList.empty())
719 return 0U;
720
721 SinglyLinkedList<TransferBatchT> &Batches =
722 Sci->FreeListInfo.BlockList.front()->Batches;
723
724 if (Batches.empty()) {
725 DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
726 BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
727 Sci->FreeListInfo.BlockList.pop_front();
728
      // The block used by the `BatchGroup` is from BatchClassId. Turn the
      // block into a `TransferBatch` with a single block.
731 TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
732 ToArray[0] =
733 compactPtr(ClassId: SizeClassMap::BatchClassId, Ptr: reinterpret_cast<uptr>(TB));
734 Sci->FreeListInfo.PoppedBlocks += 1;
735 return 1U;
736 }
737
    // So far, instead of always filling blocks up to `MaxBlockCount`, we only
    // examine a single `TransferBatch` to minimize the time spent in the
    // primary allocator. Besides, the sizes of `TransferBatch` and
    // `CacheT::getMaxCached()` may also impact the time spent on accessing the
    // primary allocator.
    // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
    // blocks and/or adjust the size of `TransferBatch` according to
    // `CacheT::getMaxCached()`.
746 TransferBatchT *B = Batches.front();
747 DCHECK_NE(B, nullptr);
748 DCHECK_GT(B->getCount(), 0U);
749
    // BatchClassId should always take all blocks in the TransferBatch. Read
    // the comment in `pushBatchClassBlocks()` for more details.
752 const u16 PopCount = ClassId == SizeClassMap::BatchClassId
753 ? B->getCount()
754 : Min(MaxBlockCount, B->getCount());
755 B->moveNToArray(ToArray, PopCount);
756
757 // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
758 // done without holding `Mutex`.
759 if (B->empty()) {
760 Batches.pop_front();
761 // `TransferBatch` of BatchClassId is self-contained, no need to
762 // deallocate. Read the comment in `pushBatchClassBlocks()` for more
763 // details.
764 if (ClassId != SizeClassMap::BatchClassId)
765 C->deallocate(SizeClassMap::BatchClassId, B);
766
767 if (Batches.empty()) {
768 BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
769 Sci->FreeListInfo.BlockList.pop_front();
770
        // We don't keep a BatchGroup with zero blocks to avoid empty-checking
        // while allocating. Note that the block used for constructing the
        // BatchGroup is recorded as a free block in the last element of
        // BatchGroup::Batches, which means that once we pop the last
        // TransferBatch, that block is implicitly deallocated.
776 if (ClassId != SizeClassMap::BatchClassId)
777 C->deallocate(SizeClassMap::BatchClassId, BG);
778 }
779 }
780
781 Sci->FreeListInfo.PoppedBlocks += PopCount;
782 return PopCount;
783 }
784
785 NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
786 REQUIRES(Sci->Mutex) {
787 uptr Region;
788 uptr Offset;
789 // If the size-class currently has a region associated to it, use it. The
790 // newly created blocks will be located after the currently allocated memory
791 // for that region (up to RegionSize). Otherwise, create a new region, where
792 // the new blocks will be carved from the beginning.
793 if (Sci->CurrentRegion) {
794 Region = Sci->CurrentRegion;
795 DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
796 Offset = Sci->CurrentRegionAllocated;
797 } else {
798 DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
799 Region = allocateRegion(Sci, ClassId);
800 if (UNLIKELY(!Region))
801 return false;
802 C->getStats().add(StatMapped, RegionSize);
803 Sci->CurrentRegion = Region;
804 Offset = 0;
805 }
806
807 const uptr Size = getSizeByClassId(ClassId);
808 const u16 MaxCount = CacheT::getMaxCached(Size);
809 DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lower of the two). This
    // will not be 0 as we ensure that a region can hold at least one block
    // (via static_assert and at the end of this function).
815 const u32 NumberOfBlocks =
816 Min(A: MaxNumBatches * MaxCount,
817 B: static_cast<u32>((RegionSize - Offset) / Size));
818 DCHECK_GT(NumberOfBlocks, 0U);
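    // E.g., with hypothetical values MaxNumBatches == 8, MaxCount == 13,
    // RegionSize == 1 MiB, Offset == 1040384 and Size == 1024, the region only
    // has room for (1048576 - 1040384) / 1024 == 8 more blocks, so
    // NumberOfBlocks is 8 rather than 8 * 13 == 104.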
819
820 constexpr u32 ShuffleArraySize =
821 MaxNumBatches * TransferBatchT::MaxNumCached;
822 // Fill the transfer batches and put them in the size-class freelist. We
823 // need to randomize the blocks for security purposes, so we first fill a
824 // local array that we then shuffle before populating the batches.
825 CompactPtrT ShuffleArray[ShuffleArraySize];
826 DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
827
828 uptr P = Region + Offset;
829 for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
830 ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
831
832 if (ClassId != SizeClassMap::BatchClassId) {
833 u32 N = 1;
834 uptr CurGroup = compactPtrGroupBase(CompactPtr: ShuffleArray[0]);
835 for (u32 I = 1; I < NumberOfBlocks; I++) {
836 if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
837 shuffle(ShuffleArray + I - N, N, &Sci->RandState);
838 pushBlocksImpl(C, ClassId, Sci, Array: ShuffleArray + I - N, Size: N,
839 /*SameGroup=*/SameGroup: true);
840 N = 1;
841 CurGroup = compactPtrGroupBase(CompactPtr: ShuffleArray[I]);
842 } else {
843 ++N;
844 }
845 }
846
847 shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
848 pushBlocksImpl(C, ClassId, Sci, Array: &ShuffleArray[NumberOfBlocks - N], Size: N,
849 /*SameGroup=*/SameGroup: true);
850 } else {
851 pushBatchClassBlocks(Sci, Array: ShuffleArray, Size: NumberOfBlocks);
852 }
853
    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `pushBlocks` and `popBlocks`, which are the external
    // interfaces. `populateFreeList` is an internal interface, so we subtract
    // the blocks pushed here to avoid skewing the stats.
858 Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
859
860 const uptr AllocatedUser = Size * NumberOfBlocks;
861 C->getStats().add(StatFree, AllocatedUser);
862 DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
863 // If there is not enough room in the region currently associated to fit
864 // more blocks, we deassociate the region by resetting CurrentRegion and
865 // CurrentRegionAllocated. Otherwise, update the allocated amount.
866 if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
867 Sci->CurrentRegion = 0;
868 Sci->CurrentRegionAllocated = 0;
869 } else {
870 Sci->CurrentRegionAllocated += AllocatedUser;
871 }
872 Sci->AllocatedUser += AllocatedUser;
873
874 return true;
875 }
876
877 void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
878 REQUIRES(Sci->Mutex) {
879 if (Sci->AllocatedUser == 0)
880 return;
881 const uptr BlockSize = getSizeByClassId(ClassId);
882 const uptr InUse =
883 Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
884 const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
885 uptr PushedBytesDelta = 0;
886 if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
887 PushedBytesDelta =
888 BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
889 }
890 const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
891 Str->append(Format: " %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
892 "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
893 "latest pushed bytes: %6zuK\n",
894 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
895 Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
896 InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
897 Sci->ReleaseInfo.LastReleasedBytes >> 10,
898 PushedBytesDelta >> 10);
899 }
900
901 void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
902 ScopedString *Str) REQUIRES(Sci->Mutex) {
903 const uptr BlockSize = getSizeByClassId(ClassId);
904 const uptr First = Sci->MinRegionIndex;
905 const uptr Last = Sci->MaxRegionIndex;
906 const uptr Base = First * RegionSize;
907 const uptr NumberOfRegions = Last - First + 1U;
908 auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
909 ScopedLock L(ByteMapMutex);
910 return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
911 };
912
913 FragmentationRecorder Recorder;
914 if (!Sci->FreeListInfo.BlockList.empty()) {
915 PageReleaseContext Context =
916 markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
917 ReleaseType: ReleaseToOS::ForceAll);
918 releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
919 }
920
921 const uptr PageSize = getPageSizeCached();
922 const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
923 const uptr InUseBlocks =
924 Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
925 uptr AllocatedPagesCount = 0;
926 if (TotalBlocks != 0U) {
927 for (uptr I = 0; I < NumberOfRegions; ++I) {
928 if (SkipRegion(I))
929 continue;
930 AllocatedPagesCount += RegionSize / PageSize;
931 }
932
933 DCHECK_NE(AllocatedPagesCount, 0U);
934 }
935
936 DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
937 const uptr InUsePages =
938 AllocatedPagesCount - Recorder.getReleasedPagesCount();
939 const uptr InUseBytes = InUsePages * PageSize;
940
941 uptr Integral;
942 uptr Fractional;
943 computePercentage(Numerator: BlockSize * InUseBlocks, Denominator: InUsePages * PageSize, Integral: &Integral,
944 Fractional: &Fractional);
945 Str->append(Format: " %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
946 "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
947 ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
948 AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
949 }
950
951 NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
952 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
953 REQUIRES(Sci->Mutex) {
954 const uptr BlockSize = getSizeByClassId(ClassId);
955
956 DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
957 const uptr BytesInFreeList =
958 Sci->AllocatedUser -
959 (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
960 BlockSize;
961
962 if (UNLIKELY(BytesInFreeList == 0))
963 return 0;
964
965 // ====================================================================== //
966 // 1. Check if we have enough free blocks and if it's worth doing a page
967 // release.
968 // ====================================================================== //
969 if (ReleaseType != ReleaseToOS::ForceAll &&
970 !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
971 ReleaseType)) {
972 return 0;
973 }
974
975 const uptr First = Sci->MinRegionIndex;
976 const uptr Last = Sci->MaxRegionIndex;
977 DCHECK_NE(Last, 0U);
978 DCHECK_LE(First, Last);
979 uptr TotalReleasedBytes = 0;
980 const uptr Base = First * RegionSize;
981 const uptr NumberOfRegions = Last - First + 1U;
982
983 // ==================================================================== //
984 // 2. Mark the free blocks and we can tell which pages are in-use by
985 // querying `PageReleaseContext`.
986 // ==================================================================== //
987 PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
988 NumberOfRegions, ReleaseType);
989 if (!Context.hasBlockMarked())
990 return 0;
991
992 // ==================================================================== //
993 // 3. Release the unused physical pages back to the OS.
994 // ==================================================================== //
995 ReleaseRecorder Recorder(Base);
996 auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
997 ScopedLock L(ByteMapMutex);
998 return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
999 };
1000 releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
1001
1002 if (Recorder.getReleasedRangesCount() > 0) {
1003 Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1004 Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
1005 Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
1006 TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
1007 }
1008 Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
1009
1010 return TotalReleasedBytes;
1011 }
1012
1013 bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
1014 uptr BytesInFreeList, ReleaseToOS ReleaseType)
1015 REQUIRES(Sci->Mutex) {
1016 DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
1017 const uptr PageSize = getPageSizeCached();
1018
1019 if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
1020 Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1021
    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, the
    // following is the region usage,
    //
    //  BytesInFreeListAtLastCheckpoint   AllocatedUser
    //                v                         v
    //  |--------------------------------------->
    //         ^                   ^
    //  BytesInFreeList     ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do the page release.
    // If we don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may have to wait longer for enough
    // freed blocks because we miss the bytes between BytesInFreeList and
    // BytesInFreeListAtLastCheckpoint.
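    //
    // For instance (illustrative numbers only): if the checkpoint was taken
    // at 512 KiB in the freelist and BytesInFreeList has since dropped to
    // 128 KiB, the checkpoint is lowered to 128 KiB; the next 4 KiB worth of
    // frees then already yields PushedBytesDelta >= PageSize (assuming 4 KiB
    // pages) instead of having to climb back above the stale 512 KiB mark.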
1038 const uptr PushedBytesDelta =
1039 BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
1040 if (PushedBytesDelta < PageSize)
1041 return false;
1042
1043 // Releasing smaller blocks is expensive, so we want to make sure that a
1044 // significant amount of bytes are free, and that there has been a good
1045 // amount of batches pushed to the freelist before attempting to release.
1046 if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
1047 if (PushedBytesDelta < Sci->AllocatedUser / 16U)
1048 return false;
1049
1050 if (ReleaseType == ReleaseToOS::Normal) {
1051 const s32 IntervalMs = atomic_load_relaxed(A: &ReleaseToOsIntervalMs);
1052 if (IntervalMs < 0)
1053 return false;
1054
      // The constant 8 here was chosen from profiling some apps: the number of
      // unreleased pages in the large size classes is around 16 pages or more.
      // Half of that is used as the heuristic, which also avoids triggering a
      // page release on every pushBlocks() attempt for large blocks.
1059 const bool ByPassReleaseInterval =
1060 isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
1061 if (!ByPassReleaseInterval) {
1062 if (Sci->ReleaseInfo.LastReleaseAtNs +
1063 static_cast<u64>(IntervalMs) * 1000000 >
1064 getMonotonicTimeFast()) {
1065 // Memory was returned recently.
1066 return false;
1067 }
1068 }
1069 } // if (ReleaseType == ReleaseToOS::Normal)
1070
1071 return true;
1072 }
1073
1074 PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
1075 const uptr BlockSize, const uptr Base,
1076 const uptr NumberOfRegions,
1077 ReleaseToOS ReleaseType)
1078 REQUIRES(Sci->Mutex) {
1079 const uptr PageSize = getPageSizeCached();
1080 const uptr GroupSize = (1UL << GroupSizeLog);
1081 const uptr CurGroupBase =
1082 compactPtrGroupBase(CompactPtr: compactPtr(ClassId, Ptr: Sci->CurrentRegion));
1083
1084 PageReleaseContext Context(BlockSize, NumberOfRegions,
1085 /*ReleaseSize=*/RegionSize);
1086
1087 auto DecompactPtr = [](CompactPtrT CompactPtr) {
1088 return reinterpret_cast<uptr>(CompactPtr);
1089 };
1090 for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
1091 const uptr GroupBase = decompactGroupBase(CompactPtrGroupBase: BG.CompactPtrGroupBase);
      // The `GroupSize` may not be divisible by `BlockSize`, which means there
      // is unused space at the end of the region. Exclude that space to avoid
      // an unused page map entry.
1095 uptr AllocatedGroupSize = GroupBase == CurGroupBase
1096 ? Sci->CurrentRegionAllocated
1097 : roundDownSlow(X: GroupSize, Boundary: BlockSize);
1098 if (AllocatedGroupSize == 0)
1099 continue;
1100
      // TransferBatches are pushed to the front of BG.Batches, so the first
      // one may not be full.
1103 const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
1104 BG.Batches.front()->getCount();
1105 const uptr BytesInBG = NumBlocks * BlockSize;
1106
1107 if (ReleaseType != ReleaseToOS::ForceAll) {
1108 if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
1109 BG.BytesInBGAtLastCheckpoint = BytesInBG;
1110 continue;
1111 }
1112
1113 const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
1114 if (PushedBytesDelta < PageSize)
1115 continue;
1116
        // Given the randomness property, we try to release the pages only if
        // the bytes used by free blocks exceed a certain proportion of the
        // allocated space.
1120 if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
1121 (100U - 1U - BlockSize / 16U)) {
1122 continue;
1123 }
1124 }
1125
1126 // TODO: Consider updating this after page release if `ReleaseRecorder`
1127 // can tell the released bytes in each group.
1128 BG.BytesInBGAtLastCheckpoint = BytesInBG;
1129
1130 const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
1131 const uptr RegionIndex = (GroupBase - Base) / RegionSize;
1132
1133 if (NumBlocks == MaxContainedBlocks) {
1134 for (const auto &It : BG.Batches)
1135 for (u16 I = 0; I < It.getCount(); ++I)
1136 DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
1137
1138 const uptr To = GroupBase + AllocatedGroupSize;
1139 Context.markRangeAsAllCounted(From: GroupBase, To, Base: GroupBase, RegionIndex,
1140 RegionSize: AllocatedGroupSize);
1141 } else {
1142 DCHECK_LT(NumBlocks, MaxContainedBlocks);
1143
        // Note that we don't always visit the blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
1147 Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
1148 RegionIndex, AllocatedGroupSize,
1149 /*MayContainLastBlockInRegion=*/true);
1150 }
1151
      // We may not be able to do the page release in the rare case that the
      // PageMap allocation fails.
1154 if (UNLIKELY(!Context.hasBlockMarked()))
1155 break;
1156 }
1157
1158 return Context;
1159 }
1160
1161 SizeClassInfo SizeClassInfoArray[NumClasses] = {};
1162
1163 HybridMutex ByteMapMutex;
1164 // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
1165 ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
1166 atomic_s32 ReleaseToOsIntervalMs = {};
1167 // Unless several threads request regions simultaneously from different size
1168 // classes, the stash rarely contains more than 1 entry.
1169 static constexpr uptr MaxStashedRegions = 4;
1170 HybridMutex RegionsStashMutex;
1171 uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
1172 uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
1173};
1174
1175} // namespace scudo
1176
1177#endif // SCUDO_PRIMARY32_H_
1178