//===-- size_class_allocator.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SIZE_CLASS_ALLOCATOR_H_
#define SCUDO_SIZE_CLASS_ALLOCATOR_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initAllocator();
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      // Refill half of the maximum number of cached blocks.
      DCHECK_GT(C->MaxCount / 2, 0U);
      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }

  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];

    // If the cache is full, drain half of the blocks back to the main
    // allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }
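
  // Illustrative refill/drain hysteresis, assuming a class with
  // MaxCount == 8 (i.e. getMaxCached() == 4); the numbers are an example,
  // not taken from a real SizeClassMap:
  //
  //   Count == 0, allocate()   -> refill(MaxCount / 2): Count 0 -> 4 -> 3.
  //   Count == 8, deallocate() -> drain(MaxCount / 2):  Count 8 -> 4 -> 5.
  //
  // Moving MaxCount / 2 blocks in both directions keeps an alternating
  // allocate/deallocate pattern inside the cache instead of round-tripping
  // to the primary allocator on every call.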

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }

  void drain() {
    // Drain BatchClassId last as it may be needed while draining normal blocks.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }

  void *getBatchClassBlock() {
    void *B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return B;
  }

  LocalStats &getStats() { return Stats; }

  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initAllocator() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // Note that the string utils don't support printing u16, so we cast it
      // to the commonly used type uptr.
      Str->append(" %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append(" No block is cached.\n");
  }

  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }
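
  // Worked example (the numbers are illustrative; the real values come from
  // the SizeClassMap configuration): with MaxNumCachedHint == 14, a large
  // size class whose getMaxCachedHint(Size) returns 2 caches at most 2
  // blocks, while a small class hinting 32 is clamped to 14.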

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

  NOINLINE void initAllocator() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }

  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
    const u16 NumBlocksRefilled =
        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
    DCHECK_LE(NumBlocksRefilled, MaxRefill);
    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
    return NumBlocksRefilled != 0;
  }

  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};
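
// A minimal usage sketch (hypothetical driver code, not part of this header;
// Primary, GStats and ClassId are placeholders): a per-thread cache is linked
// to the global stats and the primary allocator, then serves blocks for a
// size class.
//
//   SizeClassAllocatorLocalCache<Primary> Cache;
//   Cache.init(&GStats, &PrimaryInstance);
//   if (void *P = Cache.allocate(ClassId)) {
//     // ... use the block ...
//     if (Cache.deallocate(ClassId, P))
//       ;  // Cache was drained; callers may use this as a page-release hint.
//   }
//   Cache.destroy(&GStats);  // Drains remaining blocks and unlinks stats.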

template <class SizeClassAllocator> struct SizeClassAllocatorNoCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

  void init(GlobalStats *S, SizeClassAllocator *A) {
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initAllocator();
  }

  void destroy(GlobalStats *S) {
    if (LIKELY(S))
      S->unlink(&Stats);
  }

  void *allocate(uptr ClassId) {
    CompactPtrT CompactPtr;
    uptr NumBlocksPopped = Allocator->popBlocks(this, ClassId, &CompactPtr, 1U);
    if (NumBlocksPopped == 0)
      return nullptr;
    DCHECK_EQ(NumBlocksPopped, 1U);
    const PerClass *C = &PerClassArray[ClassId];
    Stats.add(StatAllocated, C->ClassSize);
    Stats.sub(StatFree, C->ClassSize);
    return Allocator->decompactPtr(ClassId, CompactPtr);
  }

  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);

    if (ClassId == BatchClassId)
      return deallocateBatchClassBlock(P);

    CompactPtrT CompactPtr =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Allocator->pushBlocks(this, ClassId, &CompactPtr, 1U);
    PerClass *C = &PerClassArray[ClassId];
    Stats.sub(StatAllocated, C->ClassSize);
    Stats.add(StatFree, C->ClassSize);

    // The following adopts the same draining strategy as
    // SizeClassAllocatorLocalCache so that the same hint is used when doing
    // a page release.
    ++C->Count;
    const bool SuggestDraining = C->Count >= C->MaxCount;
    if (SuggestDraining)
      C->Count = 0;
    return SuggestDraining;
  }
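
  // For example (illustrative numbers), with MaxCount == 8 the hint above
  // fires on every 8th deallocation to a given class, mirroring the drain
  // cadence of SizeClassAllocatorLocalCache even though no blocks are held
  // locally here.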

  void *getBatchClassBlock() {
    PerClass *C = &PerClassArray[BatchClassId];
    if (C->Count == 0) {
      const u16 NumBlocksRefilled = Allocator->popBlocks(
          this, BatchClassId, BatchClassStorage, C->MaxCount);
      if (NumBlocksRefilled == 0)
        reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
      DCHECK_LE(NumBlocksRefilled, SizeClassMap::MaxNumCachedHint);
      C->Count = NumBlocksRefilled;
    }

    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = BatchClassStorage[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);

    return Allocator->decompactPtr(BatchClassId, CompactP);
  }

  LocalStats &getStats() { return Stats; }

  void getStats(ScopedString *Str) { Str->append(" No block is cached.\n"); }

  bool isEmpty() const {
    const PerClass *C = &PerClassArray[BatchClassId];
    return C->Count == 0;
  }

  void drain() {
    PerClass *C = &PerClassArray[BatchClassId];
    if (C->Count > 0) {
      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
      C->Count = 0;
    }
  }

  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count = 0;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
  };
  PerClass PerClassArray[NumClasses] = {};
  // Popping BatchClass blocks requires taking a certain number of blocks at
  // once. This restriction comes from how BatchClass storage is managed in
  // the primary allocator. See the details in `popBlocksImpl` in the primary
  // allocator.
  CompactPtrT BatchClassStorage[SizeClassMap::MaxNumCachedHint] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

  bool deallocateBatchClassBlock(void *P) {
    PerClass *C = &PerClassArray[BatchClassId];
    // Drain all the blocks.
    if (C->Count >= C->MaxCount) {
      Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
      C->Count = 0;
    }
    BatchClassStorage[C->Count++] =
        Allocator->compactPtr(BatchClassId, reinterpret_cast<uptr>(P));

    // Currently, BatchClass doesn't support page releasing, so we always
    // return false.
    return false;
  }

  NOINLINE void initAllocator() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      if (I != BatchClassId) {
        P->ClassSize = Size;
        P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
        P->MaxCount = SizeClassMap::MaxNumCachedHint;
      }
    }
  }
};
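
// A minimal usage sketch for the cache-less variant (hypothetical driver
// code; Primary, GStats and ClassId are placeholders). Every call round-trips
// to the primary allocator, so only the BatchClass keeps local storage.
//
//   SizeClassAllocatorNoCache<Primary> Front;
//   Front.init(&GStats, &PrimaryInstance);
//   void *P = Front.allocate(ClassId);  // Pops exactly one block.
//   if (P && Front.deallocate(ClassId, P))
//     ;  // Same page-release hint as the cached variant.
//   Front.destroy(&GStats);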

} // namespace scudo

#endif // SCUDO_SIZE_CLASS_ALLOCATOR_H_