| 1 | //===-- primary64.h ---------------------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #ifndef SCUDO_PRIMARY64_H_ |
| 10 | #define SCUDO_PRIMARY64_H_ |
| 11 | |
| 12 | #include "allocator_common.h" |
| 13 | #include "bytemap.h" |
| 14 | #include "common.h" |
| 15 | #include "condition_variable.h" |
| 16 | #include "list.h" |
| 17 | #include "mem_map.h" |
| 18 | #include "memtag.h" |
| 19 | #include "options.h" |
| 20 | #include "release.h" |
| 21 | #include "size_class_allocator.h" |
| 22 | #include "stats.h" |
| 23 | #include "string_utils.h" |
| 24 | #include "thread_annotations.h" |
| 25 | |
| 26 | namespace scudo { |
| 27 | |
| 28 | // SizeClassAllocator64 is an allocator tuned for 64-bit address space. |
| 29 | // |
| 30 | // It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided in |
| 31 | // Regions, specific to each size class. Note that the base of that mapping is |
// random (based on the platform-specific map() capabilities). If
| 33 | // PrimaryEnableRandomOffset is set, each Region actually starts at a random |
| 34 | // offset from its base. |
| 35 | // |
| 36 | // Regions are mapped incrementally on demand to fulfill allocation requests, |
| 37 | // those mappings being split into equally sized Blocks based on the size class |
| 38 | // they belong to. The Blocks created are shuffled to prevent predictable |
| 39 | // address patterns (the predictability increases with the size of the Blocks). |
| 40 | // |
// The 1st Region (for size class 0) holds the TransferBatches. This is a
// structure used to transfer arrays of available pointers from the size class
// freelist to the thread-specific freelist, and back.
| 44 | // |
| 45 | // The memory used by this allocator is never unmapped, but can be partially |
| 46 | // released if the platform allows for it. |
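//
// A rough usage sketch (simplified; in practice the combined allocator and the
// per-thread SizeClassAllocatorT caches drive these calls, and the names
// `SizeClassAllocator`, `ClassId` and the counts below are placeholders):
//
//   SizeClassAllocator64<Config> Primary;
//   Primary.init(/*ReleaseToOsInterval=*/1000);
//   CompactPtrT Blocks[8];
//   const u16 Popped =
//       Primary.popBlocks(&SizeClassAllocator, ClassId, Blocks,
//                         /*MaxBlockCount=*/8);
//   // ... hand the popped blocks out as allocations ...
//   Primary.pushBlocks(&SizeClassAllocator, ClassId, Blocks, Popped);
//   Primary.releaseToOS(ReleaseToOS::ForceAll);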
| 47 | |
| 48 | template <typename Config> class SizeClassAllocator64 { |
| 49 | public: |
| 50 | typedef typename Config::CompactPtrT CompactPtrT; |
| 51 | typedef typename Config::SizeClassMap SizeClassMap; |
| 52 | typedef typename Config::ConditionVariableT ConditionVariableT; |
| 53 | static const uptr CompactPtrScale = Config::getCompactPtrScale(); |
| 54 | static const uptr RegionSizeLog = Config::getRegionSizeLog(); |
| 55 | static const uptr GroupSizeLog = Config::getGroupSizeLog(); |
| 56 | static_assert(RegionSizeLog >= GroupSizeLog, |
| 57 | "Group size shouldn't be greater than the region size" ); |
| 58 | static const uptr GroupScale = GroupSizeLog - CompactPtrScale; |
| 59 | typedef SizeClassAllocator64<Config> ThisT; |
| 60 | typedef TransferBatch<ThisT> TransferBatchT; |
| 61 | typedef BatchGroup<ThisT> BatchGroupT; |
| 62 | using SizeClassAllocatorT = |
| 63 | typename Conditional<Config::getEnableBlockCache(), |
| 64 | SizeClassAllocatorLocalCache<ThisT>, |
| 65 | SizeClassAllocatorNoCache<ThisT>>::type; |
| 66 | |
  // BatchClass is used to store internal metadata so it needs to be at least
  // as large as the largest data structure.
| 69 | static uptr getSizeByClassId(uptr ClassId) { |
| 70 | return (ClassId == SizeClassMap::BatchClassId) |
               ? roundUp(Max(sizeof(TransferBatchT), sizeof(BatchGroupT)),
                         1U << CompactPtrScale)
| 73 | : SizeClassMap::getSizeByClassId(ClassId); |
| 74 | } |
| 75 | |
| 76 | static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; } |
| 77 | |
| 78 | static bool conditionVariableEnabled() { |
| 79 | return Config::hasConditionVariableT(); |
| 80 | } |
| 81 | |
| 82 | void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS { |
| 83 | DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT))); |
| 84 | |
| 85 | const uptr PageSize = getPageSizeCached(); |
| 86 | const uptr GroupSize = (1UL << GroupSizeLog); |
| 87 | const uptr PagesInGroup = GroupSize / PageSize; |
    const uptr MinSizeClass = getSizeByClassId(1);
    // When trying to release pages back to memory, visiting smaller size
    // classes is expensive. Therefore, we only try to release smaller size
    // classes when the amount of free blocks goes over a certain threshold
    // (see the comment in releaseToOSMaybe() for more details). For example,
    // for size class 32, we only do the release when the size of free blocks
    // is greater than 97% of pages in a group. However, this may introduce
    // another issue: the number of free blocks may bounce between 97% and
    // 100%, which means we may attempt many page releases but only release
    // very few pages each time (less than 3% of a group). Even though
    // `ReleaseToOsIntervalMs` slightly reduces the frequency of these calls,
    // it is better to have another guard to mitigate this issue.
| 100 | // |
| 101 | // Here we add another constraint on the minimum size requirement. The |
| 102 | // constraint is determined by the size of in-use blocks in the minimal size |
| 103 | // class. Take size class 32 as an example, |
| 104 | // |
| 105 | // +- one memory group -+ |
| 106 | // +----------------------+------+ |
| 107 | // | 97% of free blocks | | |
| 108 | // +----------------------+------+ |
| 109 | // \ / |
| 110 | // 3% in-use blocks |
| 111 | // |
| 112 | // * The release size threshold is 97%. |
| 113 | // |
    // The 3% size in a group is about 7 pages. For two consecutive
    // releaseToOSMaybe() calls, we require the difference in `PushedBlocks`
    // to be greater than 7 pages. This mitigates the page-release thrashing
    // caused by memory usage bouncing around the threshold. The smallest size
    // class takes the longest time to do a page release, so we use its amount
    // of in-use blocks as the heuristic.
| 120 | SmallerBlockReleasePageDelta = |
| 121 | PagesInGroup * (1 + MinSizeClass / 16U) / 100; |
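    // For example, assuming 4 KiB pages and 1 MiB groups (the exact values
    // depend on the platform and config), PagesInGroup is 256 and, with
    // MinSizeClass == 32, SmallerBlockReleasePageDelta is
    // 256 * (1 + 32 / 16) / 100 == 7 pages, matching the "about 7 pages"
    // figure above.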
| 122 | |
| 123 | u32 Seed; |
| 124 | const u64 Time = getMonotonicTimeFast(); |
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
| 126 | Seed = static_cast<u32>(Time ^ (reinterpret_cast<uptr>(&Seed) >> 12)); |
| 127 | |
| 128 | for (uptr I = 0; I < NumClasses; I++) |
      getRegionInfo(I)->RandState = getRandomU32(&Seed);
| 130 | |
| 131 | if (Config::getEnableContiguousRegions()) { |
| 132 | ReservedMemoryT ReservedMemory = {}; |
| 133 | // Reserve the space required for the Primary. |
      CHECK(ReservedMemory.create(/*Addr=*/0U, RegionSize * NumClasses,
                                  "scudo:primary_reserve"));
| 136 | const uptr PrimaryBase = ReservedMemory.getBase(); |
| 137 | |
| 138 | for (uptr I = 0; I < NumClasses; I++) { |
        MemMapT RegionMemMap = ReservedMemory.dispatch(
            PrimaryBase + (I << RegionSizeLog), RegionSize);
        RegionInfo *Region = getRegionInfo(I);

        initRegion(Region, I, RegionMemMap, Config::getEnableRandomOffset());
| 144 | } |
| 145 | shuffle(RegionInfoArray, NumClasses, &Seed); |
| 146 | } |
| 147 | |
| 148 | // The binding should be done after region shuffling so that it won't bind |
| 149 | // the FLLock from the wrong region. |
| 150 | for (uptr I = 0; I < NumClasses; I++) |
      getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
| 152 | |
    // The default value in the primary config takes precedence.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
| 157 | } |
| 158 | |
| 159 | void unmapTestOnly() { |
| 160 | for (uptr I = 0; I < NumClasses; I++) { |
      RegionInfo *Region = getRegionInfo(I);
| 162 | { |
| 163 | ScopedLock ML(Region->MMLock); |
| 164 | MemMapT MemMap = Region->MemMapInfo.MemMap; |
| 165 | if (MemMap.isAllocated()) |
| 166 | MemMap.unmap(); |
| 167 | } |
| 168 | *Region = {}; |
| 169 | } |
| 170 | } |
| 171 | |
  // When all blocks are freed, the total size of the free blocks has to be the
  // same as `AllocatedUser`.
| 173 | void verifyAllBlocksAreReleasedTestOnly() { |
| 174 | // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass. |
| 175 | uptr BatchClassUsedInFreeLists = 0; |
| 176 | for (uptr I = 0; I < NumClasses; I++) { |
| 177 | // We have to count BatchClassUsedInFreeLists in other regions first. |
| 178 | if (I == SizeClassMap::BatchClassId) |
| 179 | continue; |
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock ML(Region->MMLock);
      ScopedLock FL(Region->FLLock);
      const uptr BlockSize = getSizeByClassId(I);
| 184 | uptr TotalBlocks = 0; |
| 185 | for (BatchGroupT &BG : Region->FreeListInfo.BlockList) { |
| 186 | // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`. |
| 187 | BatchClassUsedInFreeLists += BG.Batches.size() + 1; |
| 188 | for (const auto &It : BG.Batches) |
| 189 | TotalBlocks += It.getCount(); |
| 190 | } |
| 191 | |
| 192 | DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize); |
| 193 | DCHECK_EQ(Region->FreeListInfo.PushedBlocks, |
| 194 | Region->FreeListInfo.PoppedBlocks); |
| 195 | } |
| 196 | |
    RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
    ScopedLock ML(Region->MMLock);
    ScopedLock FL(Region->FLLock);
    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
| 201 | uptr TotalBlocks = 0; |
| 202 | for (BatchGroupT &BG : Region->FreeListInfo.BlockList) { |
| 203 | if (LIKELY(!BG.Batches.empty())) { |
| 204 | for (const auto &It : BG.Batches) |
| 205 | TotalBlocks += It.getCount(); |
| 206 | } else { |
        // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
        // recording itself.
| 209 | ++TotalBlocks; |
| 210 | } |
| 211 | } |
| 212 | DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists, |
| 213 | Region->MemMapInfo.AllocatedUser / BlockSize); |
| 214 | DCHECK_GE(Region->FreeListInfo.PoppedBlocks, |
| 215 | Region->FreeListInfo.PushedBlocks); |
| 216 | const uptr BlocksInUse = |
| 217 | Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks; |
| 218 | DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists); |
| 219 | } |
| 220 | |
| 221 | u16 popBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
| 222 | CompactPtrT *ToArray, const u16 MaxBlockCount) { |
| 223 | DCHECK_LT(ClassId, NumClasses); |
| 224 | RegionInfo *Region = getRegionInfo(ClassId); |
| 225 | u16 PopCount = 0; |
| 226 | |
| 227 | { |
| 228 | ScopedLock L(Region->FLLock); |
| 229 | PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Region, ToArray, |
| 230 | MaxBlockCount); |
| 231 | if (PopCount != 0U) |
| 232 | return PopCount; |
| 233 | } |
| 234 | |
| 235 | bool ReportRegionExhausted = false; |
| 236 | |
| 237 | if (conditionVariableEnabled()) { |
| 238 | PopCount = popBlocksWithCV(SizeClassAllocator, ClassId, Region, ToArray, |
| 239 | MaxBlockCount, ReportRegionExhausted); |
| 240 | } else { |
| 241 | while (true) { |
| 242 | // When two threads compete for `Region->MMLock`, we only want one of |
| 243 | // them to call populateFreeListAndPopBlocks(). To avoid both of them |
| 244 | // doing that, always check the freelist before mapping new pages. |
| 245 | ScopedLock ML(Region->MMLock); |
| 246 | { |
| 247 | ScopedLock FL(Region->FLLock); |
| 248 | PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Region, ToArray, |
| 249 | MaxBlockCount); |
| 250 | if (PopCount != 0U) |
| 251 | return PopCount; |
| 252 | } |
| 253 | |
| 254 | const bool RegionIsExhausted = Region->Exhausted; |
| 255 | if (!RegionIsExhausted) { |
| 256 | PopCount = populateFreeListAndPopBlocks( |
| 257 | SizeClassAllocator, ClassId, Region, ToArray, MaxBlockCount); |
| 258 | } |
| 259 | ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted; |
| 260 | break; |
| 261 | } |
| 262 | } |
| 263 | |
| 264 | if (UNLIKELY(ReportRegionExhausted)) { |
      Printf("Can't populate more pages for size class %zu.\n",
             getSizeByClassId(ClassId));
| 267 | |
| 268 | // Theoretically, BatchClass shouldn't be used up. Abort immediately when |
| 269 | // it happens. |
| 270 | if (ClassId == SizeClassMap::BatchClassId) |
| 271 | reportOutOfBatchClass(); |
| 272 | } |
| 273 | |
| 274 | return PopCount; |
| 275 | } |
| 276 | |
| 277 | // Push the array of free blocks to the designated batch group. |
| 278 | void pushBlocks(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
| 279 | CompactPtrT *Array, u32 Size) { |
| 280 | DCHECK_LT(ClassId, NumClasses); |
| 281 | DCHECK_GT(Size, 0); |
| 282 | |
| 283 | RegionInfo *Region = getRegionInfo(ClassId); |
| 284 | if (ClassId == SizeClassMap::BatchClassId) { |
| 285 | ScopedLock L(Region->FLLock); |
| 286 | pushBatchClassBlocks(Region, Array, Size); |
| 287 | if (conditionVariableEnabled()) |
| 288 | Region->FLLockCV.notifyAll(Region->FLLock); |
| 289 | return; |
| 290 | } |
| 291 | |
| 292 | // TODO(chiahungduan): Consider not doing grouping if the group size is not |
| 293 | // greater than the block size with a certain scale. |
| 294 | |
| 295 | bool SameGroup = true; |
| 296 | if (GroupSizeLog < RegionSizeLog) { |
| 297 | // Sort the blocks so that blocks belonging to the same group can be |
| 298 | // pushed together. |
| 299 | for (u32 I = 1; I < Size; ++I) { |
        if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
          SameGroup = false;
        CompactPtrT Cur = Array[I];
        u32 J = I;
        while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
| 305 | Array[J] = Array[J - 1]; |
| 306 | --J; |
| 307 | } |
| 308 | Array[J] = Cur; |
| 309 | } |
| 310 | } |
| 311 | |
| 312 | { |
| 313 | ScopedLock L(Region->FLLock); |
| 314 | pushBlocksImpl(SizeClassAllocator, ClassId, Region, Array, Size, |
| 315 | SameGroup); |
| 316 | if (conditionVariableEnabled()) |
| 317 | Region->FLLockCV.notifyAll(Region->FLLock); |
| 318 | } |
| 319 | } |
| 320 | |
| 321 | void disable() NO_THREAD_SAFETY_ANALYSIS { |
| 322 | // The BatchClassId must be locked last since other classes can use it. |
| 323 | for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) { |
| 324 | if (static_cast<uptr>(I) == SizeClassMap::BatchClassId) |
| 325 | continue; |
      getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
      getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
    }
    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
| 331 | } |
| 332 | |
| 333 | void enable() NO_THREAD_SAFETY_ANALYSIS { |
    getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
    getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getRegionInfo(I)->FLLock.unlock();
      getRegionInfo(I)->MMLock.unlock();
| 341 | } |
| 342 | } |
| 343 | |
| 344 | template <typename F> void iterateOverBlocks(F Callback) { |
| 345 | for (uptr I = 0; I < NumClasses; I++) { |
| 346 | if (I == SizeClassMap::BatchClassId) |
| 347 | continue; |
      RegionInfo *Region = getRegionInfo(I);
      // TODO: The call of `iterateOverBlocks` requires disabling
      // SizeClassAllocator64. We may consider locking each region on demand
      // only.
      Region->FLLock.assertHeld();
      Region->MMLock.assertHeld();
      const uptr BlockSize = getSizeByClassId(I);
| 355 | const uptr From = Region->RegionBeg; |
| 356 | const uptr To = From + Region->MemMapInfo.AllocatedUser; |
| 357 | for (uptr Block = From; Block < To; Block += BlockSize) |
| 358 | Callback(Block); |
| 359 | } |
| 360 | } |
| 361 | |
| 362 | void getStats(ScopedString *Str) { |
| 363 | // TODO(kostyak): get the RSS per region. |
| 364 | uptr TotalMapped = 0; |
| 365 | uptr PoppedBlocks = 0; |
| 366 | uptr PushedBlocks = 0; |
| 367 | for (uptr I = 0; I < NumClasses; I++) { |
      RegionInfo *Region = getRegionInfo(I);
| 369 | { |
| 370 | ScopedLock L(Region->MMLock); |
| 371 | TotalMapped += Region->MemMapInfo.MappedUser; |
| 372 | } |
| 373 | { |
| 374 | ScopedLock L(Region->FLLock); |
| 375 | PoppedBlocks += Region->FreeListInfo.PoppedBlocks; |
| 376 | PushedBlocks += Region->FreeListInfo.PushedBlocks; |
| 377 | } |
| 378 | } |
    const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
                "allocations; remains %zu; ReleaseToOsIntervalMs = %d\n",
                TotalMapped >> 20, 0U, PoppedBlocks,
                PoppedBlocks - PushedBlocks, IntervalMs >= 0 ? IntervalMs : -1);
| 384 | |
| 385 | for (uptr I = 0; I < NumClasses; I++) { |
      RegionInfo *Region = getRegionInfo(I);
| 387 | ScopedLock L1(Region->MMLock); |
| 388 | ScopedLock L2(Region->FLLock); |
| 389 | getStats(Str, I, Region); |
| 390 | } |
| 391 | } |
| 392 | |
| 393 | void getFragmentationInfo(ScopedString *Str) { |
    Str->append(
        "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
        getPageSizeCached());
| 397 | |
| 398 | for (uptr I = 1; I < NumClasses; I++) { |
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->MMLock);
      getRegionFragmentationInfo(Region, I, Str);
| 402 | } |
| 403 | } |
| 404 | |
| 405 | void getMemoryGroupFragmentationInfo(ScopedString *Str) { |
    Str->append(
        "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
        getPageSizeCached());
| 409 | |
| 410 | for (uptr I = 1; I < NumClasses; I++) { |
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->MMLock);
      getMemoryGroupFragmentationInfoInRegion(Region, I, Str);
| 414 | } |
| 415 | } |
| 416 | |
| 417 | bool setOption(Option O, sptr Value) { |
| 418 | if (O == Option::ReleaseInterval) { |
| 419 | const s32 Interval = Max( |
| 420 | Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()), |
| 421 | Config::getMinReleaseToOsIntervalMs()); |
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
| 423 | return true; |
| 424 | } |
| 425 | // Not supported by the Primary, but not an error either. |
| 426 | return true; |
| 427 | } |
| 428 | |
| 429 | uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) { |
| 430 | RegionInfo *Region = getRegionInfo(ClassId); |
    // Note that tryLock() may fail spuriously. Given that this should happen
    // rarely and that skipping a page release is fine, we don't take extra
    // measures to ensure that a page release is done.
| 434 | if (Region->MMLock.tryLock()) { |
| 435 | uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType); |
| 436 | Region->MMLock.unlock(); |
| 437 | return BytesReleased; |
| 438 | } |
| 439 | return 0; |
| 440 | } |
| 441 | |
| 442 | uptr releaseToOS(ReleaseToOS ReleaseType) { |
| 443 | uptr TotalReleasedBytes = 0; |
| 444 | for (uptr I = 0; I < NumClasses; I++) { |
| 445 | if (I == SizeClassMap::BatchClassId) |
| 446 | continue; |
      RegionInfo *Region = getRegionInfo(I);
      ScopedLock L(Region->MMLock);
      TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
| 450 | } |
| 451 | return TotalReleasedBytes; |
| 452 | } |
| 453 | |
| 454 | const char *getRegionInfoArrayAddress() const { |
| 455 | return reinterpret_cast<const char *>(RegionInfoArray); |
| 456 | } |
| 457 | |
| 458 | static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); } |
| 459 | |
| 460 | uptr getCompactPtrBaseByClassId(uptr ClassId) { |
| 461 | return getRegionInfo(ClassId)->RegionBeg; |
| 462 | } |
| 463 | |
| 464 | CompactPtrT compactPtr(uptr ClassId, uptr Ptr) { |
| 465 | DCHECK_LE(ClassId, SizeClassMap::LargestClassId); |
    return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
| 467 | } |
| 468 | |
| 469 | void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) { |
| 470 | DCHECK_LE(ClassId, SizeClassMap::LargestClassId); |
| 471 | return reinterpret_cast<void *>( |
        decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
| 473 | } |
| 474 | |
| 475 | static BlockInfo findNearestBlock(const char *RegionInfoData, |
| 476 | uptr Ptr) NO_THREAD_SAFETY_ANALYSIS { |
| 477 | const RegionInfo *RegionInfoArray = |
| 478 | reinterpret_cast<const RegionInfo *>(RegionInfoData); |
| 479 | |
| 480 | uptr ClassId; |
| 481 | uptr MinDistance = -1UL; |
| 482 | for (uptr I = 0; I != NumClasses; ++I) { |
| 483 | if (I == SizeClassMap::BatchClassId) |
| 484 | continue; |
| 485 | uptr Begin = RegionInfoArray[I].RegionBeg; |
      // TODO(chiahungduan): In fact, we need to lock RegionInfo::MMLock.
      // However, the RegionInfoData is passed with a const qualifier and
      // locking the mutex requires modifying RegionInfoData, which means we
      // would need to remove the const qualifier. This may lead to another
      // undefined behavior (the first one is accessing `AllocatedUser` without
      // locking). It's better to pass `RegionInfoData` as `void *` so that we
      // can lock the mutex properly.
| 492 | uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser; |
| 493 | if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I)) |
| 494 | continue; |
| 495 | uptr RegionDistance; |
| 496 | if (Begin <= Ptr) { |
| 497 | if (Ptr < End) |
| 498 | RegionDistance = 0; |
| 499 | else |
| 500 | RegionDistance = Ptr - End; |
| 501 | } else { |
| 502 | RegionDistance = Begin - Ptr; |
| 503 | } |
| 504 | |
| 505 | if (RegionDistance < MinDistance) { |
| 506 | MinDistance = RegionDistance; |
| 507 | ClassId = I; |
| 508 | } |
| 509 | } |
| 510 | |
| 511 | BlockInfo B = {}; |
| 512 | if (MinDistance <= 8192) { |
| 513 | B.RegionBegin = RegionInfoArray[ClassId].RegionBeg; |
| 514 | B.RegionEnd = |
| 515 | B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser; |
| 516 | B.BlockSize = SizeClassMap::getSizeByClassId(ClassId); |
| 517 | B.BlockBegin = |
| 518 | B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) * |
| 519 | sptr(B.BlockSize)); |
| 520 | while (B.BlockBegin < B.RegionBegin) |
| 521 | B.BlockBegin += B.BlockSize; |
| 522 | while (B.RegionEnd < B.BlockBegin + B.BlockSize) |
| 523 | B.BlockBegin -= B.BlockSize; |
| 524 | } |
| 525 | return B; |
| 526 | } |
| 527 | |
| 528 | AtomicOptions Options; |
| 529 | |
| 530 | private: |
| 531 | static const uptr RegionSize = 1UL << RegionSizeLog; |
| 532 | static const uptr NumClasses = SizeClassMap::NumClasses; |
| 533 | |
| 534 | static const uptr MapSizeIncrement = Config::getMapSizeIncrement(); |
| 535 | // Fill at most this number of batches from the newly map'd memory. |
| 536 | static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U; |
| 537 | |
| 538 | struct ReleaseToOsInfo { |
| 539 | uptr BytesInFreeListAtLastCheckpoint; |
| 540 | uptr NumReleasesAttempted; |
| 541 | uptr LastReleasedBytes; |
| 542 | // The minimum size of pushed blocks to trigger page release. |
| 543 | uptr TryReleaseThreshold; |
| 544 | // The number of bytes not triggering `releaseToOSMaybe()` because of |
| 545 | // the length of release interval. |
| 546 | uptr PendingPushedBytesDelta; |
| 547 | u64 LastReleaseAtNs; |
| 548 | }; |
| 549 | |
| 550 | struct BlocksInfo { |
| 551 | SinglyLinkedList<BatchGroupT> BlockList = {}; |
| 552 | uptr PoppedBlocks = 0; |
| 553 | uptr PushedBlocks = 0; |
| 554 | }; |
| 555 | |
| 556 | struct PagesInfo { |
| 557 | MemMapT MemMap = {}; |
| 558 | // Bytes mapped for user memory. |
| 559 | uptr MappedUser = 0; |
| 560 | // Bytes allocated for user memory. |
| 561 | uptr AllocatedUser = 0; |
| 562 | }; |
| 563 | |
| 564 | struct UnpaddedRegionInfo { |
| 565 | // Mutex for operations on freelist |
| 566 | HybridMutex FLLock; |
| 567 | ConditionVariableT FLLockCV GUARDED_BY(FLLock); |
| 568 | // Mutex for memmap operations |
| 569 | HybridMutex MMLock ACQUIRED_BEFORE(FLLock); |
| 570 | // `RegionBeg` is initialized before thread creation and won't be changed. |
| 571 | uptr RegionBeg = 0; |
| 572 | u32 RandState GUARDED_BY(MMLock) = 0; |
| 573 | BlocksInfo FreeListInfo GUARDED_BY(FLLock); |
| 574 | PagesInfo MemMapInfo GUARDED_BY(MMLock); |
| 575 | ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {}; |
| 576 | bool Exhausted GUARDED_BY(MMLock) = false; |
| 577 | bool isPopulatingFreeList GUARDED_BY(FLLock) = false; |
| 578 | }; |
| 579 | struct RegionInfo : UnpaddedRegionInfo { |
| 580 | char Padding[SCUDO_CACHE_LINE_SIZE - |
| 581 | (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {}; |
| 582 | }; |
  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
| 584 | |
| 585 | RegionInfo *getRegionInfo(uptr ClassId) { |
| 586 | DCHECK_LT(ClassId, NumClasses); |
| 587 | return &RegionInfoArray[ClassId]; |
| 588 | } |
| 589 | |
| 590 | uptr getRegionBaseByClassId(uptr ClassId) { |
| 591 | RegionInfo *Region = getRegionInfo(ClassId); |
| 592 | Region->MMLock.assertHeld(); |
| 593 | |
| 594 | if (!Config::getEnableContiguousRegions() && |
| 595 | !Region->MemMapInfo.MemMap.isAllocated()) { |
| 596 | return 0U; |
| 597 | } |
| 598 | return Region->MemMapInfo.MemMap.getBase(); |
| 599 | } |
| 600 | |
| 601 | static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) { |
| 602 | return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale); |
| 603 | } |
| 604 | |
| 605 | static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) { |
| 606 | return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale); |
| 607 | } |
| 608 | |
| 609 | static uptr compactPtrGroup(CompactPtrT CompactPtr) { |
| 610 | const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1; |
| 611 | return static_cast<uptr>(CompactPtr) & ~Mask; |
| 612 | } |
| 613 | static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) { |
| 614 | DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U); |
| 615 | return Base + (CompactPtrGroupBase << CompactPtrScale); |
| 616 | } |
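
  // For example (illustrative values), with CompactPtrScale == 4 and
  // GroupSizeLog == 18 (so GroupScale == 14), a block at Base + 0x50010
  // compacts to 0x5001; compactPtrGroup() masks off the low 14 bits and yields
  // 0x4000, and decompactGroupBase() maps that back to Base + 0x40000, the
  // start of the 256 KiB group containing the block.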
| 617 | |
| 618 | ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) { |
| 619 | const uptr PageSize = getPageSizeCached(); |
| 620 | return BlockSize < PageSize / 16U; |
| 621 | } |
| 622 | |
| 623 | ALWAYS_INLINE uptr getMinReleaseAttemptSize(uptr BlockSize) { |
    return roundUp(BlockSize, getPageSizeCached());
| 625 | } |
| 626 | |
| 627 | ALWAYS_INLINE void initRegion(RegionInfo *Region, uptr ClassId, |
| 628 | MemMapT MemMap, bool EnableRandomOffset) |
| 629 | REQUIRES(Region->MMLock) { |
| 630 | DCHECK(!Region->MemMapInfo.MemMap.isAllocated()); |
| 631 | DCHECK(MemMap.isAllocated()); |
| 632 | |
| 633 | const uptr PageSize = getPageSizeCached(); |
| 634 | |
| 635 | Region->MemMapInfo.MemMap = MemMap; |
| 636 | |
| 637 | Region->RegionBeg = MemMap.getBase(); |
| 638 | if (EnableRandomOffset) { |
| 639 | Region->RegionBeg += |
| 640 | (getRandomModN(&Region->RandState, 16) + 1) * PageSize; |
| 641 | } |
| 642 | |
| 643 | const uptr BlockSize = getSizeByClassId(ClassId); |
    // Releasing small blocks is expensive; set a higher threshold to avoid
    // frequent page releases.
| 646 | if (isSmallBlock(BlockSize)) { |
| 647 | Region->ReleaseInfo.TryReleaseThreshold = |
| 648 | PageSize * SmallerBlockReleasePageDelta; |
| 649 | } else { |
| 650 | Region->ReleaseInfo.TryReleaseThreshold = |
| 651 | getMinReleaseAttemptSize(BlockSize); |
| 652 | } |
| 653 | } |
| 654 | |
| 655 | void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size) |
| 656 | REQUIRES(Region->FLLock) { |
| 657 | DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId)); |
| 658 | |
    // Free blocks are recorded by TransferBatch in the freelist for all
    // size classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use an additional block to record the free blocks of
    // BatchClassId, they are self-contained, i.e., a TransferBatch records the
    // block address of itself. See the figure below:
| 664 | // |
| 665 | // TransferBatch at 0xABCD |
| 666 | // +----------------------------+ |
| 667 | // | Free blocks' addr | |
| 668 | // | +------+------+------+ | |
| 669 | // | |0xABCD|... |... | | |
| 670 | // | +------+------+------+ | |
| 671 | // +----------------------------+ |
| 672 | // |
| 673 | // When we allocate all the free blocks in the TransferBatch, the block used |
| 674 | // by TransferBatch is also free for use. We don't need to recycle the |
| 675 | // TransferBatch. Note that the correctness is maintained by the invariant, |
| 676 | // |
| 677 | // Each popBlocks() request returns the entire TransferBatch. Returning |
| 678 | // part of the blocks in a TransferBatch is invalid. |
| 679 | // |
| 680 | // This ensures that TransferBatch won't leak the address itself while it's |
| 681 | // still holding other valid data. |
| 682 | // |
| 683 | // Besides, BatchGroup is also allocated from BatchClassId and has its |
| 684 | // address recorded in the TransferBatch too. To maintain the correctness, |
| 685 | // |
    //   The address of BatchGroup is always recorded in the last TransferBatch
    //   in the freelist (which also implies that the freelist should only be
    //   updated with push_front). Once the last TransferBatch is popped,
    //   the block used by BatchGroup is also free for use.
| 690 | // |
| 691 | // With this approach, the blocks used by BatchGroup and TransferBatch are |
| 692 | // reusable and don't need additional space for them. |
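    //
    // As a concrete (illustrative) example, pushing three blocks A, B and C
    // onto an empty BatchClass freelist works as follows: block C (the last
    // array element) is turned into the BatchGroup, block B is turned into a
    // TransferBatch that records B itself and C, and A is then appended to
    // that TransferBatch. Popping the TransferBatch hands out A, B and C, at
    // which point the memory backing the TransferBatch (B) and the BatchGroup
    // (C) is free for reuse again.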
| 693 | |
| 694 | Region->FreeListInfo.PushedBlocks += Size; |
| 695 | BatchGroupT *BG = Region->FreeListInfo.BlockList.front(); |
| 696 | |
| 697 | if (BG == nullptr) { |
| 698 | // Construct `BatchGroup` on the last element. |
| 699 | BG = reinterpret_cast<BatchGroupT *>( |
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
| 701 | --Size; |
| 702 | BG->Batches.clear(); |
| 703 | // BatchClass hasn't enabled memory group. Use `0` to indicate there's no |
| 704 | // memory group here. |
| 705 | BG->CompactPtrGroupBase = 0; |
| 706 | BG->BytesInBGAtLastCheckpoint = 0; |
| 707 | BG->MaxCachedPerBatch = SizeClassAllocatorT::getMaxCached( |
          getSizeByClassId(SizeClassMap::BatchClassId));
| 709 | |
| 710 | Region->FreeListInfo.BlockList.push_front(BG); |
| 711 | } |
| 712 | |
| 713 | if (UNLIKELY(Size == 0)) |
| 714 | return; |
| 715 | |
    // This happens in 2 cases:
    // 1. We just allocated a new `BatchGroup`.
    // 2. Only 1 block was pushed when the freelist was empty.
| 719 | if (BG->Batches.empty()) { |
| 720 | // Construct the `TransferBatch` on the last element. |
| 721 | TransferBatchT *TB = reinterpret_cast<TransferBatchT *>( |
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
| 723 | TB->clear(); |
| 724 | // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are |
| 725 | // recorded in the TransferBatch. |
| 726 | TB->add(Array[Size - 1]); |
| 727 | TB->add( |
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
| 729 | --Size; |
| 730 | BG->Batches.push_front(TB); |
| 731 | } |
| 732 | |
| 733 | TransferBatchT *CurBatch = BG->Batches.front(); |
| 734 | DCHECK_NE(CurBatch, nullptr); |
| 735 | |
| 736 | for (u32 I = 0; I < Size;) { |
| 737 | u16 UnusedSlots = |
| 738 | static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount()); |
| 739 | if (UnusedSlots == 0) { |
| 740 | CurBatch = reinterpret_cast<TransferBatchT *>( |
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
| 742 | CurBatch->clear(); |
| 743 | // Self-contained |
| 744 | CurBatch->add(Array[I]); |
| 745 | ++I; |
| 746 | // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of |
| 747 | // BatchClassId. |
| 748 | BG->Batches.push_front(CurBatch); |
| 749 | UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1); |
| 750 | } |
      // `UnusedSlots` is u16 so the result will also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
| 753 | CurBatch->appendFromArray(&Array[I], AppendSize); |
| 754 | I += AppendSize; |
| 755 | } |
| 756 | } |
| 757 | |
| 758 | // Push the blocks to their batch group. The layout will be like, |
| 759 | // |
| 760 | // FreeListInfo.BlockList - > BG -> BG -> BG |
| 761 | // | | | |
| 762 | // v v v |
| 763 | // TB TB TB |
| 764 | // | |
| 765 | // v |
| 766 | // TB |
| 767 | // |
  // Each BatchGroup (BG) is associated with a unique group id and the free
  // blocks are managed by a list of TransferBatches (TB). To reduce the time
  // spent inserting blocks, the BGs are sorted and the input `Array` is
  // supposed to be sorted as well, so that maintaining the sorted property is
  // cheaper. Use `SameGroup=true` to indicate that all blocks in the array are
  // from the same group; then we skip checking the group id of each block.
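  //
  // For example (illustrative), with blocks whose compact-pointer group bases
  // are {G1, G1, G2} and SameGroup=false, the first two blocks are appended to
  // the BatchGroup for G1 and the last one to the BatchGroup for G2, creating
  // either BatchGroup first if it is not in `BlockList` yet.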
| 774 | void pushBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
| 775 | RegionInfo *Region, CompactPtrT *Array, u32 Size, |
| 776 | bool SameGroup = false) REQUIRES(Region->FLLock) { |
| 777 | DCHECK_NE(ClassId, SizeClassMap::BatchClassId); |
| 778 | DCHECK_GT(Size, 0U); |
| 779 | |
| 780 | auto CreateGroup = [&](uptr CompactPtrGroupBase) { |
| 781 | BatchGroupT *BG = reinterpret_cast<BatchGroupT *>( |
| 782 | SizeClassAllocator->getBatchClassBlock()); |
| 783 | BG->Batches.clear(); |
| 784 | TransferBatchT *TB = reinterpret_cast<TransferBatchT *>( |
| 785 | SizeClassAllocator->getBatchClassBlock()); |
| 786 | TB->clear(); |
| 787 | |
| 788 | BG->CompactPtrGroupBase = CompactPtrGroupBase; |
| 789 | BG->Batches.push_front(TB); |
| 790 | BG->BytesInBGAtLastCheckpoint = 0; |
| 791 | BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached; |
| 792 | |
| 793 | return BG; |
| 794 | }; |
| 795 | |
| 796 | auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) { |
| 797 | SinglyLinkedList<TransferBatchT> &Batches = BG->Batches; |
| 798 | TransferBatchT *CurBatch = Batches.front(); |
| 799 | DCHECK_NE(CurBatch, nullptr); |
| 800 | |
| 801 | for (u32 I = 0; I < Size;) { |
| 802 | DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount()); |
| 803 | u16 UnusedSlots = |
| 804 | static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount()); |
| 805 | if (UnusedSlots == 0) { |
| 806 | CurBatch = reinterpret_cast<TransferBatchT *>( |
| 807 | SizeClassAllocator->getBatchClassBlock()); |
| 808 | CurBatch->clear(); |
| 809 | Batches.push_front(CurBatch); |
| 810 | UnusedSlots = BG->MaxCachedPerBatch; |
| 811 | } |
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
| 814 | CurBatch->appendFromArray(&Array[I], AppendSize); |
| 815 | I += AppendSize; |
| 816 | } |
| 817 | }; |
| 818 | |
| 819 | Region->FreeListInfo.PushedBlocks += Size; |
| 820 | BatchGroupT *Cur = Region->FreeListInfo.BlockList.front(); |
| 821 | |
| 822 | // In the following, `Cur` always points to the BatchGroup for blocks that |
| 823 | // will be pushed next. `Prev` is the element right before `Cur`. |
| 824 | BatchGroupT *Prev = nullptr; |
| 825 | |
| 826 | while (Cur != nullptr && |
           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
| 828 | Prev = Cur; |
| 829 | Cur = Cur->Next; |
| 830 | } |
| 831 | |
| 832 | if (Cur == nullptr || |
        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroup(Array[0]));
| 835 | if (Prev == nullptr) |
| 836 | Region->FreeListInfo.BlockList.push_front(Cur); |
| 837 | else |
| 838 | Region->FreeListInfo.BlockList.insert(Prev, Cur); |
| 839 | } |
| 840 | |
| 841 | // All the blocks are from the same group, just push without checking group |
| 842 | // id. |
| 843 | if (SameGroup) { |
| 844 | for (u32 I = 0; I < Size; ++I) |
| 845 | DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase); |
| 846 | |
| 847 | InsertBlocks(Cur, Array, Size); |
| 848 | return; |
| 849 | } |
| 850 | |
| 851 | // The blocks are sorted by group id. Determine the segment of group and |
| 852 | // push them to their group together. |
| 853 | u32 Count = 1; |
| 854 | for (u32 I = 1; I < Size; ++I) { |
      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
| 856 | DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase); |
| 857 | InsertBlocks(Cur, Array + I - Count, Count); |
| 858 | |
| 859 | while (Cur != nullptr && |
               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
| 861 | Prev = Cur; |
| 862 | Cur = Cur->Next; |
| 863 | } |
| 864 | |
| 865 | if (Cur == nullptr || |
            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroup(Array[I]));
| 868 | DCHECK_NE(Prev, nullptr); |
| 869 | Region->FreeListInfo.BlockList.insert(Prev, Cur); |
| 870 | } |
| 871 | |
| 872 | Count = 1; |
| 873 | } else { |
| 874 | ++Count; |
| 875 | } |
| 876 | } |
| 877 | |
| 878 | InsertBlocks(Cur, Array + Size - Count, Count); |
| 879 | } |
| 880 | |
| 881 | u16 popBlocksWithCV(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
| 882 | RegionInfo *Region, CompactPtrT *ToArray, |
| 883 | const u16 MaxBlockCount, bool &ReportRegionExhausted) { |
| 884 | u16 PopCount = 0; |
| 885 | |
| 886 | while (true) { |
      // We only expect one thread to be refilling the freelist; the other
      // threads will be waiting for either the completion of
      // `populateFreeListAndPopBlocks()` or `pushBlocks()` called by other
      // threads.
| 891 | bool PopulateFreeList = false; |
| 892 | { |
| 893 | ScopedLock FL(Region->FLLock); |
| 894 | if (!Region->isPopulatingFreeList) { |
| 895 | Region->isPopulatingFreeList = true; |
| 896 | PopulateFreeList = true; |
| 897 | } |
| 898 | } |
| 899 | |
| 900 | if (PopulateFreeList) { |
| 901 | ScopedLock ML(Region->MMLock); |
| 902 | |
| 903 | const bool RegionIsExhausted = Region->Exhausted; |
| 904 | if (!RegionIsExhausted) { |
| 905 | PopCount = populateFreeListAndPopBlocks( |
| 906 | SizeClassAllocator, ClassId, Region, ToArray, MaxBlockCount); |
| 907 | } |
| 908 | ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted; |
| 909 | |
| 910 | { |
          // Before reacquiring the `FLLock`, the freelist may be used up again
          // and some threads may be waiting for the freelist refill by the
          // current thread. It's important to set
          // `Region->isPopulatingFreeList` to false so the threads about to
          // sleep will notice the status change.
| 916 | ScopedLock FL(Region->FLLock); |
| 917 | Region->isPopulatingFreeList = false; |
| 918 | Region->FLLockCV.notifyAll(Region->FLLock); |
| 919 | } |
| 920 | |
| 921 | break; |
| 922 | } |
| 923 | |
      // At this point, there are two preconditions to be met before waiting:
      // 1. The freelist is empty.
      // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
      //    `populateFreeListAndPopBlocks()`.
      //
      // Note that there is a chance that the freelist is empty but
      // Region->isPopulatingFreeList == false, because all the newly populated
      // blocks were used up right after the refill. Therefore, we have to
      // check if someone is still populating the freelist.
| 933 | ScopedLock FL(Region->FLLock); |
| 934 | PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Region, ToArray, |
| 935 | MaxBlockCount); |
| 936 | if (PopCount != 0U) |
| 937 | break; |
| 938 | |
| 939 | if (!Region->isPopulatingFreeList) |
| 940 | continue; |
| 941 | |
      // Now the freelist is empty and someone is doing the refill. We will
      // wait until anyone refills the freelist or someone finishes doing
      // `populateFreeListAndPopBlocks()`. The refill can be done by
      // `populateFreeListAndPopBlocks()`, `pushBlocks()`,
      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
| 947 | Region->FLLockCV.wait(Region->FLLock); |
| 948 | |
| 949 | PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Region, ToArray, |
| 950 | MaxBlockCount); |
| 951 | if (PopCount != 0U) |
| 952 | break; |
| 953 | } |
| 954 | |
| 955 | return PopCount; |
| 956 | } |
| 957 | |
| 958 | u16 popBlocksImpl(SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, |
| 959 | RegionInfo *Region, CompactPtrT *ToArray, |
| 960 | const u16 MaxBlockCount) REQUIRES(Region->FLLock) { |
| 961 | if (Region->FreeListInfo.BlockList.empty()) |
| 962 | return 0U; |
| 963 | |
| 964 | SinglyLinkedList<TransferBatchT> &Batches = |
| 965 | Region->FreeListInfo.BlockList.front()->Batches; |
| 966 | |
| 967 | if (Batches.empty()) { |
| 968 | DCHECK_EQ(ClassId, SizeClassMap::BatchClassId); |
| 969 | BatchGroupT *BG = Region->FreeListInfo.BlockList.front(); |
| 970 | Region->FreeListInfo.BlockList.pop_front(); |
| 971 | |
      // The block used by `BatchGroup` is from BatchClassId. Turn the block
      // into a `TransferBatch` with a single block.
| 974 | TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG); |
| 975 | ToArray[0] = |
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
| 977 | Region->FreeListInfo.PoppedBlocks += 1; |
| 978 | return 1U; |
| 979 | } |
| 980 | |
    // So far, instead of always filling blocks up to `MaxBlockCount`, we only
    // examine a single `TransferBatch` to minimize the time spent in the
    // primary allocator. Besides, the sizes of `TransferBatch` and
    // `SizeClassAllocatorT::getMaxCached()` may also impact the time spent on
    // accessing the primary allocator.
| 986 | // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount` |
| 987 | // blocks and/or adjust the size of `TransferBatch` according to |
| 988 | // `SizeClassAllocatorT::getMaxCached()`. |
| 989 | TransferBatchT *B = Batches.front(); |
| 990 | DCHECK_NE(B, nullptr); |
| 991 | DCHECK_GT(B->getCount(), 0U); |
| 992 | |
    // BatchClassId should always take all blocks in the TransferBatch. Read
    // the comment in `pushBatchClassBlocks()` for more details.
| 995 | const u16 PopCount = ClassId == SizeClassMap::BatchClassId |
| 996 | ? B->getCount() |
| 997 | : Min(MaxBlockCount, B->getCount()); |
| 998 | B->moveNToArray(ToArray, PopCount); |
| 999 | |
| 1000 | // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be |
| 1001 | // done without holding `FLLock`. |
| 1002 | if (B->empty()) { |
| 1003 | Batches.pop_front(); |
| 1004 | // `TransferBatch` of BatchClassId is self-contained, no need to |
| 1005 | // deallocate. Read the comment in `pushBatchClassBlocks()` for more |
| 1006 | // details. |
| 1007 | if (ClassId != SizeClassMap::BatchClassId) |
| 1008 | SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, B); |
| 1009 | |
| 1010 | if (Batches.empty()) { |
| 1011 | BatchGroupT *BG = Region->FreeListInfo.BlockList.front(); |
| 1012 | Region->FreeListInfo.BlockList.pop_front(); |
| 1013 | |
        // We don't keep a BatchGroup with zero blocks to avoid empty-checking
        // while allocating. Note that the block used for constructing the
        // BatchGroup is recorded as a free block in the last element of
        // BatchGroup::Batches, which means that once we pop the last
        // TransferBatch, that block is implicitly deallocated.
| 1019 | if (ClassId != SizeClassMap::BatchClassId) |
| 1020 | SizeClassAllocator->deallocate(SizeClassMap::BatchClassId, BG); |
| 1021 | } |
| 1022 | } |
| 1023 | |
| 1024 | Region->FreeListInfo.PoppedBlocks += PopCount; |
| 1025 | |
| 1026 | return PopCount; |
| 1027 | } |
| 1028 | |
| 1029 | NOINLINE u16 populateFreeListAndPopBlocks( |
| 1030 | SizeClassAllocatorT *SizeClassAllocator, uptr ClassId, RegionInfo *Region, |
| 1031 | CompactPtrT *ToArray, const u16 MaxBlockCount) REQUIRES(Region->MMLock) |
| 1032 | EXCLUDES(Region->FLLock) { |
| 1033 | if (!Config::getEnableContiguousRegions() && |
| 1034 | !Region->MemMapInfo.MemMap.isAllocated()) { |
| 1035 | ReservedMemoryT ReservedMemory; |
      if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, RegionSize,
                                          "scudo:primary_reserve",
                                          MAP_ALLOWNOMEM))) {
        Printf("Can't reserve pages for size class %zu.\n",
               getSizeByClassId(ClassId));
        return 0U;
      }
      initRegion(Region, ClassId,
                 ReservedMemory.dispatch(ReservedMemory.getBase(),
                                         ReservedMemory.getCapacity()),
                 /*EnableRandomOffset=*/false);
| 1047 | } |
| 1048 | |
| 1049 | DCHECK(Region->MemMapInfo.MemMap.isAllocated()); |
| 1050 | const uptr Size = getSizeByClassId(ClassId); |
| 1051 | const u16 MaxCount = SizeClassAllocatorT::getMaxCached(Size); |
| 1052 | const uptr RegionBeg = Region->RegionBeg; |
| 1053 | const uptr MappedUser = Region->MemMapInfo.MappedUser; |
| 1054 | const uptr TotalUserBytes = |
| 1055 | Region->MemMapInfo.AllocatedUser + MaxCount * Size; |
| 1056 | // Map more space for blocks, if necessary. |
| 1057 | if (TotalUserBytes > MappedUser) { |
| 1058 | // Do the mmap for the user memory. |
| 1059 | const uptr MapSize = |
          roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
| 1061 | const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId); |
| 1062 | if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) { |
| 1063 | Region->Exhausted = true; |
| 1064 | return 0U; |
| 1065 | } |
| 1066 | |
| 1067 | if (UNLIKELY(!Region->MemMapInfo.MemMap.remap( |
              RegionBeg + MappedUser, MapSize, "scudo:primary",
| 1069 | MAP_ALLOWNOMEM | MAP_RESIZABLE | |
| 1070 | (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG |
| 1071 | : 0)))) { |
| 1072 | return 0U; |
| 1073 | } |
| 1074 | Region->MemMapInfo.MappedUser += MapSize; |
| 1075 | SizeClassAllocator->getStats().add(StatMapped, MapSize); |
| 1076 | } |
| 1077 | |
| 1078 | const u32 NumberOfBlocks = |
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((Region->MemMapInfo.MappedUser -
                              Region->MemMapInfo.AllocatedUser) /
                             Size));
| 1083 | DCHECK_GT(NumberOfBlocks, 0); |
| 1084 | |
| 1085 | constexpr u32 ShuffleArraySize = |
| 1086 | MaxNumBatches * TransferBatchT::MaxNumCached; |
| 1087 | CompactPtrT ShuffleArray[ShuffleArraySize]; |
| 1088 | DCHECK_LE(NumberOfBlocks, ShuffleArraySize); |
| 1089 | |
| 1090 | const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId); |
| 1091 | uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser; |
| 1092 | for (u32 I = 0; I < NumberOfBlocks; I++, P += Size) |
      ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
| 1094 | |
| 1095 | ScopedLock L(Region->FLLock); |
| 1096 | |
| 1097 | if (ClassId != SizeClassMap::BatchClassId) { |
| 1098 | u32 N = 1; |
      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
| 1100 | for (u32 I = 1; I < NumberOfBlocks; I++) { |
| 1101 | if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) { |
| 1102 | shuffle(ShuffleArray + I - N, N, &Region->RandState); |
          pushBlocksImpl(SizeClassAllocator, ClassId, Region,
                         ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroup(ShuffleArray[I]);
| 1108 | } else { |
| 1109 | ++N; |
| 1110 | } |
| 1111 | } |
| 1112 | |
| 1113 | shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState); |
      pushBlocksImpl(SizeClassAllocator, ClassId, Region,
                     &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
| 1117 | } else { |
      pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
| 1119 | } |
| 1120 | |
| 1121 | const u16 PopCount = popBlocksImpl(SizeClassAllocator, ClassId, Region, |
| 1122 | ToArray, MaxBlockCount); |
| 1123 | DCHECK_NE(PopCount, 0U); |
| 1124 | |
    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `pushBlocks()` and `popBlocks()`, which are the
    // external interfaces. `populateFreeListAndPopBlocks()` is an internal
    // interface, so we set the values back to avoid incorrectly updating the
    // stats.
| 1129 | Region->FreeListInfo.PushedBlocks -= NumberOfBlocks; |
| 1130 | |
| 1131 | const uptr AllocatedUser = Size * NumberOfBlocks; |
| 1132 | SizeClassAllocator->getStats().add(StatFree, AllocatedUser); |
| 1133 | Region->MemMapInfo.AllocatedUser += AllocatedUser; |
| 1134 | |
| 1135 | return PopCount; |
| 1136 | } |
| 1137 | |
| 1138 | void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region) |
| 1139 | REQUIRES(Region->MMLock, Region->FLLock) { |
| 1140 | if (Region->MemMapInfo.MappedUser == 0) |
| 1141 | return; |
| 1142 | const uptr BlockSize = getSizeByClassId(ClassId); |
| 1143 | const uptr InUseBlocks = |
| 1144 | Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks; |
| 1145 | const uptr BytesInFreeList = |
| 1146 | Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize; |
| 1147 | uptr RegionPushedBytesDelta = 0; |
| 1148 | if (BytesInFreeList >= |
| 1149 | Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) { |
| 1150 | RegionPushedBytesDelta = |
| 1151 | BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint; |
| 1152 | } |
| 1153 | const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize; |
| 1154 | Str->append(Format: "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu " |
| 1155 | "inuse: %6zu total: %6zu releases attempted: %6zu last " |
| 1156 | "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx " |
| 1157 | "(0x%zx)\n" , |
| 1158 | Region->Exhausted ? "E" : " " , ClassId, |
| 1159 | getSizeByClassId(ClassId), Region->MemMapInfo.MappedUser >> 10, |
| 1160 | Region->FreeListInfo.PoppedBlocks, |
| 1161 | Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks, |
| 1162 | Region->ReleaseInfo.NumReleasesAttempted, |
| 1163 | Region->ReleaseInfo.LastReleasedBytes >> 10, |
| 1164 | RegionPushedBytesDelta >> 10, Region->RegionBeg, |
| 1165 | getRegionBaseByClassId(ClassId)); |
| 1166 | } |
| 1167 | |
| 1168 | void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId, |
| 1169 | ScopedString *Str) REQUIRES(Region->MMLock) { |
| 1170 | const uptr BlockSize = getSizeByClassId(ClassId); |
| 1171 | const uptr AllocatedUserEnd = |
| 1172 | Region->MemMapInfo.AllocatedUser + Region->RegionBeg; |
| 1173 | |
| 1174 | SinglyLinkedList<BatchGroupT> GroupsToRelease; |
| 1175 | { |
| 1176 | ScopedLock L(Region->FLLock); |
| 1177 | GroupsToRelease = Region->FreeListInfo.BlockList; |
| 1178 | Region->FreeListInfo.BlockList.clear(); |
| 1179 | } |
| 1180 | |
| 1181 | FragmentationRecorder Recorder; |
| 1182 | if (!GroupsToRelease.empty()) { |
| 1183 | PageReleaseContext Context = |
| 1184 | markFreeBlocks(Region, BlockSize, AllocatedUserEnd, |
                         getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
| 1186 | auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; }; |
| 1187 | releaseFreeMemoryToOS(Context, Recorder, SkipRegion); |
| 1188 | |
| 1189 | mergeGroupsToReleaseBack(Region, GroupsToRelease); |
| 1190 | } |
| 1191 | |
| 1192 | ScopedLock L(Region->FLLock); |
| 1193 | const uptr PageSize = getPageSizeCached(); |
| 1194 | const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize; |
| 1195 | const uptr InUseBlocks = |
| 1196 | Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks; |
| 1197 | const uptr AllocatedPagesCount = |
| 1198 | roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize; |
| 1199 | DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount()); |
| 1200 | const uptr InUsePages = |
| 1201 | AllocatedPagesCount - Recorder.getReleasedPagesCount(); |
| 1202 | const uptr InUseBytes = InUsePages * PageSize; |
| 1203 | |
| 1204 | uptr Integral; |
| 1205 | uptr Fractional; |
    computePercentage(BlockSize * InUseBlocks, InUseBytes, &Integral,
                      &Fractional);
    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
| 1210 | ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages, |
| 1211 | AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional); |
| 1212 | } |
| 1213 | |
| 1214 | void getMemoryGroupFragmentationInfoInRegion(RegionInfo *Region, uptr ClassId, |
| 1215 | ScopedString *Str) |
| 1216 | REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) { |
| 1217 | const uptr BlockSize = getSizeByClassId(ClassId); |
| 1218 | const uptr AllocatedUserEnd = |
| 1219 | Region->MemMapInfo.AllocatedUser + Region->RegionBeg; |
| 1220 | |
| 1221 | SinglyLinkedList<BatchGroupT> GroupsToRelease; |
| 1222 | { |
| 1223 | ScopedLock L(Region->FLLock); |
| 1224 | GroupsToRelease = Region->FreeListInfo.BlockList; |
| 1225 | Region->FreeListInfo.BlockList.clear(); |
| 1226 | } |
| 1227 | |
| 1228 | constexpr uptr GroupSize = (1UL << GroupSizeLog); |
| 1229 | constexpr uptr MaxNumGroups = RegionSize / GroupSize; |
| 1230 | |
| 1231 | MemoryGroupFragmentationRecorder<GroupSize, MaxNumGroups> Recorder; |
| 1232 | if (!GroupsToRelease.empty()) { |
| 1233 | PageReleaseContext Context = |
| 1234 | markFreeBlocks(Region, BlockSize, AllocatedUserEnd, |
                         getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
| 1236 | auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; }; |
| 1237 | releaseFreeMemoryToOS(Context, Recorder, SkipRegion); |
| 1238 | |
| 1239 | mergeGroupsToReleaseBack(Region, GroupsToRelease); |
| 1240 | } |
| 1241 | |
| 1242 | Str->append(Format: "MemoryGroupFragmentationInfo in Region %zu (%zu)\n" , ClassId, |
| 1243 | BlockSize); |
| 1244 | |
| 1245 | const uptr MaxNumGroupsInUse = |
| 1246 | roundUp(Region->MemMapInfo.AllocatedUser, GroupSize) / GroupSize; |
| 1247 | for (uptr I = 0; I < MaxNumGroupsInUse; ++I) { |
| 1248 | uptr Integral; |
| 1249 | uptr Fractional; |
| 1250 | computePercentage(Recorder.NumPagesInOneGroup - |
| 1251 | Recorder.getNumFreePages(I), |
| 1252 | Recorder.NumPagesInOneGroup, &Integral, &Fractional); |
| 1253 | Str->append(Format: "MemoryGroup #%zu (0x%zx): util: %3zu.%02zu%%\n" , I, |
| 1254 | Region->RegionBeg + I * GroupSize, Integral, Fractional); |
| 1255 | } |
| 1256 | } |
| 1257 | |
| 1258 | NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId, |
| 1259 | ReleaseToOS ReleaseType = ReleaseToOS::Normal) |
| 1260 | REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) { |
| 1261 | const uptr BlockSize = getSizeByClassId(ClassId); |
| 1262 | uptr BytesInFreeList; |
| 1263 | const uptr AllocatedUserEnd = |
| 1264 | Region->MemMapInfo.AllocatedUser + Region->RegionBeg; |
| 1265 | uptr RegionPushedBytesDelta = 0; |
| 1266 | SinglyLinkedList<BatchGroupT> GroupsToRelease; |
| 1267 | |
| 1268 | { |
| 1269 | ScopedLock L(Region->FLLock); |
| 1270 | |
| 1271 | BytesInFreeList = Region->MemMapInfo.AllocatedUser - |
| 1272 | (Region->FreeListInfo.PoppedBlocks - |
| 1273 | Region->FreeListInfo.PushedBlocks) * |
| 1274 | BlockSize; |
      if (UNLIKELY(BytesInFreeList == 0))
        return 0;

      // ==================================================================== //
      // 1. Check if we have enough free blocks and if it's worth doing a page
      //    release.
      // ==================================================================== //
      if (ReleaseType != ReleaseToOS::ForceAll &&
          !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
                                   ReleaseType)) {
        return 0;
      }

      // Given that we will unlock the freelist for block operations, cache the
      // value here so that when we are adapting the `TryReleaseThreshold`
      // later, we are using the right metric.
      RegionPushedBytesDelta =
          BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;

      // ==================================================================== //
      // 2. Determine which groups can release the pages. Use a heuristic to
      //    gather groups that are candidates for doing a release.
      // ==================================================================== //
      if (ReleaseType == ReleaseToOS::ForceAll) {
        GroupsToRelease = Region->FreeListInfo.BlockList;
        Region->FreeListInfo.BlockList.clear();
      } else {
        GroupsToRelease =
            collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
                                   getCompactPtrBaseByClassId(ClassId));
      }
      if (GroupsToRelease.empty())
        return 0;
    }

    // The following steps account for the majority of the time spent in page
    // releasing, so we increment the counter here.
    ++Region->ReleaseInfo.NumReleasesAttempted;

    // Note that we have extracted the `GroupsToRelease` from the region
    // freelist. It's safe to let pushBlocks()/popBlocks() access the remaining
    // region freelist. In steps 3 and 4, we will temporarily release the
    // FLLock and lock it again before step 5.

    // ==================================================================== //
    // 3. Mark the free blocks in `GroupsToRelease` in the
    //    `PageReleaseContext`. Then we can tell which pages are in-use by
    //    querying `PageReleaseContext`.
    // ==================================================================== //
    PageReleaseContext Context =
        markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
                       getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
    if (UNLIKELY(!Context.hasBlockMarked())) {
      mergeGroupsToReleaseBack(Region, GroupsToRelease);
      return 0;
    }

    // ==================================================================== //
    // 4. Release the unused physical pages back to the OS.
    // ==================================================================== //
    RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
                                            Region->RegionBeg,
                                            Context.getReleaseOffset());
    auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    if (Recorder.getReleasedBytes() > 0) {
      // This is the case where we didn't hit the release threshold but a
      // certain period of time has passed. Thus we try to release some pages,
      // and if that releases some additional pages, it's a hint that we are
      // able to lower the threshold. Currently, this case happens when the
      // `RegionPushedBytesDelta` is over half of the `TryReleaseThreshold`. As
      // a result, we shrink the threshold to half accordingly.
      // TODO(chiahungduan): Apply the same adjustment strategy to small blocks.
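      // Illustrative numbers only (hypothetical): with a 512 KiB
      // TryReleaseThreshold, a release that was triggered below the threshold
      // (RegionPushedBytesDelta == 300 KiB) and still freed noticeably more
      // than the last attempt would shrink the threshold to
      // max(256 KiB, getMinReleaseAttemptSize(BlockSize)).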
      if (!isSmallBlock(BlockSize)) {
        if (RegionPushedBytesDelta < Region->ReleaseInfo.TryReleaseThreshold &&
            Recorder.getReleasedBytes() >
                Region->ReleaseInfo.LastReleasedBytes +
                    getMinReleaseAttemptSize(BlockSize)) {
          Region->ReleaseInfo.TryReleaseThreshold =
              Max(Region->ReleaseInfo.TryReleaseThreshold / 2,
                  getMinReleaseAttemptSize(BlockSize));
        }
      }

      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
    }
    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    if (Region->ReleaseInfo.PendingPushedBytesDelta > 0) {
      // Instead of increasing the threshold by the full amount of
      // `PendingPushedBytesDelta`, we only increase it by half so that an
      // infrequent burst of memory usage doesn't cause a big jump in the
      // threshold (which may lead to higher memory pressure).
      Region->ReleaseInfo.TryReleaseThreshold +=
          Region->ReleaseInfo.PendingPushedBytesDelta / 2;
      // This is another guard against the threshold growing indefinitely.
      // Note that we may consider making this configurable if we find a better
      // way to model it.
      Region->ReleaseInfo.TryReleaseThreshold = Min<uptr>(
          Region->ReleaseInfo.TryReleaseThreshold, (1UL << GroupSizeLog) / 2);
      Region->ReleaseInfo.PendingPushedBytesDelta = 0;
    }
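    // Sketch of the adjustment above with hypothetical numbers: with
    // GroupSizeLog == 18 (256 KiB groups), a pending delta of 96 KiB grows a
    // 64 KiB threshold to 64 + 48 == 112 KiB, and the cap keeps the threshold
    // at or below 128 KiB (half a group) no matter how large the delta gets.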

    // ====================================================================== //
    // 5. Merge the `GroupsToRelease` back to the freelist.
    // ====================================================================== //
    mergeGroupsToReleaseBack(Region, GroupsToRelease);

    return Recorder.getReleasedBytes();
  }

  bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
      REQUIRES(Region->MMLock, Region->FLLock) {
    DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
              Region->FreeListInfo.PushedBlocks);
    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, the
    // following is the region usage,
    //
    //   BytesInFreeListAtLastCheckpoint          AllocatedUser
    //                  v                               v
    //  |--------------------------------------->
    //         ^                    ^
    //   BytesInFreeList     ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do a page release. If
    // we don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may take a longer time to wait for
    // enough freed blocks because we miss the
    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList) bytes in between.
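    // A hypothetical example of the effect: if the checkpoint were left at
    // 1 MiB while the current BytesInFreeList is 512 KiB, the next 512 KiB of
    // frees would not move the delta at all; lowering the checkpoint to
    // 512 KiB lets newly freed bytes count toward the threshold immediately.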
    if (BytesInFreeList <=
        Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
    }

    const uptr RegionPushedBytesDelta =
        BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;

    if (ReleaseType == ReleaseToOS::Normal) {
      if (RegionPushedBytesDelta < Region->ReleaseInfo.TryReleaseThreshold / 2)
        return false;

      const s64 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return false;

      const u64 IntervalNs = static_cast<u64>(IntervalMs) * 1000000;
      const u64 CurTimeNs = getMonotonicTimeFast();
      const u64 DiffSinceLastReleaseNs =
          CurTimeNs - Region->ReleaseInfo.LastReleaseAtNs;

      // At this point, `RegionPushedBytesDelta` is more than half of
      // `TryReleaseThreshold`. If the last release happened more than 2
      // release intervals ago, we will still try to see if there's any chance
      // to release some memory even if it doesn't exceed the threshold.
      if (RegionPushedBytesDelta < Region->ReleaseInfo.TryReleaseThreshold) {
        // We want the threshold to respond more quickly to varying memory
        // usage patterns. According to data collected during experiments
        // (which were done with 1, 2, 4 and 8 intervals), `2` strikes a better
        // balance between memory usage and the number of page release
        // attempts.
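        // E.g., with a hypothetical 5000 ms release interval, such a region is
        // only reconsidered once at least 10 seconds have passed since its
        // last release.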
        if (DiffSinceLastReleaseNs < 2 * IntervalNs)
          return false;
      } else if (DiffSinceLastReleaseNs < IntervalNs) {
        // In this case, we are over the threshold but we just did some page
        // release in the same release interval. This is a hint that we may
        // want a higher threshold so that we can release more memory at once.
        // `TryReleaseThreshold` will be adjusted according to how many bytes
        // are not released, i.e., the `PendingPushedBytesDelta` here.
        // TODO(chiahungduan): Apply the same adjustment strategy to small
        // blocks.
        if (!isSmallBlock(BlockSize))
          Region->ReleaseInfo.PendingPushedBytesDelta = RegionPushedBytesDelta;

        // Memory was returned recently.
        return false;
      }
    } // if (ReleaseType == ReleaseToOS::Normal)

    return true;
  }

  SinglyLinkedList<BatchGroupT>
  collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
                         const uptr AllocatedUserEnd, const uptr CompactPtrBase)
      REQUIRES(Region->MMLock, Region->FLLock) {
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr PageSize = getPageSizeCached();
    SinglyLinkedList<BatchGroupT> GroupsToRelease;

    // We are examining each group and will take the minimum distance to the
    // release threshold as the next `TryReleaseThreshold`. Note that if the
    // size of free blocks has reached the release threshold, the distance to
    // the next release will be PageSize * SmallerBlockReleasePageDelta. See
    // the comment on `SmallerBlockReleasePageDelta` for more details.
    uptr MinDistToThreshold = GroupSize;

    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
                     *Prev = nullptr;
         BG != nullptr;) {
      // Group boundaries are always GroupSize-aligned from the CompactPtr
      // base. The layout of memory groups is like,
      //
      //     (CompactPtrBase)
      //  #1 CompactPtrGroupBase   #2 CompactPtrGroupBase           ...
      //           |                       |                       |
      //           v                       v                       v
      //           +-----------------------+-----------------------+
      //            \                     / \                     /
      //             ---   GroupSize   ---   ---   GroupSize   ---
      //
      // After decompacting the CompactPtrGroupBase, we expect the alignment
      // property to hold as well.
      const uptr BatchGroupBase =
          decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
      DCHECK_LE(Region->RegionBeg, BatchGroupBase);
      DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
      DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all caches used.
      const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
                             BG->Batches.front()->getCount();
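      // E.g., with hypothetical values: 3 TransferBatches, MaxCachedPerBatch
      // == 12, and a front batch holding 5 blocks, NumBlocks == 2 * 12 + 5 ==
      // 29.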
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
        BG->BytesInBGAtLastCheckpoint = BytesInBG;
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      const uptr PushedBytesDelta = BytesInBG - BG->BytesInBGAtLastCheckpoint;
      if (PushedBytesDelta < getMinReleaseAttemptSize(BlockSize)) {
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      // Given the randomness property, we try to release the pages only if
      // the bytes used by free blocks exceed a certain proportion of the group
      // size. Note that this heuristic only applies when all the spaces in a
      // BatchGroup are allocated.
      if (isSmallBlock(BlockSize)) {
        const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
        const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                            ? GroupSize
                                            : AllocatedUserEnd - BatchGroupBase;
        const uptr ReleaseThreshold =
            (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
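        // For illustration (hypothetical numbers): with a fully allocated
        // 256 KiB group and BlockSize == 32, the bar is (100 - 1 - 32 / 16)%
        // == 97% of the group, so roughly 97% of the group's bytes must be
        // sitting in the freelist before we consider it high density.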
        const bool HighDensity = BytesInBG >= ReleaseThreshold;
        const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
        // If all blocks in the group are released, we will do range marking
        // which is fast. Otherwise, we will wait until we have accumulated
        // a certain amount of free memory.
        const bool ReachReleaseDelta =
            MayHaveReleasedAll
                ? true
                : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;

        if (!HighDensity) {
          DCHECK_LE(BytesInBG, ReleaseThreshold);
          // The following diagram shows the usage of a memory group,
          //
          //     BytesInBG             ReleaseThreshold
          //  /             \                 v
          //  +---+---------------------------+-----+
          //  |   |                           |     |
          //  +---+---------------------------+-----+
          //   \ /                                  ^
          //    PushedBytesDelta                 GroupEnd
          MinDistToThreshold =
              Min(MinDistToThreshold,
                  ReleaseThreshold - BytesInBG + PushedBytesDelta);
        } else {
          // If it reaches high density at this round, the next time we will
          // try to release is based on SmallerBlockReleasePageDelta.
          MinDistToThreshold =
              Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
        }

        if (!HighDensity || !ReachReleaseDelta) {
          Prev = BG;
          BG = BG->Next;
          continue;
        }
      }

      // If `BG` is the first BatchGroupT in the list, we only need to advance
      // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
      // for `Prev`.
      //
      //          (BG)   (BG->Next)
      //   Prev     Cur      BG
      //    |        |       |
      //    v        v       v
      //   nil     +--+    +--+
      //           |X | -> |  | -> ...
      //           +--+    +--+
      //
      // Otherwise, `Prev` will be used to extract the `Cur` from the
      // `FreeListInfo.BlockList`.
      //
      //          (BG)   (BG->Next)
      //   Prev     Cur      BG
      //    |        |       |
      //    v        v       v
      //   +--+     +--+    +--+
      //   |  | ->  |X | -> |  | -> ...
      //   +--+     +--+    +--+
      //
      // After FreeListInfo.BlockList::extract(),
      //
      //   Prev     Cur      BG
      //    |        |       |
      //    v        v       v
      //   +--+     +--+    +--+
      //   |  |-+   |X |  +->|  | -> ...
      //   +--+ |   +--+  |  +--+
      //        +---------+
      //
      // Note that we need to advance before pushing this BatchGroup to
      // GroupsToRelease because it's a destructive operation.

      BatchGroupT *Cur = BG;
      BG = BG->Next;

      // Ideally, we may want to update this only after a successful release.
      // However, for smaller blocks, each block marking is a costly operation.
      // Therefore, we update it earlier.
      // TODO: Consider updating this after releasing pages if
      // `ReleaseRecorder` can tell the released bytes in each group.
      Cur->BytesInBGAtLastCheckpoint = BytesInBG;

      if (Prev != nullptr)
        Region->FreeListInfo.BlockList.extract(Prev, Cur);
      else
        Region->FreeListInfo.BlockList.pop_front();
      GroupsToRelease.push_back(Cur);
    }

    // Only small blocks have the adaptive `TryReleaseThreshold`.
    if (isSmallBlock(BlockSize)) {
      // If MinDistToThreshold was never updated, each memory group may have
      // had less than a page size of blocks pushed. In that case, just set it
      // back to normal.
      if (MinDistToThreshold == GroupSize)
        MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
      Region->ReleaseInfo.TryReleaseThreshold = MinDistToThreshold;
    }

    return GroupsToRelease;
  }

  PageReleaseContext
  markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
                 const uptr AllocatedUserEnd, const uptr CompactPtrBase,
                 SinglyLinkedList<BatchGroupT> &GroupsToRelease)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    const uptr GroupSize = (1UL << GroupSizeLog);
    auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
      return decompactPtrInternal(CompactPtrBase, CompactPtr);
    };

    const uptr ReleaseBase = decompactGroupBase(
        CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
    const uptr LastGroupEnd =
        Min(decompactGroupBase(CompactPtrBase,
                               GroupsToRelease.back()->CompactPtrGroupBase) +
                GroupSize,
            AllocatedUserEnd);
    // The last block may straddle the group boundary. Round up to BlockSize to
    // get the exact range.
    const uptr ReleaseEnd =
        roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
        Region->RegionBeg;
    const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
    const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
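    // A hypothetical example of the rounding above: with RegionBeg == 0x1000,
    // BlockSize == 48, and LastGroupEnd == RegionBeg + 1000, roundUpSlow(1000,
    // 48) == 1008, so ReleaseEnd extends past the group end to cover the block
    // that straddles it.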

    PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                               ReleaseRangeSize, ReleaseOffset);
    // In the rare case that the PageMap allocation fails, we may not be able
    // to do the page release.
    if (UNLIKELY(!Context.ensurePageMapAllocated()))
      return Context;

    for (BatchGroupT &BG : GroupsToRelease) {
      const uptr BatchGroupBase =
          decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
      const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
      const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                          ? GroupSize
                                          : AllocatedUserEnd - BatchGroupBase;
      const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
      const bool MayContainLastBlockInRegion =
          BatchGroupUsedEnd == AllocatedUserEnd;
      const bool BlockAlignedWithUsedEnd =
          (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;

      uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      if (!BlockAlignedWithUsedEnd)
        ++MaxContainedBlocks;
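      // Illustration (hypothetical numbers): a fully used 256 KiB group with
      // BlockSize == 48 holds 262144 / 48 == 5461 whole blocks; if the used
      // end is not block-aligned from RegionBeg, one more block straddles it,
      // hence the increment above.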

      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();

      if (NumBlocks == MaxContainedBlocks) {
        for (const auto &It : BG.Batches) {
          if (&It != BG.Batches.front())
            DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
        }

        Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
                                      Region->RegionBeg, /*RegionIndex=*/0,
                                      Region->MemMapInfo.AllocatedUser);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);
        // Note that we don't always visit blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(
            BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
            Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
      }
    }

    DCHECK(Context.hasBlockMarked());

    return Context;
  }

  void mergeGroupsToReleaseBack(RegionInfo *Region,
                                SinglyLinkedList<BatchGroupT> &GroupsToRelease)
      REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
    ScopedLock L(Region->FLLock);

    // After merging two freelists, we may have redundant `BatchGroup`s that
    // need to be recycled. The number of unused `BatchGroup`s is expected to
    // be small, so pick a constant which is inferred from real programs.
    constexpr uptr MaxUnusedSize = 8;
    CompactPtrT Blocks[MaxUnusedSize];
    u32 Idx = 0;
    RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
    // when we are manipulating the freelist of `BatchClassRegion`. Instead, we
    // should just push them back to the freelist when we merge two
    // `BatchGroup`s. This logic hasn't been implemented because we don't
    // support releasing pages in `BatchClassRegion`.
    DCHECK_NE(BatchClassRegion, Region);

    // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
    // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
    // sorted.
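    // Illustrative merge (group bases are hypothetical): merging
    //   BlockList:       [0x0, 0x40000, 0xc0000]
    //   GroupsToRelease: [0x80000, 0x100000]
    // yields [0x0, 0x40000, 0x80000, 0xc0000, 0x100000]; a group that appears
    // in both lists is merged into one BatchGroup rather than inserted twice.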
    for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
                     *Prev = nullptr;
         ;) {
      if (BG == nullptr || GroupsToRelease.empty()) {
        if (!GroupsToRelease.empty())
          Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
        break;
      }

      DCHECK(!BG->Batches.empty());

      if (BG->CompactPtrGroupBase <
          GroupsToRelease.front()->CompactPtrGroupBase) {
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      BatchGroupT *Cur = GroupsToRelease.front();
      TransferBatchT *UnusedTransferBatch = nullptr;
      GroupsToRelease.pop_front();

      if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
        // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
        // collecting the `GroupsToRelease`.
        BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
        const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;

        // Note that the first TransferBatch in each `Batches` may not be full
        // and only the first TransferBatch can have non-full blocks. Thus we
        // have to merge them before appending one to the other.
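        // For illustration with hypothetical counts (MaxCachedPerBatch == 12):
        // if `BG`'s front batch holds 5 blocks and `Cur`'s front batch holds
        // 4, those 4 blocks are moved into `BG`'s front batch (now 9) and the
        // emptied TransferBatch is recycled through `UnusedTransferBatch`.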
        if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
          BG->Batches.append_back(&Cur->Batches);
        } else {
          TransferBatchT *NonFullBatch = Cur->Batches.front();
          Cur->Batches.pop_front();
          const u16 NonFullBatchCount = NonFullBatch->getCount();
          // The remaining Batches in `Cur` are full.
          BG->Batches.append_back(&Cur->Batches);

          if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
            // Only 1 non-full TransferBatch, push it to the front.
            BG->Batches.push_front(NonFullBatch);
          } else {
            const u16 NumBlocksToMove = static_cast<u16>(
                Min(static_cast<u16>(MaxCachedPerBatch -
                                     BG->Batches.front()->getCount()),
                    NonFullBatchCount));
            BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
                                                         NumBlocksToMove);
            if (NonFullBatch->isEmpty())
              UnusedTransferBatch = NonFullBatch;
            else
              BG->Batches.push_front(NonFullBatch);
          }
        }

        const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
        if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
          ScopedLock L(BatchClassRegion->FLLock);
          pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
          if (conditionVariableEnabled())
            BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
          Idx = 0;
        }
        Blocks[Idx++] =
            compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
        if (UnusedTransferBatch) {
          Blocks[Idx++] =
              compactPtr(SizeClassMap::BatchClassId,
                         reinterpret_cast<uptr>(UnusedTransferBatch));
        }
        Prev = BG;
        BG = BG->Next;
        continue;
      }

      // At this point, `BG` is the first BatchGroup whose CompactPtrGroupBase
      // is larger than that of the first element in `GroupsToRelease`. We need
      // to insert `GroupsToRelease::front()` (which is `Cur` below) before
      // `BG`.
      //
      //   1. If `Prev` is nullptr, we simply push `Cur` to the front of
      //      FreeListInfo.BlockList.
      //   2. Otherwise, use `insert()` which inserts an element next to
      //      `Prev`.
      //
      // Afterwards, we don't need to advance `BG` because the order between
      // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
      if (Prev == nullptr)
        Region->FreeListInfo.BlockList.push_front(Cur);
      else
        Region->FreeListInfo.BlockList.insert(Prev, Cur);
      DCHECK_EQ(Cur->Next, BG);
      Prev = Cur;
    }

    if (Idx != 0) {
      ScopedLock L(BatchClassRegion->FLLock);
      pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
      if (conditionVariableEnabled())
        BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
    }

    if (SCUDO_DEBUG) {
      BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
      for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
           Prev = Cur, Cur = Cur->Next) {
        CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
      }
    }

    if (conditionVariableEnabled())
      Region->FLLockCV.notifyAll(Region->FLLock);
  }

  // The minimum size of pushed blocks for which we will try to release the
  // pages in that size class.
  uptr SmallerBlockReleasePageDelta = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
};

} // namespace scudo

#endif // SCUDO_PRIMARY64_H_