| 1 | //===-- secondary.h ---------------------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #ifndef SCUDO_SECONDARY_H_ |
| 10 | #define SCUDO_SECONDARY_H_ |
| 11 | |
| 12 | #include "chunk.h" |
| 13 | #include "common.h" |
| 14 | #include "list.h" |
| 15 | #include "mem_map.h" |
| 16 | #include "memtag.h" |
| 17 | #include "mutex.h" |
| 18 | #include "options.h" |
| 19 | #include "stats.h" |
| 20 | #include "string_utils.h" |
| 21 | #include "thread_annotations.h" |
| 22 | #include "vector.h" |
| 23 | |
| 24 | namespace scudo { |
| 25 | |
| 26 | // This allocator wraps the platform allocation primitives, and as such is on |
| 27 | // the slower side and should preferably be used for larger sized allocations. |
| 28 | // Blocks allocated will be preceded and followed by a guard page, and hold |
| 29 | // their own header that is not checksummed: the guard pages and the Combined |
| 30 | // header should be enough for our purpose. |
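//
// A block is laid out roughly as follows (see allocate() below):
//
//   [guard page][...][LargeBlock::Header][Chunk header][user data][guard page]
//
// with the headers and user data placed towards the end of the committed
// region so that the returned pointer honors the requested alignment.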
| 31 | |
| 32 | namespace LargeBlock { |
| 33 | |
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};
| 44 | |
static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");
| 49 | |
constexpr uptr getHeaderSize() { return sizeof(Header); }
| 51 | |
template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}
| 57 | |
template <typename Config> static Header *getHeader(uptr Ptr) {
| 59 | return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1; |
| 60 | } |
| 61 | |
template <typename Config> static Header *getHeader(const void *Ptr) {
| 63 | return getHeader<Config>(reinterpret_cast<uptr>(Ptr)); |
| 64 | } |
| 65 | |
| 66 | } // namespace LargeBlock |
| 67 | |
| 68 | static inline void unmap(MemMapT &MemMap) { MemMap.unmap(); } |
| 69 | |
| 70 | namespace { |
| 71 | |
| 72 | struct CachedBlock { |
| 73 | static constexpr u16 CacheIndexMax = UINT16_MAX; |
| 74 | static constexpr u16 EndOfListVal = CacheIndexMax; |
| 75 | |
  // We allow a certain amount of fragmentation, and part of the fragmented
  // bytes will be released by `releaseAndZeroPagesToOS()`. This improves the
  // cache hit rate while limiting the impact on RSS. See more details in the
  // `MapAllocatorCache::retrieve()` section.
  //
  // We arrived at this default value after noticing that mapping in larger
  // memory regions performs better than releasing memory and forcing a cache
  // hit. The data suggest that beyond 4 pages, the release execution time is
  // longer than the map execution time. As such, this default is platform
  // dependent.
| 86 | static constexpr uptr MaxReleasedCachePages = 4U; |
| 87 | |
| 88 | uptr CommitBase = 0; |
| 89 | uptr CommitSize = 0; |
| 90 | uptr BlockBegin = 0; |
| 91 | MemMapT MemMap = {}; |
| 92 | u64 Time = 0; |
| 93 | u16 Next = 0; |
| 94 | u16 Prev = 0; |
| 95 | |
| 96 | bool isValid() { return CommitBase != 0; } |
| 97 | |
| 98 | void invalidate() { CommitBase = 0; } |
| 99 | }; |
| 100 | } // namespace |
| 101 | |
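// A no-op cache, used when the secondary cache is disabled: canCache() always
// returns false and retrieve() never yields a block, so all secondary
// allocations and deallocations go straight to the platform primitives.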
| 102 | template <typename Config> class MapAllocatorNoCache { |
| 103 | public: |
| 104 | void init(UNUSED s32 ReleaseToOsInterval) {} |
| 105 | CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size, |
                       UNUSED uptr Alignment, UNUSED uptr HeadersSize,
                       UNUSED uptr &EntryHeaderPos) {
| 108 | return {}; |
| 109 | } |
| 110 | void store(UNUSED Options Options, UNUSED uptr CommitBase, |
| 111 | UNUSED uptr CommitSize, UNUSED uptr BlockBegin, |
| 112 | UNUSED MemMapT MemMap) { |
| 113 | // This should never be called since canCache always returns false. |
    UNREACHABLE(
        "It is not valid to call store on MapAllocatorNoCache objects.");
| 116 | } |
| 117 | |
| 118 | bool canCache(UNUSED uptr Size) { return false; } |
| 119 | void disable() {} |
| 120 | void enable() {} |
| 121 | void releaseToOS() {} |
| 122 | void disableMemoryTagging() {} |
| 123 | void unmapTestOnly() {} |
| 124 | bool setOption(Option O, UNUSED sptr Value) { |
| 125 | if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount || |
| 126 | O == Option::MaxCacheEntrySize) |
| 127 | return false; |
| 128 | // Not supported by the Secondary Cache, but not an error either. |
| 129 | return true; |
| 130 | } |
| 131 | |
| 132 | void getStats(UNUSED ScopedString *Str) { |
    Str->append("Secondary Cache Disabled\n");
| 134 | } |
| 135 | }; |
| 136 | |
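// The number of fragmented pages at the beginning of a block that may stay
// committed ("unreleased") when the block is retrieved from the cache; any
// excess fragmented bytes get released back to the OS in retrieve(), capped
// at CachedBlock::MaxReleasedCachePages.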
| 137 | static const uptr MaxUnreleasedCachePages = 4U; |
| 138 | |
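// Commits [CommitBase, CommitBase + CommitSize) within MemMap, requesting
// MAP_MEMTAG for the part of the mapping that has to be tagged when memory
// tagging is enabled. Returns false if the commit fails (the mapping is
// created with MAP_ALLOWNOMEM).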
| 139 | template <typename Config> |
| 140 | bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize, |
| 141 | uptr AllocPos, uptr Flags, MemMapT &MemMap) { |
| 142 | Flags |= MAP_RESIZABLE; |
| 143 | Flags |= MAP_ALLOWNOMEM; |
| 144 | |
| 145 | const uptr PageSize = getPageSizeCached(); |
| 146 | if (SCUDO_TRUSTY) { |
| 147 | /* |
| 148 | * On Trusty we need AllocPos to be usable for shared memory, which cannot |
| 149 | * cross multiple mappings. This means we need to split around AllocPos |
| 150 | * and not over it. We can only do this if the address is page-aligned. |
| 151 | */ |
| 152 | const uptr TaggedSize = AllocPos - CommitBase; |
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
| 154 | DCHECK_GT(TaggedSize, 0); |
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
| 159 | } else { |
| 160 | const uptr RemapFlags = |
| 161 | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags; |
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
| 164 | } |
| 165 | } |
| 166 | |
| 167 | const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize; |
| 168 | if (useMemoryTagging<Config>(Options) && |
| 169 | CommitSize > MaxUnreleasedCacheBytes) { |
| 170 | const uptr UntaggedPos = |
        Max(AllocPos, CommitBase + MaxUnreleasedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase,
                        "scudo:secondary", MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
| 176 | } else { |
| 177 | const uptr RemapFlags = |
| 178 | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags; |
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
| 180 | } |
| 181 | } |
| 182 | |
// Template specialization to avoid producing a zero-length array.
| 184 | template <typename T, size_t Size> class NonZeroLengthArray { |
| 185 | public: |
| 186 | T &operator[](uptr Idx) { return values[Idx]; } |
| 187 | |
| 188 | private: |
| 189 | T values[Size]; |
| 190 | }; |
| 191 | template <typename T> class NonZeroLengthArray<T, 0> { |
| 192 | public: |
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
| 194 | }; |
| 195 | |
// The default unmap callback is simply scudo::unmap. In testing, a different
// unmap callback is used to record information about unmaps in the cache.
| 199 | template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap> |
| 200 | class MapAllocatorCache { |
| 201 | public: |
| 202 | void getStats(ScopedString *Str) { |
| 203 | ScopedLock L(Mutex); |
| 204 | uptr Integral; |
| 205 | uptr Fractional; |
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
| 209 | Str->append(Format: "Stats: MapAllocatorCache: EntriesCount: %zu, " |
| 210 | "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsSkips: " |
| 211 | "%zu, ReleaseToOsIntervalMs = %d\n" , |
| 212 | LRUEntries.size(), atomic_load_relaxed(A: &MaxEntriesCount), |
| 213 | atomic_load_relaxed(A: &MaxEntrySize), |
| 214 | atomic_load_relaxed(A: &ReleaseToOsSkips), |
| 215 | Interval >= 0 ? Interval : -1); |
| 216 | Str->append(Format: "Stats: CacheRetrievalStats: SuccessRate: %u/%u " |
| 217 | "(%zu.%02zu%%)\n" , |
| 218 | SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional); |
| 219 | Str->append(Format: "Cache Entry Info (Most Recent -> Least Recent):\n" ); |
| 220 | |
| 221 | for (CachedBlock &Entry : LRUEntries) { |
      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
| 224 | Entry.CommitBase, Entry.CommitBase + Entry.CommitSize, |
| 225 | Entry.CommitSize, Entry.Time == 0 ? "[R]" : "" ); |
| 226 | } |
| 227 | } |
| 228 | |
| 229 | // Ensure the default maximum specified fits the array. |
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");
| 233 | // Ensure the cache entry array size fits in the LRU list Next and Prev |
| 234 | // index fields |
  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
                "Cache entry array is too large to be indexed.");
| 237 | |
| 238 | void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS { |
| 239 | DCHECK_EQ(LRUEntries.size(), 0U); |
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
| 244 | // The default value in the cache config has the higher priority. |
| 245 | if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN) |
| 246 | ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs(); |
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
| 248 | |
| 249 | LRUEntries.clear(); |
    LRUEntries.init(Entries, sizeof(Entries));
| 251 | |
| 252 | AvailEntries.clear(); |
    AvailEntries.init(Entries, sizeof(Entries));
    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
      AvailEntries.push_back(&Entries[I]);
| 256 | } |
| 257 | |
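  // Stores a block in the cache, routing it through the quarantine first when
  // memory tagging is in use. Least-recently-used entries are evicted to make
  // room, with their mappings unmapped outside of the lock. If a release
  // interval is configured, entries older than the interval are released to
  // the OS opportunistically (skipped when another thread holds the lock).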
| 258 | void store(const Options &Options, uptr CommitBase, uptr CommitSize, |
| 259 | uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) { |
| 260 | DCHECK(canCache(CommitSize)); |
| 261 | |
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
| 263 | u64 Time; |
| 264 | CachedBlock Entry; |
| 265 | |
| 266 | Entry.CommitBase = CommitBase; |
| 267 | Entry.CommitSize = CommitSize; |
| 268 | Entry.BlockBegin = BlockBegin; |
| 269 | Entry.MemMap = MemMap; |
| 270 | Entry.Time = UINT64_MAX; |
| 271 | |
| 272 | if (useMemoryTagging<Config>(Options)) { |
| 273 | if (Interval == 0 && !SCUDO_FUCHSIA) { |
| 274 | // Release the memory and make it inaccessible at the same time by |
| 275 | // creating a new MAP_NOACCESS mapping on top of the existing mapping. |
| 276 | // Fuchsia does not support replacing mappings by creating a new mapping |
| 277 | // on top so we just do the two syscalls there. |
| 278 | Entry.Time = 0; |
| 279 | mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize, |
| 280 | Entry.CommitBase, MAP_NOACCESS, Entry.MemMap); |
| 281 | } else { |
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
| 284 | } |
| 285 | } |
| 286 | |
| 287 | // Usually only one entry will be evicted from the cache. |
| 288 | // Only in the rare event that the cache shrinks in real-time |
| 289 | // due to a decrease in the configurable value MaxEntriesCount |
| 290 | // will more than one cache entry be evicted. |
| 291 | // The vector is used to save the MemMaps of evicted entries so |
| 292 | // that the unmap call can be performed outside the lock |
| 293 | Vector<MemMapT, 1U> EvictionMemMaps; |
| 294 | |
| 295 | do { |
| 296 | ScopedLock L(Mutex); |
| 297 | |
| 298 | // Time must be computed under the lock to ensure |
| 299 | // that the LRU cache remains sorted with respect to |
| 300 | // time in a multithreaded environment |
| 301 | Time = getMonotonicTimeFast(); |
| 302 | if (Entry.Time != 0) |
| 303 | Entry.Time = Time; |
| 304 | |
| 305 | if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) { |
| 306 | // If we get here then memory tagging was disabled in between when we |
| 307 | // read Options and when we locked Mutex. We can't insert our entry into |
| 308 | // the quarantine or the cache because the permissions would be wrong so |
| 309 | // just unmap it. |
| 310 | unmapCallBack(Entry.MemMap); |
| 311 | break; |
| 312 | } |
| 313 | if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) { |
| 314 | QuarantinePos = |
| 315 | (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u); |
| 316 | if (!Quarantine[QuarantinePos].isValid()) { |
| 317 | Quarantine[QuarantinePos] = Entry; |
| 318 | return; |
| 319 | } |
| 320 | CachedBlock PrevEntry = Quarantine[QuarantinePos]; |
| 321 | Quarantine[QuarantinePos] = Entry; |
| 322 | if (OldestTime == 0) |
| 323 | OldestTime = Entry.Time; |
| 324 | Entry = PrevEntry; |
| 325 | } |
| 326 | |
| 327 | // All excess entries are evicted from the cache. Note that when |
| 328 | // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's |
| 329 | // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we |
| 330 | // won't try to pop `LRUEntries` when it's empty. |
      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
| 332 | // Save MemMaps of evicted entries to perform unmap outside of lock |
| 333 | CachedBlock *Entry = LRUEntries.back(); |
        EvictionMemMaps.push_back(Entry->MemMap);
| 335 | remove(Entry); |
| 336 | } |
| 337 | |
| 338 | insert(Entry); |
| 339 | |
| 340 | if (OldestTime == 0) |
| 341 | OldestTime = Entry.Time; |
| 342 | } while (0); |
| 343 | |
| 344 | for (MemMapT &EvictMemMap : EvictionMemMaps) |
| 345 | unmapCallBack(EvictMemMap); |
| 346 | |
| 347 | if (Interval >= 0) { |
| 348 | // It is very likely that multiple threads trying to do a release at the |
| 349 | // same time will not actually release any extra elements. Therefore, |
| 350 | // let any other thread continue, skipping the release. |
| 351 | if (Mutex.tryLock()) { |
| 352 | // TODO: Add ReleaseToOS logic to LRU algorithm |
        releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
| 354 | Mutex.unlock(); |
| 355 | } else |
        atomic_fetch_add(&ReleaseToOsSkips, 1U, memory_order_relaxed);
| 357 | } |
| 358 | } |
| 359 | |
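  // Returns a cached block that can hold an allocation of `Size` bytes at
  // `Alignment` while wasting at most `MaxAllowedFragmentedPages` pages of
  // committed memory, and sets `EntryHeaderPos` to where the headers should
  // be placed within it. Fragmented bytes beyond MaxUnreleasedCachePages are
  // released back to the OS, capped at MaxReleasedCachePages. On a cache miss
  // an invalid (default-constructed) CachedBlock is returned.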
| 360 | CachedBlock retrieve(uptr MaxAllowedFragmentedPages, uptr Size, |
                       uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
| 362 | EXCLUDES(Mutex) { |
| 363 | const uptr PageSize = getPageSizeCached(); |
| 364 | // 10% of the requested size proved to be the optimal choice for |
| 365 | // retrieving cached blocks after testing several options. |
| 366 | constexpr u32 FragmentedBytesDivisor = 10; |
| 367 | CachedBlock Entry; |
| 368 | EntryHeaderPos = 0; |
| 369 | { |
| 370 | ScopedLock L(Mutex); |
| 371 | CallsToRetrieve++; |
| 372 | if (LRUEntries.size() == 0) |
| 373 | return {}; |
| 374 | CachedBlock *RetrievedEntry = nullptr; |
| 375 | uptr MinDiff = UINTPTR_MAX; |
| 376 | |
| 377 | // Since allocation sizes don't always match cached memory chunk sizes |
| 378 | // we allow some memory to be unused (called fragmented bytes). The |
| 379 | // amount of unused bytes is exactly EntryHeaderPos - CommitBase. |
| 380 | // |
| 381 | // CommitBase CommitBase + CommitSize |
| 382 | // V V |
| 383 | // +---+------------+-----------------+---+ |
| 384 | // | | | | | |
| 385 | // +---+------------+-----------------+---+ |
| 386 | // ^ ^ ^ |
| 387 | // Guard EntryHeaderPos Guard-page-end |
| 388 | // page-begin |
| 389 | // |
| 390 | // [EntryHeaderPos, CommitBase + CommitSize) contains the user data as |
| 391 | // well as the header metadata. If EntryHeaderPos - CommitBase exceeds |
| 392 | // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is |
| 393 | // not considered valid for retrieval. |
| 394 | for (CachedBlock &Entry : LRUEntries) { |
| 395 | const uptr CommitBase = Entry.CommitBase; |
| 396 | const uptr CommitSize = Entry.CommitSize; |
| 397 | const uptr AllocPos = |
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
| 400 | const uptr MaxAllowedFragmentedBytes = |
| 401 | MaxAllowedFragmentedPages * PageSize; |
| 402 | if (HeaderPos > CommitBase + CommitSize) |
| 403 | continue; |
| 404 | // TODO: Remove AllocPos > CommitBase + MaxAllowedFragmentedBytes |
| 405 | // and replace with Diff > MaxAllowedFragmentedBytes |
| 406 | if (HeaderPos < CommitBase || |
| 407 | AllocPos > CommitBase + MaxAllowedFragmentedBytes) { |
| 408 | continue; |
| 409 | } |
| 410 | |
        const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;
| 412 | |
| 413 | // Keep track of the smallest cached block |
| 414 | // that is greater than (AllocSize + HeaderSize) |
| 415 | if (Diff >= MinDiff) |
| 416 | continue; |
| 417 | |
| 418 | MinDiff = Diff; |
| 419 | RetrievedEntry = &Entry; |
| 420 | EntryHeaderPos = HeaderPos; |
| 421 | |
| 422 | // Immediately use a cached block if its size is close enough to the |
| 423 | // requested size |
        const uptr OptimalFitThresholdBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= OptimalFitThresholdBytes)
| 427 | break; |
| 428 | } |
| 429 | |
| 430 | if (RetrievedEntry != nullptr) { |
| 431 | Entry = *RetrievedEntry; |
        remove(RetrievedEntry);
| 433 | SuccessfulRetrieves++; |
| 434 | } |
| 435 | } |
| 436 | |
| 437 | // The difference between the retrieved memory chunk and the request |
| 438 | // size is at most MaxAllowedFragmentedPages |
| 439 | // |
| 440 | // +- MaxAllowedFragmentedPages * PageSize -+ |
| 441 | // +--------------------------+-------------+ |
| 442 | // | | | |
| 443 | // +--------------------------+-------------+ |
| 444 | // \ Bytes to be released / ^ |
| 445 | // | |
| 446 | // (may or may not be committed) |
| 447 | // |
| 448 | // The maximum number of bytes released to the OS is capped by |
| 449 | // MaxReleasedCachePages |
| 450 | // |
| 451 | // TODO : Consider making MaxReleasedCachePages configurable since |
| 452 | // the release to OS API can vary across systems. |
| 453 | if (Entry.Time != 0) { |
| 454 | const uptr FragmentedBytes = |
          roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
| 456 | const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize; |
| 457 | if (FragmentedBytes > MaxUnreleasedCacheBytes) { |
| 458 | const uptr MaxReleasedCacheBytes = |
| 459 | CachedBlock::MaxReleasedCachePages * PageSize; |
| 460 | uptr BytesToRelease = |
            roundUp(Min<uptr>(MaxReleasedCacheBytes,
                              FragmentedBytes - MaxUnreleasedCacheBytes),
                    PageSize);
        Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
| 465 | } |
| 466 | } |
| 467 | |
| 468 | return Entry; |
| 469 | } |
| 470 | |
| 471 | bool canCache(uptr Size) { |
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
| 474 | } |
| 475 | |
| 476 | bool setOption(Option O, sptr Value) { |
| 477 | if (O == Option::ReleaseInterval) { |
| 478 | const s32 Interval = Max( |
| 479 | Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()), |
| 480 | Config::getMinReleaseToOsIntervalMs()); |
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
| 482 | return true; |
| 483 | } |
| 484 | if (O == Option::MaxCacheEntriesCount) { |
| 485 | if (Value < 0) |
| 486 | return false; |
| 487 | atomic_store_relaxed( |
| 488 | &MaxEntriesCount, |
| 489 | Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize())); |
| 490 | return true; |
| 491 | } |
| 492 | if (O == Option::MaxCacheEntrySize) { |
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
| 494 | return true; |
| 495 | } |
| 496 | // Not supported by the Secondary Cache, but not an error either. |
| 497 | return true; |
| 498 | } |
| 499 | |
| 500 | void releaseToOS() EXCLUDES(Mutex) { |
| 501 | // Since this is a request to release everything, always wait for the |
| 502 | // lock so that we guarantee all entries are released after this call. |
| 503 | ScopedLock L(Mutex); |
| 504 | releaseOlderThan(UINT64_MAX); |
| 505 | } |
| 506 | |
| 507 | void disableMemoryTagging() EXCLUDES(Mutex) { |
| 508 | ScopedLock L(Mutex); |
| 509 | for (u32 I = 0; I != Config::getQuarantineSize(); ++I) { |
| 510 | if (Quarantine[I].isValid()) { |
| 511 | MemMapT &MemMap = Quarantine[I].MemMap; |
| 512 | unmapCallBack(MemMap); |
| 513 | Quarantine[I].invalidate(); |
| 514 | } |
| 515 | } |
| 516 | for (CachedBlock &Entry : LRUEntries) |
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
| 518 | QuarantinePos = -1U; |
| 519 | } |
| 520 | |
| 521 | void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); } |
| 522 | |
| 523 | void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); } |
| 524 | |
| 525 | void unmapTestOnly() { empty(); } |
| 526 | |
| 527 | private: |
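  // insert() and remove() move entries between the available list and the LRU
  // list; both lists link nodes of the fixed `Entries` array, so neither
  // operation allocates.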
| 528 | void insert(const CachedBlock &Entry) REQUIRES(Mutex) { |
| 529 | CachedBlock *AvailEntry = AvailEntries.front(); |
| 530 | AvailEntries.pop_front(); |
| 531 | |
| 532 | *AvailEntry = Entry; |
    LRUEntries.push_front(AvailEntry);
| 534 | } |
| 535 | |
| 536 | void remove(CachedBlock *Entry) REQUIRES(Mutex) { |
| 537 | DCHECK(Entry->isValid()); |
    LRUEntries.remove(Entry);
| 539 | Entry->invalidate(); |
    AvailEntries.push_front(Entry);
| 541 | } |
| 542 | |
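  // Unmaps every cached entry. The MemMaps are copied out under the lock and
  // the unmap callback runs outside of it.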
| 543 | void empty() { |
| 544 | MemMapT MapInfo[Config::getEntriesArraySize()]; |
| 545 | uptr N = 0; |
| 546 | { |
| 547 | ScopedLock L(Mutex); |
| 548 | |
| 549 | for (CachedBlock &Entry : LRUEntries) |
| 550 | MapInfo[N++] = Entry.MemMap; |
| 551 | LRUEntries.clear(); |
| 552 | } |
| 553 | for (uptr I = 0; I < N; I++) { |
| 554 | MemMapT &MemMap = MapInfo[I]; |
| 555 | unmapCallBack(MemMap); |
| 556 | } |
| 557 | } |
| 558 | |
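  // Releases the pages of `Entry` back to the OS if it was cached at or
  // before `Time`; otherwise only refreshes the OldestTime bookkeeping.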
| 559 | void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) { |
| 560 | if (!Entry.isValid() || !Entry.Time) |
| 561 | return; |
| 562 | if (Entry.Time > Time) { |
| 563 | if (OldestTime == 0 || Entry.Time < OldestTime) |
| 564 | OldestTime = Entry.Time; |
| 565 | return; |
| 566 | } |
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
| 568 | Entry.Time = 0; |
| 569 | } |
| 570 | |
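  // Releases every quarantined and cached entry whose timestamp is at or
  // before `Time`; a no-op if nothing is old enough.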
| 571 | void releaseOlderThan(u64 Time) REQUIRES(Mutex) { |
| 572 | if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time) |
| 573 | return; |
| 574 | OldestTime = 0; |
| 575 | for (uptr I = 0; I < Config::getQuarantineSize(); I++) |
      releaseIfOlderThan(Quarantine[I], Time);
| 577 | for (uptr I = 0; I < Config::getEntriesArraySize(); I++) |
      releaseIfOlderThan(Entries[I], Time);
| 579 | } |
| 580 | |
| 581 | HybridMutex Mutex; |
| 582 | u32 QuarantinePos GUARDED_BY(Mutex) = 0; |
| 583 | atomic_u32 MaxEntriesCount = {}; |
| 584 | atomic_uptr MaxEntrySize = {}; |
| 585 | u64 OldestTime GUARDED_BY(Mutex) = 0; |
| 586 | atomic_s32 ReleaseToOsIntervalMs = {}; |
| 587 | u32 CallsToRetrieve GUARDED_BY(Mutex) = 0; |
| 588 | u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0; |
| 589 | atomic_uptr ReleaseToOsSkips = {}; |
| 590 | |
| 591 | CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {}; |
| 592 | NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()> |
| 593 | Quarantine GUARDED_BY(Mutex) = {}; |
| 594 | |
| 595 | // Cached blocks stored in LRU order |
| 596 | DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex); |
| 597 | // The unused Entries |
| 598 | SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex); |
| 599 | }; |
| 600 | |
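// The secondary allocator used for large blocks. Blocks are mapped and
// unmapped directly via the platform primitives, with an optional cache of
// recently freed mappings (Config::CacheT) in front of them, and the in-use
// blocks are tracked in a list for iteration and statistics.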
| 601 | template <typename Config> class MapAllocator { |
| 602 | public: |
| 603 | void init(GlobalStats *S, |
| 604 | s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS { |
| 605 | DCHECK_EQ(AllocatedBytes, 0U); |
| 606 | DCHECK_EQ(FreedBytes, 0U); |
| 607 | Cache.init(ReleaseToOsInterval); |
| 608 | Stats.init(); |
| 609 | if (LIKELY(S)) |
      S->link(&Stats);
| 611 | } |
| 612 | |
| 613 | void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0, |
| 614 | uptr *BlockEnd = nullptr, |
| 615 | FillContentsMode FillContents = NoFill); |
| 616 | |
| 617 | void deallocate(const Options &Options, void *Ptr); |
| 618 | |
| 619 | void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment, |
| 620 | uptr *BlockEndPtr, FillContentsMode FillContents); |
| 621 | |
| 622 | static uptr getBlockEnd(void *Ptr) { |
| 623 | auto *B = LargeBlock::getHeader<Config>(Ptr); |
| 624 | return B->CommitBase + B->CommitSize; |
| 625 | } |
| 626 | |
| 627 | static uptr getBlockSize(void *Ptr) { |
| 628 | return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr); |
| 629 | } |
| 630 | |
| 631 | static uptr getGuardPageSize() { |
| 632 | if (Config::getEnableGuardPages()) |
| 633 | return getPageSizeCached(); |
| 634 | return 0U; |
| 635 | } |
| 636 | |
  static constexpr uptr getHeadersSize() {
| 638 | return Chunk::getHeaderSize() + LargeBlock::getHeaderSize(); |
| 639 | } |
| 640 | |
| 641 | void disable() NO_THREAD_SAFETY_ANALYSIS { |
| 642 | Mutex.lock(); |
| 643 | Cache.disable(); |
| 644 | } |
| 645 | |
| 646 | void enable() NO_THREAD_SAFETY_ANALYSIS { |
| 647 | Cache.enable(); |
| 648 | Mutex.unlock(); |
| 649 | } |
| 650 | |
| 651 | template <typename F> void iterateOverBlocks(F Callback) const { |
| 652 | Mutex.assertHeld(); |
| 653 | |
| 654 | for (const auto &H : InUseBlocks) { |
| 655 | uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize(); |
| 656 | if (allocatorSupportsMemoryTagging<Config>()) |
| 657 | Ptr = untagPointer(Ptr); |
| 658 | Callback(Ptr); |
| 659 | } |
| 660 | } |
| 661 | |
| 662 | bool canCache(uptr Size) { return Cache.canCache(Size); } |
| 663 | |
| 664 | bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); } |
| 665 | |
| 666 | void releaseToOS() { Cache.releaseToOS(); } |
| 667 | |
| 668 | void disableMemoryTagging() { Cache.disableMemoryTagging(); } |
| 669 | |
| 670 | void unmapTestOnly() { Cache.unmapTestOnly(); } |
| 671 | |
| 672 | void getStats(ScopedString *Str); |
| 673 | |
| 674 | private: |
| 675 | typename Config::template CacheT<typename Config::CacheConfig> Cache; |
| 676 | |
| 677 | mutable HybridMutex Mutex; |
| 678 | DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex); |
| 679 | uptr AllocatedBytes GUARDED_BY(Mutex) = 0; |
| 680 | uptr FreedBytes GUARDED_BY(Mutex) = 0; |
| 681 | uptr FragmentedBytes GUARDED_BY(Mutex) = 0; |
| 682 | uptr LargestSize GUARDED_BY(Mutex) = 0; |
| 683 | u32 NumberOfAllocs GUARDED_BY(Mutex) = 0; |
| 684 | u32 NumberOfFrees GUARDED_BY(Mutex) = 0; |
| 685 | LocalStats Stats GUARDED_BY(Mutex); |
| 686 | }; |
| 687 | |
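// Attempts to service an allocation from the cache. Returns nullptr on a
// cache miss; on a hit, the retrieved block is re-protected and re-tagged as
// needed, optionally filled, its header populated and the block registered in
// the in-use list.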
| 688 | template <typename Config> |
| 689 | void * |
| 690 | MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size, |
| 691 | uptr Alignment, uptr *BlockEndPtr, |
| 692 | FillContentsMode FillContents) { |
| 693 | CachedBlock Entry; |
  uptr EntryHeaderPos;
| 695 | uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages; |
| 696 | |
| 697 | if (LIKELY(!useMemoryTagging<Config>(Options))) { |
| 698 | MaxAllowedFragmentedPages += CachedBlock::MaxReleasedCachePages; |
| 699 | } else { |
    // TODO: Enabling MaxReleasedCachePages may result in pages for an entry
    // being partially released, which erases the tags of those pages as well.
    // To support this feature for MTE, we would need to retag those pages.
| 703 | DCHECK_EQ(MaxAllowedFragmentedPages, MaxUnreleasedCachePages); |
| 704 | } |
| 705 | |
| 706 | Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment, |
| 707 | getHeadersSize(), EntryHeaderPos); |
| 708 | if (!Entry.isValid()) |
| 709 | return nullptr; |
| 710 | |
| 711 | LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>( |
| 712 | LargeBlock::addHeaderTag<Config>(EntryHeaderPos)); |
| 713 | bool Zeroed = Entry.Time == 0; |
| 714 | if (useMemoryTagging<Config>(Options)) { |
| 715 | uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1); |
    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
| 717 | if (Zeroed) { |
| 718 | storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase), |
| 719 | NewBlockBegin); |
| 720 | } else if (Entry.BlockBegin < NewBlockBegin) { |
      storeTags(Entry.BlockBegin, NewBlockBegin);
| 722 | } else { |
      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
| 724 | } |
| 725 | } |
| 726 | |
| 727 | H->CommitBase = Entry.CommitBase; |
| 728 | H->CommitSize = Entry.CommitSize; |
| 729 | H->MemMap = Entry.MemMap; |
| 730 | |
| 731 | const uptr BlockEnd = H->CommitBase + H->CommitSize; |
| 732 | if (BlockEndPtr) |
| 733 | *BlockEndPtr = BlockEnd; |
| 734 | uptr HInt = reinterpret_cast<uptr>(H); |
| 735 | if (allocatorSupportsMemoryTagging<Config>()) |
    HInt = untagPointer(HInt);
| 737 | const uptr PtrInt = HInt + LargeBlock::getHeaderSize(); |
| 738 | void *Ptr = reinterpret_cast<void *>(PtrInt); |
| 739 | if (FillContents && !Zeroed) |
    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
           BlockEnd - PtrInt);
| 742 | { |
| 743 | ScopedLock L(Mutex); |
    InUseBlocks.push_back(H);
| 745 | AllocatedBytes += H->CommitSize; |
| 746 | FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize; |
| 747 | NumberOfAllocs++; |
    Stats.add(StatAllocated, H->CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
| 750 | } |
| 751 | return Ptr; |
| 752 | } |
| 753 | // As with the Primary, the size passed to this function includes any desired |
| 754 | // alignment, so that the frontend can align the user allocation. The hint |
| 755 | // parameter allows us to unmap spurious memory when dealing with larger |
| 756 | // (greater than a page) alignments on 32-bit platforms. |
| 757 | // Due to the sparsity of address space available on those platforms, requesting |
| 758 | // an allocation from the Secondary with a large alignment would end up wasting |
| 759 | // VA space (even though we are not committing the whole thing), hence the need |
| 760 | // to trim off some of the reserved space. |
| 761 | // For allocations requested with an alignment greater than or equal to a page, |
| 762 | // the committed memory will amount to something close to Size - AlignmentHint |
| 763 | // (pending rounding and headers). |
| 764 | template <typename Config> |
| 765 | void *MapAllocator<Config>::allocate(const Options &Options, uptr Size, |
| 766 | uptr Alignment, uptr *BlockEndPtr, |
| 767 | FillContentsMode FillContents) { |
  if (Options.get(OptionBit::AddLargeAllocationSlack))
| 769 | Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG; |
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
| 771 | const uptr PageSize = getPageSizeCached(); |
| 772 | |
| 773 | // Note that cached blocks may have aligned address already. Thus we simply |
| 774 | // pass the required size (`Size` + `getHeadersSize()`) to do cache look up. |
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
| 776 | |
| 777 | if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) { |
| 778 | void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr, |
| 779 | FillContents); |
| 780 | if (Ptr != nullptr) |
| 781 | return Ptr; |
| 782 | } |
| 783 | |
| 784 | uptr RoundedSize = |
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
| 786 | if (UNLIKELY(Alignment > PageSize)) |
| 787 | RoundedSize += Alignment - PageSize; |
| 788 | |
| 789 | ReservedMemoryT ReservedMemory; |
| 790 | const uptr MapSize = RoundedSize + 2 * getGuardPageSize(); |
| 791 | if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, |
| 792 | MAP_ALLOWNOMEM))) { |
| 793 | return nullptr; |
| 794 | } |
| 795 | |
| 796 | // Take the entire ownership of reserved region. |
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
| 799 | uptr MapBase = MemMap.getBase(); |
| 800 | uptr CommitBase = MapBase + getGuardPageSize(); |
| 801 | uptr MapEnd = MapBase + MapSize; |
| 802 | |
| 803 | // In the unlikely event of alignments larger than a page, adjust the amount |
| 804 | // of memory we want to commit, and trim the extra memory. |
| 805 | if (UNLIKELY(Alignment >= PageSize)) { |
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
| 809 | CommitBase = |
        roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
| 811 | // We only trim the extra memory on 32-bit platforms: 64-bit platforms |
| 812 | // are less constrained memory wise, and that saves us two syscalls. |
| 813 | if (SCUDO_WORDSIZE == 32U) { |
| 814 | const uptr NewMapBase = CommitBase - getGuardPageSize(); |
| 815 | DCHECK_GE(NewMapBase, MapBase); |
| 816 | if (NewMapBase != MapBase) { |
        MemMap.unmap(MapBase, NewMapBase - MapBase);
| 818 | MapBase = NewMapBase; |
| 819 | } |
| 820 | // CommitBase is past the first guard page, but this computation needs |
| 821 | // to include a page where the header lives. |
| 822 | const uptr NewMapEnd = |
          CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
| 824 | DCHECK_LE(NewMapEnd, MapEnd); |
| 825 | if (NewMapEnd != MapEnd) { |
        MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
| 827 | MapEnd = NewMapEnd; |
| 828 | } |
| 829 | } |
| 830 | } |
| 831 | |
| 832 | const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase; |
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
| 834 | if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, |
| 835 | MemMap)) { |
| 836 | unmap(MemMap); |
| 837 | return nullptr; |
| 838 | } |
  const uptr HeaderPos = AllocPos - getHeadersSize();
| 840 | // Make sure that the header is not in the guard page or before the base. |
| 841 | DCHECK_GE(HeaderPos, MapBase + getGuardPageSize()); |
| 842 | LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>( |
| 843 | LargeBlock::addHeaderTag<Config>(HeaderPos)); |
| 844 | if (useMemoryTagging<Config>(Options)) |
| 845 | storeTags(LargeBlock::addHeaderTag<Config>(CommitBase), |
| 846 | reinterpret_cast<uptr>(H + 1)); |
| 847 | H->CommitBase = CommitBase; |
| 848 | H->CommitSize = CommitSize; |
| 849 | H->MemMap = MemMap; |
| 850 | if (BlockEndPtr) |
| 851 | *BlockEndPtr = CommitBase + CommitSize; |
| 852 | { |
| 853 | ScopedLock L(Mutex); |
    InUseBlocks.push_back(H);
| 855 | AllocatedBytes += CommitSize; |
| 856 | FragmentedBytes += H->MemMap.getCapacity() - CommitSize; |
| 857 | if (LargestSize < CommitSize) |
| 858 | LargestSize = CommitSize; |
| 859 | NumberOfAllocs++; |
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
| 862 | } |
| 863 | return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize()); |
| 864 | } |
| 865 | |
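// Removes the block from the in-use list, then either returns it to the cache
// (when it qualifies) or unmaps it.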
| 866 | template <typename Config> |
| 867 | void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr) |
| 868 | EXCLUDES(Mutex) { |
| 869 | LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr); |
| 870 | const uptr CommitSize = H->CommitSize; |
| 871 | { |
| 872 | ScopedLock L(Mutex); |
    InUseBlocks.remove(H);
| 874 | FreedBytes += CommitSize; |
| 875 | FragmentedBytes -= H->MemMap.getCapacity() - CommitSize; |
| 876 | NumberOfFrees++; |
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
| 879 | } |
| 880 | |
| 881 | if (Cache.canCache(H->CommitSize)) { |
| 882 | Cache.store(Options, H->CommitBase, H->CommitSize, |
| 883 | reinterpret_cast<uptr>(H + 1), H->MemMap); |
| 884 | } else { |
    // Note that `H->MemMap` is stored on the very pages it manages. Take over
    // the ownership before unmap() so that any operation performed along with
    // unmap() won't touch the now-inaccessible pages.
| 888 | MemMapT MemMap = H->MemMap; |
| 889 | unmap(MemMap); |
| 890 | } |
| 891 | } |
| 892 | |
| 893 | template <typename Config> |
| 894 | void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) { |
| 895 | ScopedLock L(Mutex); |
| 896 | Str->append(Format: "Stats: MapAllocator: allocated %u times (%zuK), freed %u times " |
| 897 | "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n" , |
| 898 | NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, |
| 899 | FreedBytes >> 10, NumberOfAllocs - NumberOfFrees, |
| 900 | (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20, |
| 901 | FragmentedBytes >> 10); |
| 902 | Cache.getStats(Str); |
| 903 | } |
| 904 | |
| 905 | } // namespace scudo |
| 906 | |
| 907 | #endif // SCUDO_SECONDARY_H_ |
| 908 | |