| 1 | //===-- secondary.h ---------------------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #ifndef SCUDO_SECONDARY_H_ |
| 10 | #define SCUDO_SECONDARY_H_ |
| 11 | |
| 12 | #ifndef __STDC_FORMAT_MACROS |
| 13 | // Ensure PRId64 macro is available |
| 14 | #define __STDC_FORMAT_MACROS 1 |
| 15 | #endif |
| 16 | #include <inttypes.h> |
| 17 | |
| 18 | #include "chunk.h" |
| 19 | #include "common.h" |
| 20 | #include "list.h" |
| 21 | #include "mem_map.h" |
| 22 | #include "memtag.h" |
| 23 | #include "mutex.h" |
| 24 | #include "options.h" |
| 25 | #include "stats.h" |
| 26 | #include "string_utils.h" |
| 27 | #include "thread_annotations.h" |
| 28 | #include "tracing.h" |
| 29 | #include "vector.h" |
| 30 | |
| 31 | namespace scudo { |
| 32 | |
| 33 | // This allocator wraps the platform allocation primitives, and as such is on |
| 34 | // the slower side and should preferably be used for larger sized allocations. |
| 35 | // Blocks allocated will be preceded and followed by a guard page, and hold |
| 36 | // their own header that is not checksummed: the guard pages and the Combined |
| 37 | // header should be enough for our purpose. |
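//
// A typical call sequence looks roughly like the following (sketch only; the
// concrete Config type and the Options value are supplied by the frontend):
//
//   MapAllocator<Config> Secondary;
//   Secondary.init(/*GlobalStats=*/nullptr);
//   uptr BlockEnd;
//   void *Ptr = Secondary.allocate(Options, /*Size=*/1U << 20,
//                                  /*AlignmentHint=*/0, &BlockEnd);
//   ...
//   Secondary.deallocate(Options, Ptr);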
| 38 | |
| 39 | namespace LargeBlock { |
| 40 | |
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};
| 51 | |
static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");
| 56 | |
constexpr uptr getHeaderSize() { return sizeof(Header); }
| 58 | |
template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
| 62 | return Ptr; |
| 63 | } |
| 64 | |
template <typename Config> static Header *getHeader(uptr Ptr) {
| 66 | return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1; |
| 67 | } |
| 68 | |
template <typename Config> static Header *getHeader(const void *Ptr) {
| 70 | return getHeader<Config>(reinterpret_cast<uptr>(Ptr)); |
| 71 | } |
| 72 | |
| 73 | } // namespace LargeBlock |
| 74 | |
| 75 | static inline void unmap(MemMapT &MemMap) { MemMap.unmap(); } |
| 76 | |
| 77 | namespace { |
| 78 | |
| 79 | struct CachedBlock { |
| 80 | static constexpr u16 CacheIndexMax = UINT16_MAX; |
| 81 | static constexpr u16 EndOfListVal = CacheIndexMax; |
| 82 | |
  // We allow a certain amount of fragmentation, and part of the fragmented
  // bytes will be released by `releaseAndZeroPagesToOS()`. This increases the
  // cache hit rate and reduces the RSS overhead at the same time. See more
  // details in the `MapAllocatorCache::retrieve()` section.
  //
  // We arrived at this default value after noticing that mapping in larger
  // memory regions performs better than releasing memory and forcing a cache
  // hit. The data suggests that beyond 4 pages, the release execution time is
  // longer than the map execution time. As such, the default is platform
  // dependent.
| 93 | static constexpr uptr MaxReleasedCachePages = 4U; |
| 94 | |
| 95 | uptr CommitBase = 0; |
| 96 | uptr CommitSize = 0; |
| 97 | uptr BlockBegin = 0; |
| 98 | MemMapT MemMap = {}; |
| 99 | u64 Time = 0; |
| 100 | u16 Next = 0; |
| 101 | u16 Prev = 0; |
| 102 | |
| 103 | enum CacheFlags : u16 { |
| 104 | None = 0, |
| 105 | NoAccess = 0x1, |
| 106 | }; |
| 107 | CacheFlags Flags = CachedBlock::None; |
| 108 | |
| 109 | bool isValid() { return CommitBase != 0; } |
| 110 | |
| 111 | void invalidate() { CommitBase = 0; } |
| 112 | }; |
| 113 | } // namespace |
| 114 | |
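// A do-nothing cache used when the secondary cache is disabled in the
// configuration: canCache() always returns false, so store() must never be
// reached.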
| 115 | template <typename Config> class MapAllocatorNoCache { |
| 116 | public: |
| 117 | void init(UNUSED s32 ReleaseToOsInterval) {} |
| 118 | CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size, |
                       UNUSED uptr Alignment, UNUSED uptr HeadersSize,
                       UNUSED uptr &EntryHeaderPos) {
| 121 | return {}; |
| 122 | } |
| 123 | void store(UNUSED Options Options, UNUSED uptr CommitBase, |
| 124 | UNUSED uptr CommitSize, UNUSED uptr BlockBegin, |
| 125 | UNUSED MemMapT MemMap) { |
| 126 | // This should never be called since canCache always returns false. |
    UNREACHABLE(
        "It is not valid to call store on MapAllocatorNoCache objects.");
| 129 | } |
| 130 | |
| 131 | bool canCache(UNUSED uptr Size) { return false; } |
| 132 | void disable() {} |
| 133 | void enable() {} |
| 134 | void releaseToOS(ReleaseToOS) {} |
| 135 | void disableMemoryTagging() {} |
| 136 | void unmapTestOnly() {} |
| 137 | bool setOption(Option O, UNUSED sptr Value) { |
| 138 | if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount || |
| 139 | O == Option::MaxCacheEntrySize) |
| 140 | return false; |
| 141 | // Not supported by the Secondary Cache, but not an error either. |
| 142 | return true; |
| 143 | } |
| 144 | |
| 145 | void getStats(UNUSED ScopedString *Str) { |
| 146 | Str->append(Format: "Secondary Cache Disabled\n" ); |
| 147 | } |
| 148 | }; |
| 149 | |
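// Number of fragmented pages that may stay committed (i.e. not released back
// to the OS) when a cached block is reused; also used by mapSecondary() below
// to bound the initially tagged region when memory tagging is enabled.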
| 150 | static const uptr MaxUnreleasedCachePages = 4U; |
| 151 | |
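// Commits [CommitBase, CommitBase + CommitSize) within MemMap. With memory
// tagging enabled, only the range up to max(AllocPos, CommitBase +
// MaxUnreleasedCachePages * PageSize) is mapped with MAP_MEMTAG; the remainder
// is mapped untagged. Returns false if the mapping fails (the remaps are done
// with MAP_ALLOWNOMEM).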
| 152 | template <typename Config> |
| 153 | bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize, |
| 154 | uptr AllocPos, uptr Flags, MemMapT &MemMap) { |
| 155 | Flags |= MAP_RESIZABLE; |
| 156 | Flags |= MAP_ALLOWNOMEM; |
| 157 | |
| 158 | const uptr PageSize = getPageSizeCached(); |
| 159 | if (SCUDO_TRUSTY) { |
| 160 | /* |
| 161 | * On Trusty we need AllocPos to be usable for shared memory, which cannot |
| 162 | * cross multiple mappings. This means we need to split around AllocPos |
| 163 | * and not over it. We can only do this if the address is page-aligned. |
| 164 | */ |
| 165 | const uptr TaggedSize = AllocPos - CommitBase; |
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
| 177 | } |
| 178 | } |
| 179 | |
| 180 | const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize; |
| 181 | if (useMemoryTagging<Config>(Options) && |
| 182 | CommitSize > MaxUnreleasedCacheBytes) { |
| 183 | const uptr UntaggedPos = |
        Max(AllocPos, CommitBase + MaxUnreleasedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
| 193 | } |
| 194 | } |
| 195 | |
| 196 | // Template specialization to avoid producing zero-length array |
| 197 | template <typename T, size_t Size> class NonZeroLengthArray { |
| 198 | public: |
| 199 | T &operator[](uptr Idx) { return values[Idx]; } |
| 200 | |
| 201 | private: |
| 202 | T values[Size]; |
| 203 | }; |
| 204 | template <typename T> class NonZeroLengthArray<T, 0> { |
| 205 | public: |
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
| 207 | }; |
| 208 | |
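// Caches recently unmapped secondary blocks in LRU order so that subsequent
// large allocations can reuse their mappings instead of going back to the OS.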
// The default unmap callback is simply scudo::unmap.
// In testing, a different unmap callback is used to record information about
// unmaps in the cache.
| 212 | template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap> |
| 213 | class MapAllocatorCache { |
| 214 | public: |
| 215 | void getStats(ScopedString *Str) { |
| 216 | ScopedLock L(Mutex); |
| 217 | uptr Integral; |
| 218 | uptr Fractional; |
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append("Stats: MapAllocatorCache: EntriesCount: %zu, "
                "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsSkips: "
                "%zu, ReleaseToOsIntervalMs = %d\n",
                LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
                atomic_load_relaxed(&MaxEntrySize),
                atomic_load_relaxed(&ReleaseToOsSkips),
                Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

    for (CachedBlock &Entry : LRUEntries) {
      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu%s",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? " [R]" : "");
#if SCUDO_LINUX
      // getResidentPages only works on Linux systems currently.
      Str->append(", Resident Pages: %" PRId64 "/%zu\n",
                  getResidentPages(Entry.CommitBase, Entry.CommitSize),
                  Entry.CommitSize / getPageSizeCached());
#else
      Str->append("\n");
| 246 | #endif |
| 247 | } |
| 248 | } |
| 249 | |
| 250 | // Ensure the default maximum specified fits the array. |
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");
  // Ensure the cache entry array size fits in the LRU list Next and Prev
  // index fields.
  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
                "Cache entry array is too large to be indexed.");
| 258 | |
| 259 | void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS { |
| 260 | DCHECK_EQ(LRUEntries.size(), 0U); |
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // The default value in the cache config has the higher priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

    LRUEntries.clear();
    LRUEntries.init(Entries, sizeof(Entries));
    OldestPresentEntry = nullptr;

    AvailEntries.clear();
    AvailEntries.init(Entries, sizeof(Entries));
    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
      AvailEntries.push_back(&Entries[I]);
| 278 | } |
| 279 | |
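  // Takes ownership of MemMap and inserts the block described by
  // [CommitBase, CommitBase + CommitSize) into the cache (or the quarantine
  // when enabled), evicting the least recently used entries if the cache is
  // full. With memory tagging enabled, the block is made inaccessible while
  // it sits in the cache.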
| 280 | void store(const Options &Options, uptr CommitBase, uptr CommitSize, |
| 281 | uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) { |
| 282 | DCHECK(canCache(CommitSize)); |
| 283 | |
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
| 285 | u64 Time; |
| 286 | CachedBlock Entry; |
| 287 | |
| 288 | Entry.CommitBase = CommitBase; |
| 289 | Entry.CommitSize = CommitSize; |
| 290 | Entry.BlockBegin = BlockBegin; |
| 291 | Entry.MemMap = MemMap; |
| 292 | Entry.Time = UINT64_MAX; |
| 293 | Entry.Flags = CachedBlock::None; |
| 294 | |
| 295 | bool MemoryTaggingEnabled = useMemoryTagging<Config>(Options); |
| 296 | if (MemoryTaggingEnabled) { |
| 297 | if (Interval == 0 && !SCUDO_FUCHSIA) { |
| 298 | // Release the memory and make it inaccessible at the same time by |
| 299 | // creating a new MAP_NOACCESS mapping on top of the existing mapping. |
| 300 | // Fuchsia does not support replacing mappings by creating a new mapping |
| 301 | // on top so we just do the two syscalls there. |
| 302 | Entry.Time = 0; |
| 303 | mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize, |
| 304 | Entry.CommitBase, MAP_NOACCESS, Entry.MemMap); |
| 305 | } else { |
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
| 308 | } |
| 309 | Entry.Flags = CachedBlock::NoAccess; |
| 310 | } |
| 311 | |
| 312 | // Usually only one entry will be evicted from the cache. |
| 313 | // Only in the rare event that the cache shrinks in real-time |
| 314 | // due to a decrease in the configurable value MaxEntriesCount |
| 315 | // will more than one cache entry be evicted. |
| 316 | // The vector is used to save the MemMaps of evicted entries so |
| 317 | // that the unmap call can be performed outside the lock |
| 318 | Vector<MemMapT, 1U> EvictionMemMaps; |
| 319 | |
| 320 | do { |
| 321 | ScopedLock L(Mutex); |
| 322 | |
| 323 | // Time must be computed under the lock to ensure |
| 324 | // that the LRU cache remains sorted with respect to |
| 325 | // time in a multithreaded environment |
| 326 | Time = getMonotonicTimeFast(); |
| 327 | if (Entry.Time != 0) |
| 328 | Entry.Time = Time; |
| 329 | |
| 330 | if (MemoryTaggingEnabled && !useMemoryTagging<Config>(Options)) { |
| 331 | // If we get here then memory tagging was disabled in between when we |
| 332 | // read Options and when we locked Mutex. We can't insert our entry into |
| 333 | // the quarantine or the cache because the permissions would be wrong so |
| 334 | // just unmap it. |
| 335 | unmapCallBack(Entry.MemMap); |
| 336 | break; |
| 337 | } |
| 338 | |
| 339 | if (!Config::getQuarantineDisabled() && Config::getQuarantineSize()) { |
| 340 | QuarantinePos = |
| 341 | (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u); |
| 342 | if (!Quarantine[QuarantinePos].isValid()) { |
| 343 | Quarantine[QuarantinePos] = Entry; |
| 344 | return; |
| 345 | } |
| 346 | CachedBlock PrevEntry = Quarantine[QuarantinePos]; |
| 347 | Quarantine[QuarantinePos] = Entry; |
| 348 | Entry = PrevEntry; |
| 349 | } |
| 350 | |
| 351 | // All excess entries are evicted from the cache. Note that when |
| 352 | // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's |
| 353 | // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we |
| 354 | // won't try to pop `LRUEntries` when it's empty. |
      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
        // Save MemMaps of evicted entries to perform unmap outside of lock.
        CachedBlock *Entry = LRUEntries.back();
        EvictionMemMaps.push_back(Entry->MemMap);
| 359 | remove(Entry); |
| 360 | } |
| 361 | |
| 362 | insert(Entry); |
| 363 | } while (0); |
| 364 | |
| 365 | for (MemMapT &EvictMemMap : EvictionMemMaps) |
| 366 | unmapCallBack(EvictMemMap); |
| 367 | |
| 368 | if (Interval >= 0) { |
| 369 | // It is very likely that multiple threads trying to do a release at the |
| 370 | // same time will not actually release any extra elements. Therefore, |
| 371 | // let any other thread continue, skipping the release. |
| 372 | if (Mutex.tryLock()) { |
| 373 | SCUDO_SCOPED_TRACE( |
| 374 | GetSecondaryReleaseToOSTraceName(ReleaseToOS::Normal)); |
| 375 | |
        releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
| 377 | Mutex.unlock(); |
| 378 | } else |
        atomic_fetch_add(&ReleaseToOsSkips, 1U, memory_order_relaxed);
| 380 | } |
| 381 | } |
| 382 | |
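  // Returns a cached block whose committed range can hold `Size` bytes plus
  // the headers at the requested alignment, allowing at most
  // MaxAllowedFragmentedPages of unused pages in front of the header. On
  // success, EntryHeaderPos is set to the address where the block header
  // should be placed; on a miss, an invalid CachedBlock is returned.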
| 383 | CachedBlock retrieve(uptr MaxAllowedFragmentedPages, uptr Size, |
                       uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
| 385 | EXCLUDES(Mutex) { |
| 386 | const uptr PageSize = getPageSizeCached(); |
| 387 | // 10% of the requested size proved to be the optimal choice for |
| 388 | // retrieving cached blocks after testing several options. |
| 389 | constexpr u32 FragmentedBytesDivisor = 10; |
| 390 | CachedBlock Entry; |
| 391 | EntryHeaderPos = 0; |
| 392 | { |
| 393 | ScopedLock L(Mutex); |
| 394 | CallsToRetrieve++; |
| 395 | if (LRUEntries.size() == 0) |
| 396 | return {}; |
| 397 | CachedBlock *RetrievedEntry = nullptr; |
| 398 | uptr MinDiff = UINTPTR_MAX; |
| 399 | |
| 400 | // Since allocation sizes don't always match cached memory chunk sizes |
| 401 | // we allow some memory to be unused (called fragmented bytes). The |
| 402 | // amount of unused bytes is exactly EntryHeaderPos - CommitBase. |
| 403 | // |
| 404 | // CommitBase CommitBase + CommitSize |
| 405 | // V V |
| 406 | // +---+------------+-----------------+---+ |
| 407 | // | | | | | |
| 408 | // +---+------------+-----------------+---+ |
| 409 | // ^ ^ ^ |
| 410 | // Guard EntryHeaderPos Guard-page-end |
| 411 | // page-begin |
| 412 | // |
| 413 | // [EntryHeaderPos, CommitBase + CommitSize) contains the user data as |
| 414 | // well as the header metadata. If EntryHeaderPos - CommitBase exceeds |
| 415 | // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is |
| 416 | // not considered valid for retrieval. |
| 417 | for (CachedBlock &Entry : LRUEntries) { |
| 418 | const uptr CommitBase = Entry.CommitBase; |
| 419 | const uptr CommitSize = Entry.CommitSize; |
| 420 | const uptr AllocPos = |
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
| 423 | const uptr MaxAllowedFragmentedBytes = |
| 424 | MaxAllowedFragmentedPages * PageSize; |
| 425 | if (HeaderPos > CommitBase + CommitSize) |
| 426 | continue; |
| 427 | // TODO: Remove AllocPos > CommitBase + MaxAllowedFragmentedBytes |
| 428 | // and replace with Diff > MaxAllowedFragmentedBytes |
| 429 | if (HeaderPos < CommitBase || |
| 430 | AllocPos > CommitBase + MaxAllowedFragmentedBytes) { |
| 431 | continue; |
| 432 | } |
| 433 | |
        const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;
| 435 | |
| 436 | // Keep track of the smallest cached block |
| 437 | // that is greater than (AllocSize + HeaderSize) |
| 438 | if (Diff >= MinDiff) |
| 439 | continue; |
| 440 | |
| 441 | MinDiff = Diff; |
| 442 | RetrievedEntry = &Entry; |
| 443 | EntryHeaderPos = HeaderPos; |
| 444 | |
        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr OptimalFitThresholdBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= OptimalFitThresholdBytes)
| 450 | break; |
| 451 | } |
| 452 | |
| 453 | if (RetrievedEntry != nullptr) { |
| 454 | Entry = *RetrievedEntry; |
        remove(RetrievedEntry);
| 456 | SuccessfulRetrieves++; |
| 457 | } |
| 458 | } |
| 459 | |
| 460 | // The difference between the retrieved memory chunk and the request |
| 461 | // size is at most MaxAllowedFragmentedPages |
| 462 | // |
| 463 | // +- MaxAllowedFragmentedPages * PageSize -+ |
| 464 | // +--------------------------+-------------+ |
| 465 | // | | | |
| 466 | // +--------------------------+-------------+ |
| 467 | // \ Bytes to be released / ^ |
| 468 | // | |
| 469 | // (may or may not be committed) |
| 470 | // |
| 471 | // The maximum number of bytes released to the OS is capped by |
| 472 | // MaxReleasedCachePages |
| 473 | // |
| 474 | // TODO : Consider making MaxReleasedCachePages configurable since |
| 475 | // the release to OS API can vary across systems. |
| 476 | if (Entry.Time != 0) { |
      const uptr FragmentedBytes =
          roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
| 479 | const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize; |
| 480 | if (FragmentedBytes > MaxUnreleasedCacheBytes) { |
| 481 | const uptr MaxReleasedCacheBytes = |
| 482 | CachedBlock::MaxReleasedCachePages * PageSize; |
        uptr BytesToRelease =
            roundUp(Min<uptr>(MaxReleasedCacheBytes,
                              FragmentedBytes - MaxUnreleasedCacheBytes),
                    PageSize);
        Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
| 488 | } |
| 489 | } |
| 490 | |
| 491 | return Entry; |
| 492 | } |
| 493 | |
| 494 | bool canCache(uptr Size) { |
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
| 497 | } |
| 498 | |
| 499 | bool setOption(Option O, sptr Value) { |
| 500 | if (O == Option::ReleaseInterval) { |
| 501 | const s32 Interval = Max( |
| 502 | Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()), |
| 503 | Config::getMinReleaseToOsIntervalMs()); |
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
| 505 | return true; |
| 506 | } |
| 507 | if (O == Option::MaxCacheEntriesCount) { |
| 508 | if (Value < 0) |
| 509 | return false; |
| 510 | atomic_store_relaxed( |
| 511 | &MaxEntriesCount, |
| 512 | Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize())); |
| 513 | return true; |
| 514 | } |
| 515 | if (O == Option::MaxCacheEntrySize) { |
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
| 517 | return true; |
| 518 | } |
| 519 | // Not supported by the Secondary Cache, but not an error either. |
| 520 | return true; |
| 521 | } |
| 522 | |
| 523 | void releaseToOS([[maybe_unused]] ReleaseToOS ReleaseType) EXCLUDES(Mutex) { |
| 524 | SCUDO_SCOPED_TRACE(GetSecondaryReleaseToOSTraceName(ReleaseType)); |
| 525 | |
| 526 | // Since this is a request to release everything, always wait for the |
| 527 | // lock so that we guarantee all entries are released after this call. |
| 528 | ScopedLock L(Mutex); |
| 529 | releaseOlderThan(UINT64_MAX); |
| 530 | } |
| 531 | |
| 532 | void disableMemoryTagging() EXCLUDES(Mutex) { |
| 533 | if (Config::getQuarantineDisabled()) |
| 534 | return; |
| 535 | |
| 536 | ScopedLock L(Mutex); |
| 537 | for (u32 I = 0; I != Config::getQuarantineSize(); ++I) { |
| 538 | if (Quarantine[I].isValid()) { |
| 539 | MemMapT &MemMap = Quarantine[I].MemMap; |
| 540 | unmapCallBack(MemMap); |
| 541 | Quarantine[I].invalidate(); |
| 542 | } |
| 543 | } |
| 544 | QuarantinePos = -1U; |
| 545 | } |
| 546 | |
| 547 | void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); } |
| 548 | |
| 549 | void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); } |
| 550 | |
| 551 | void unmapTestOnly() { empty(); } |
| 552 | |
| 553 | void releaseOlderThanTestOnly(u64 ReleaseTime) { |
| 554 | ScopedLock L(Mutex); |
| 555 | releaseOlderThan(ReleaseTime); |
| 556 | } |
| 557 | |
| 558 | private: |
| 559 | void insert(const CachedBlock &Entry) REQUIRES(Mutex) { |
| 560 | CachedBlock *AvailEntry = AvailEntries.front(); |
| 561 | AvailEntries.pop_front(); |
| 562 | |
| 563 | *AvailEntry = Entry; |
    LRUEntries.push_front(AvailEntry);
| 565 | if (OldestPresentEntry == nullptr && AvailEntry->Time != 0) |
| 566 | OldestPresentEntry = AvailEntry; |
| 567 | } |
| 568 | |
| 569 | void remove(CachedBlock *Entry) REQUIRES(Mutex) { |
| 570 | DCHECK(Entry->isValid()); |
| 571 | if (OldestPresentEntry == Entry) { |
      OldestPresentEntry = LRUEntries.getPrev(Entry);
      DCHECK(OldestPresentEntry == nullptr || OldestPresentEntry->Time != 0);
    }
    LRUEntries.remove(Entry);
    Entry->invalidate();
    AvailEntries.push_front(Entry);
| 578 | } |
| 579 | |
| 580 | void empty() { |
| 581 | MemMapT MapInfo[Config::getEntriesArraySize()]; |
| 582 | uptr N = 0; |
| 583 | { |
| 584 | ScopedLock L(Mutex); |
| 585 | |
| 586 | for (CachedBlock &Entry : LRUEntries) |
| 587 | MapInfo[N++] = Entry.MemMap; |
| 588 | LRUEntries.clear(); |
| 589 | OldestPresentEntry = nullptr; |
| 590 | } |
| 591 | for (uptr I = 0; I < N; I++) { |
| 592 | MemMapT &MemMap = MapInfo[I]; |
| 593 | unmapCallBack(MemMap); |
| 594 | } |
| 595 | } |
| 596 | |
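  // Releases the pages of every quarantined and cached entry whose timestamp
  // is not newer than ReleaseTime back to the OS and marks those entries as
  // released (Time == 0).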
| 597 | void releaseOlderThan(u64 ReleaseTime) REQUIRES(Mutex) { |
| 598 | SCUDO_SCOPED_TRACE(GetSecondaryReleaseOlderThanTraceName()); |
| 599 | |
| 600 | if (!Config::getQuarantineDisabled()) { |
| 601 | for (uptr I = 0; I < Config::getQuarantineSize(); I++) { |
| 602 | auto &Entry = Quarantine[I]; |
| 603 | if (!Entry.isValid() || Entry.Time == 0 || Entry.Time > ReleaseTime) |
| 604 | continue; |
| 605 | Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, |
| 606 | Entry.CommitSize); |
| 607 | Entry.Time = 0; |
| 608 | } |
| 609 | } |
| 610 | |
| 611 | for (CachedBlock *Entry = OldestPresentEntry; Entry != nullptr; |
         Entry = LRUEntries.getPrev(Entry)) {
| 613 | DCHECK(Entry->isValid()); |
| 614 | DCHECK(Entry->Time != 0); |
| 615 | |
| 616 | if (Entry->Time > ReleaseTime) { |
| 617 | // All entries are newer than this, so no need to keep scanning. |
| 618 | OldestPresentEntry = Entry; |
| 619 | return; |
| 620 | } |
| 621 | |
      Entry->MemMap.releaseAndZeroPagesToOS(Entry->CommitBase,
                                            Entry->CommitSize);
| 624 | Entry->Time = 0; |
| 625 | } |
| 626 | OldestPresentEntry = nullptr; |
| 627 | } |
| 628 | |
| 629 | HybridMutex Mutex; |
| 630 | u32 QuarantinePos GUARDED_BY(Mutex) = 0; |
| 631 | atomic_u32 MaxEntriesCount = {}; |
| 632 | atomic_uptr MaxEntrySize = {}; |
| 633 | atomic_s32 ReleaseToOsIntervalMs = {}; |
| 634 | u32 CallsToRetrieve GUARDED_BY(Mutex) = 0; |
| 635 | u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0; |
| 636 | atomic_uptr ReleaseToOsSkips = {}; |
| 637 | |
| 638 | CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {}; |
| 639 | NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()> |
| 640 | Quarantine GUARDED_BY(Mutex) = {}; |
| 641 | |
| 642 | // The oldest entry in the LRUEntries that has Time non-zero. |
| 643 | CachedBlock *OldestPresentEntry GUARDED_BY(Mutex) = nullptr; |
| 644 | // Cached blocks stored in LRU order |
| 645 | DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex); |
| 646 | // The unused Entries |
| 647 | SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex); |
| 648 | }; |
| 649 | |
| 650 | template <typename Config> class MapAllocator { |
| 651 | public: |
| 652 | void init(GlobalStats *S, |
| 653 | s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS { |
| 654 | DCHECK_EQ(AllocatedBytes, 0U); |
| 655 | DCHECK_EQ(FreedBytes, 0U); |
| 656 | Cache.init(ReleaseToOsInterval); |
| 657 | Stats.init(); |
| 658 | if (LIKELY(S)) |
      S->link(&Stats);
| 660 | } |
| 661 | |
| 662 | void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0, |
| 663 | uptr *BlockEnd = nullptr, |
| 664 | FillContentsMode FillContents = NoFill); |
| 665 | |
| 666 | void deallocate(const Options &Options, void *Ptr); |
| 667 | |
| 668 | void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment, |
| 669 | uptr *BlockEndPtr, FillContentsMode FillContents); |
| 670 | |
| 671 | static uptr getBlockEnd(void *Ptr) { |
| 672 | auto *B = LargeBlock::getHeader<Config>(Ptr); |
| 673 | return B->CommitBase + B->CommitSize; |
| 674 | } |
| 675 | |
| 676 | static uptr getBlockSize(void *Ptr) { |
| 677 | return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr); |
| 678 | } |
| 679 | |
| 680 | static uptr getGuardPageSize() { |
| 681 | if (Config::getEnableGuardPages()) |
| 682 | return getPageSizeCached(); |
| 683 | return 0U; |
| 684 | } |
| 685 | |
  static constexpr uptr getHeadersSize() {
| 687 | return Chunk::getHeaderSize() + LargeBlock::getHeaderSize(); |
| 688 | } |
| 689 | |
| 690 | void disable() NO_THREAD_SAFETY_ANALYSIS { |
| 691 | Mutex.lock(); |
| 692 | Cache.disable(); |
| 693 | } |
| 694 | |
| 695 | void enable() NO_THREAD_SAFETY_ANALYSIS { |
| 696 | Cache.enable(); |
| 697 | Mutex.unlock(); |
| 698 | } |
| 699 | |
| 700 | template <typename F> void iterateOverBlocks(F Callback) const { |
| 701 | Mutex.assertHeld(); |
| 702 | |
| 703 | for (const auto &H : InUseBlocks) { |
| 704 | uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize(); |
| 705 | if (allocatorSupportsMemoryTagging<Config>()) |
| 706 | Ptr = untagPointer(Ptr); |
| 707 | Callback(Ptr); |
| 708 | } |
| 709 | } |
| 710 | |
| 711 | bool canCache(uptr Size) { return Cache.canCache(Size); } |
| 712 | |
| 713 | bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); } |
| 714 | |
| 715 | void releaseToOS(ReleaseToOS ReleaseType) { Cache.releaseToOS(ReleaseType); } |
| 716 | |
| 717 | void disableMemoryTagging() { Cache.disableMemoryTagging(); } |
| 718 | |
| 719 | void unmapTestOnly() { Cache.unmapTestOnly(); } |
| 720 | |
| 721 | void getStats(ScopedString *Str); |
| 722 | |
| 723 | private: |
| 724 | typename Config::template CacheT<typename Config::CacheConfig> Cache; |
| 725 | |
| 726 | mutable HybridMutex Mutex; |
| 727 | DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex); |
| 728 | uptr AllocatedBytes GUARDED_BY(Mutex) = 0; |
| 729 | uptr FreedBytes GUARDED_BY(Mutex) = 0; |
| 730 | uptr FragmentedBytes GUARDED_BY(Mutex) = 0; |
| 731 | uptr LargestSize GUARDED_BY(Mutex) = 0; |
| 732 | u32 NumberOfAllocs GUARDED_BY(Mutex) = 0; |
| 733 | u32 NumberOfFrees GUARDED_BY(Mutex) = 0; |
| 734 | LocalStats Stats GUARDED_BY(Mutex); |
| 735 | }; |
| 736 | |
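// Attempts to service the allocation from the secondary cache; returns nullptr
// on a cache miss, in which case the caller maps fresh memory instead.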
| 737 | template <typename Config> |
| 738 | void * |
| 739 | MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size, |
| 740 | uptr Alignment, uptr *BlockEndPtr, |
| 741 | FillContentsMode FillContents) { |
| 742 | CachedBlock Entry; |
  uptr EntryHeaderPos;
| 744 | uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages; |
| 745 | |
| 746 | if (LIKELY(!useMemoryTagging<Config>(Options))) { |
| 747 | MaxAllowedFragmentedPages += CachedBlock::MaxReleasedCachePages; |
| 748 | } else { |
    // TODO: Enabling MaxReleasedCachePages may result in pages for an entry
    // being partially released, which erases the tag of those pages as well.
    // To support this feature for MTE, we need to tag those pages again.
| 752 | DCHECK_EQ(MaxAllowedFragmentedPages, MaxUnreleasedCachePages); |
| 753 | } |
| 754 | |
| 755 | Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment, |
| 756 | getHeadersSize(), EntryHeaderPos); |
| 757 | if (!Entry.isValid()) |
| 758 | return nullptr; |
| 759 | |
| 760 | LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>( |
| 761 | LargeBlock::addHeaderTag<Config>(EntryHeaderPos)); |
| 762 | bool Zeroed = Entry.Time == 0; |
| 763 | |
| 764 | if (UNLIKELY(Entry.Flags & CachedBlock::NoAccess)) { |
| 765 | // NOTE: Flags set to 0 actually restores read-write. |
    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                     /*Flags=*/0);
| 768 | } |
| 769 | |
| 770 | if (useMemoryTagging<Config>(Options)) { |
| 771 | uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1); |
| 772 | if (Zeroed) { |
| 773 | storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase), |
| 774 | NewBlockBegin); |
| 775 | } else if (Entry.BlockBegin < NewBlockBegin) { |
      storeTags(Entry.BlockBegin, NewBlockBegin);
    } else {
      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
| 779 | } |
| 780 | } |
| 781 | |
| 782 | H->CommitBase = Entry.CommitBase; |
| 783 | H->CommitSize = Entry.CommitSize; |
| 784 | H->MemMap = Entry.MemMap; |
| 785 | |
| 786 | const uptr BlockEnd = H->CommitBase + H->CommitSize; |
| 787 | if (BlockEndPtr) |
| 788 | *BlockEndPtr = BlockEnd; |
| 789 | uptr HInt = reinterpret_cast<uptr>(H); |
| 790 | if (allocatorSupportsMemoryTagging<Config>()) |
    HInt = untagPointer(HInt);
| 792 | const uptr PtrInt = HInt + LargeBlock::getHeaderSize(); |
| 793 | void *Ptr = reinterpret_cast<void *>(PtrInt); |
| 794 | if (FillContents && !Zeroed) |
    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
           BlockEnd - PtrInt);
| 797 | { |
| 798 | ScopedLock L(Mutex); |
    InUseBlocks.push_back(H);
    AllocatedBytes += H->CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, H->CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
| 805 | } |
| 806 | return Ptr; |
| 807 | } |
| 808 | // As with the Primary, the size passed to this function includes any desired |
| 809 | // alignment, so that the frontend can align the user allocation. The hint |
| 810 | // parameter allows us to unmap spurious memory when dealing with larger |
| 811 | // (greater than a page) alignments on 32-bit platforms. |
| 812 | // Due to the sparsity of address space available on those platforms, requesting |
| 813 | // an allocation from the Secondary with a large alignment would end up wasting |
| 814 | // VA space (even though we are not committing the whole thing), hence the need |
| 815 | // to trim off some of the reserved space. |
| 816 | // For allocations requested with an alignment greater than or equal to a page, |
| 817 | // the committed memory will amount to something close to Size - AlignmentHint |
| 818 | // (pending rounding and headers). |
| 819 | template <typename Config> |
| 820 | void *MapAllocator<Config>::allocate(const Options &Options, uptr Size, |
| 821 | uptr Alignment, uptr *BlockEndPtr, |
| 822 | FillContentsMode FillContents) { |
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
| 826 | const uptr PageSize = getPageSizeCached(); |
| 827 | |
| 828 | // Note that cached blocks may have aligned address already. Thus we simply |
| 829 | // pass the required size (`Size` + `getHeadersSize()`) to do cache look up. |
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
| 831 | |
| 832 | if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) { |
| 833 | void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr, |
| 834 | FillContents); |
| 835 | if (Ptr != nullptr) |
| 836 | return Ptr; |
| 837 | } |
| 838 | |
  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
| 841 | if (UNLIKELY(Alignment > PageSize)) |
| 842 | RoundedSize += Alignment - PageSize; |
| 843 | |
| 844 | ReservedMemoryT ReservedMemory; |
| 845 | const uptr MapSize = RoundedSize + 2 * getGuardPageSize(); |
| 846 | if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, |
| 847 | MAP_ALLOWNOMEM))) { |
| 848 | return nullptr; |
| 849 | } |
| 850 | |
| 851 | // Take the entire ownership of reserved region. |
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
| 854 | uptr MapBase = MemMap.getBase(); |
| 855 | uptr CommitBase = MapBase + getGuardPageSize(); |
| 856 | uptr MapEnd = MapBase + MapSize; |
| 857 | |
| 858 | // In the unlikely event of alignments larger than a page, adjust the amount |
| 859 | // of memory we want to commit, and trim the extra memory. |
| 860 | if (UNLIKELY(Alignment >= PageSize)) { |
    // For alignments greater than or equal to a page, the user pointer (i.e.
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase =
        roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
| 866 | // We only trim the extra memory on 32-bit platforms: 64-bit platforms |
| 867 | // are less constrained memory wise, and that saves us two syscalls. |
| 868 | if (SCUDO_WORDSIZE == 32U) { |
| 869 | const uptr NewMapBase = CommitBase - getGuardPageSize(); |
| 870 | DCHECK_GE(NewMapBase, MapBase); |
| 871 | if (NewMapBase != MapBase) { |
        MemMap.unmap(MapBase, NewMapBase - MapBase);
| 873 | MapBase = NewMapBase; |
| 874 | } |
| 875 | // CommitBase is past the first guard page, but this computation needs |
| 876 | // to include a page where the header lives. |
      const uptr NewMapEnd =
          CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
| 879 | DCHECK_LE(NewMapEnd, MapEnd); |
| 880 | if (NewMapEnd != MapEnd) { |
        MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
| 882 | MapEnd = NewMapEnd; |
| 883 | } |
| 884 | } |
| 885 | } |
| 886 | |
| 887 | const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase; |
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
| 889 | if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, |
| 890 | MemMap)) { |
| 891 | unmap(MemMap); |
| 892 | return nullptr; |
| 893 | } |
  const uptr HeaderPos = AllocPos - getHeadersSize();
| 895 | // Make sure that the header is not in the guard page or before the base. |
| 896 | DCHECK_GE(HeaderPos, MapBase + getGuardPageSize()); |
| 897 | LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>( |
| 898 | LargeBlock::addHeaderTag<Config>(HeaderPos)); |
| 899 | if (useMemoryTagging<Config>(Options)) |
| 900 | storeTags(LargeBlock::addHeaderTag<Config>(CommitBase), |
| 901 | reinterpret_cast<uptr>(H + 1)); |
| 902 | H->CommitBase = CommitBase; |
| 903 | H->CommitSize = CommitSize; |
| 904 | H->MemMap = MemMap; |
| 905 | if (BlockEndPtr) |
| 906 | *BlockEndPtr = CommitBase + CommitSize; |
| 907 | { |
| 908 | ScopedLock L(Mutex); |
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
| 917 | } |
| 918 | return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize()); |
| 919 | } |
| 920 | |
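// Returns the block to the cache when its size is cacheable; otherwise the
// backing mapping is unmapped right away.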
| 921 | template <typename Config> |
| 922 | void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr) |
| 923 | EXCLUDES(Mutex) { |
| 924 | LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr); |
| 925 | const uptr CommitSize = H->CommitSize; |
| 926 | { |
| 927 | ScopedLock L(Mutex); |
    InUseBlocks.remove(H);
| 929 | FreedBytes += CommitSize; |
| 930 | FragmentedBytes -= H->MemMap.getCapacity() - CommitSize; |
| 931 | NumberOfFrees++; |
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
| 934 | } |
| 935 | |
| 936 | if (Cache.canCache(H->CommitSize)) { |
| 937 | Cache.store(Options, H->CommitBase, H->CommitSize, |
| 938 | reinterpret_cast<uptr>(H + 1), H->MemMap); |
| 939 | } else { |
| 940 | // Note that the `H->MemMap` is stored on the pages managed by itself. Take |
| 941 | // over the ownership before unmap() so that any operation along with |
| 942 | // unmap() won't touch inaccessible pages. |
| 943 | MemMapT MemMap = H->MemMap; |
| 944 | unmap(MemMap); |
| 945 | } |
| 946 | } |
| 947 | |
| 948 | template <typename Config> |
| 949 | void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) { |
| 950 | ScopedLock L(Mutex); |
| 951 | Str->append(Format: "Stats: MapAllocator: allocated %u times (%zuK), freed %u times " |
| 952 | "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n" , |
| 953 | NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, |
| 954 | FreedBytes >> 10, NumberOfAllocs - NumberOfFrees, |
| 955 | (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20, |
| 956 | FragmentedBytes >> 10); |
| 957 | Cache.getStats(Str); |
| 958 | } |
| 959 | |
| 960 | } // namespace scudo |
| 961 | |
| 962 | #endif // SCUDO_SECONDARY_H_ |
| 963 | |