| 1 | //===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // Part of the Sanitizer Allocator. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | #ifndef SANITIZER_ALLOCATOR_H |
| 13 | #error This file must be included inside sanitizer_allocator.h |
| 14 | #endif |
| 15 | |
// Memory allocator statistics, tracked per thread and aggregated globally.
enum AllocatorStat {
  AllocatorStatAllocated,  // presumably bytes handed out to user code — confirm at call sites
  AllocatorStatMapped,     // presumably bytes mapped from the OS — confirm at call sites
  AllocatorStatCount       // number of stat kinds; must stay last
};
| 22 | |
| 23 | typedef uptr AllocatorStatCounters[AllocatorStatCount]; |
| 24 | |
| 25 | // Per-thread stats, live in per-thread cache. |
| 26 | class AllocatorStats { |
| 27 | public: |
| 28 | void Init() { internal_memset(s: this, c: 0, n: sizeof(*this)); } |
| 29 | void Add(AllocatorStat i, uptr v) { |
| 30 | atomic_fetch_add(a: &stats_[i], v, mo: memory_order_relaxed); |
| 31 | } |
| 32 | |
| 33 | void Sub(AllocatorStat i, uptr v) { |
| 34 | atomic_fetch_sub(a: &stats_[i], v, mo: memory_order_relaxed); |
| 35 | } |
| 36 | |
| 37 | void Set(AllocatorStat i, uptr v) { |
| 38 | atomic_store(a: &stats_[i], v, mo: memory_order_relaxed); |
| 39 | } |
| 40 | |
| 41 | uptr Get(AllocatorStat i) const { |
| 42 | return atomic_load(a: &stats_[i], mo: memory_order_relaxed); |
| 43 | } |
| 44 | |
| 45 | private: |
| 46 | friend class AllocatorGlobalStats; |
| 47 | AllocatorStats *next_; |
| 48 | AllocatorStats *prev_; |
| 49 | atomic_uintptr_t stats_[AllocatorStatCount]; |
| 50 | }; |
| 51 | |
| 52 | // Global stats, used for aggregation and querying. |
| 53 | class AllocatorGlobalStats : public AllocatorStats { |
| 54 | public: |
| 55 | void Init() { |
| 56 | internal_memset(s: this, c: 0, n: sizeof(*this)); |
| 57 | } |
| 58 | |
| 59 | void Register(AllocatorStats *s) { |
| 60 | SpinMutexLock l(&mu_); |
| 61 | LazyInit(); |
| 62 | s->next_ = next_; |
| 63 | s->prev_ = this; |
| 64 | next_->prev_ = s; |
| 65 | next_ = s; |
| 66 | } |
| 67 | |
| 68 | void Unregister(AllocatorStats *s) { |
| 69 | SpinMutexLock l(&mu_); |
| 70 | s->prev_->next_ = s->next_; |
| 71 | s->next_->prev_ = s->prev_; |
| 72 | for (int i = 0; i < AllocatorStatCount; i++) |
| 73 | Add(i: AllocatorStat(i), v: s->Get(i: AllocatorStat(i))); |
| 74 | } |
| 75 | |
| 76 | void Get(AllocatorStatCounters s) const { |
| 77 | internal_memset(s, c: 0, n: AllocatorStatCount * sizeof(uptr)); |
| 78 | SpinMutexLock l(&mu_); |
| 79 | const AllocatorStats *stats = this; |
| 80 | for (; stats;) { |
| 81 | for (int i = 0; i < AllocatorStatCount; i++) |
| 82 | s[i] += stats->Get(i: AllocatorStat(i)); |
| 83 | stats = stats->next_; |
| 84 | if (stats == this) |
| 85 | break; |
| 86 | } |
| 87 | // All stats must be non-negative. |
| 88 | for (int i = 0; i < AllocatorStatCount; i++) |
| 89 | s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0; |
| 90 | } |
| 91 | |
| 92 | private: |
| 93 | void LazyInit() { |
| 94 | if (!next_) { |
| 95 | next_ = this; |
| 96 | prev_ = this; |
| 97 | } |
| 98 | } |
| 99 | |
| 100 | mutable StaticSpinMutex mu_; |
| 101 | }; |
| 102 | |
| 103 | |
| 104 | |