//===-- memprof_stats.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Code related to statistics collected by MemProfiler.
//===----------------------------------------------------------------------===//
#include "memprof_stats.h"
#include "memprof_interceptors.h"
#include "memprof_internal.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __memprof {

MemprofStats::MemprofStats() { Clear(); }

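// Zero out every counter. Use the intercepted memset once it has been
// resolved; fall back to internal_memset during early startup.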
void MemprofStats::Clear() {
  if (REAL(memset))
    return (void)REAL(memset)(this, 0, sizeof(MemprofStats));
  internal_memset(this, 0, sizeof(MemprofStats));
}

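// Print the non-zero entries of a per-size-class counter array as
// "size_class:count" pairs on one line, preceded by the given prefix.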
static void PrintMallocStatsArray(const char *prefix,
                                  uptr (&array)[kNumberOfSizeClasses]) {
  Printf("%s", prefix);
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (!array[i])
      continue;
    Printf("%zu:%zu; ", i, array[i]);
  }
  Printf("\n");
}

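// Report all counters; byte counts are printed in megabytes (values shifted
// right by 20 bits).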
void MemprofStats::Print() {
  Printf("Stats: %zuM malloced (%zuM for overhead) by %zu calls\n",
         malloced >> 20, malloced_overhead >> 20, mallocs);
  Printf("Stats: %zuM realloced by %zu calls\n", realloced >> 20, reallocs);
  Printf("Stats: %zuM freed by %zu calls\n", freed >> 20, frees);
  Printf("Stats: %zuM really freed by %zu calls\n", really_freed >> 20,
         real_frees);
  Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
         (mmaped - munmaped) >> 20, mmaped >> 20, munmaped >> 20, mmaps,
         munmaps);

  PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
  Printf("Stats: malloc large: %zu\n", malloc_large);
}

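// Accumulate another stats object into this one. MemprofStats is treated as a
// flat array of uptr counters, so the fields are summed element-wise.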
void MemprofStats::MergeFrom(const MemprofStats *stats) {
  uptr *dst_ptr = reinterpret_cast<uptr *>(this);
  const uptr *src_ptr = reinterpret_cast<const uptr *>(stats);
  uptr num_fields = sizeof(*this) / sizeof(uptr);
  for (uptr i = 0; i < num_fields; i++)
    dst_ptr[i] += src_ptr[i];
}

static Mutex print_lock;

static MemprofStats unknown_thread_stats(LINKER_INITIALIZED);
static MemprofStats dead_threads_stats(LINKER_INITIALIZED);
static Mutex dead_threads_stats_lock;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread MemprofStats.
static uptr max_malloced_memory;

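// Thread-registry callback: fold a live thread's stats into the accumulator
// passed via 'arg'.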
static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
  MemprofStats *accumulated_stats = reinterpret_cast<MemprofStats *>(arg);
  MemprofThreadContext *tctx = static_cast<MemprofThreadContext *>(tctx_base);
  if (MemprofThread *t = tctx->thread)
    accumulated_stats->MergeFrom(&t->stats());
}

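// Collect a process-wide snapshot: stats from every live thread, the
// unknown-thread bucket, and already-dead threads. Also updates the recorded
// allocation peak.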
static void GetAccumulatedStats(MemprofStats *stats) {
  stats->Clear();
  {
    ThreadRegistryLock l(&memprofThreadRegistry());
    memprofThreadRegistry().RunCallbackForEachThreadLocked(MergeThreadStats,
                                                           stats);
  }
  stats->MergeFrom(&unknown_thread_stats);
  {
    Lock lock(&dead_threads_stats_lock);
    stats->MergeFrom(&dead_threads_stats);
  }
  // This is not very accurate: we may miss allocation peaks that happen
  // between two updates of accumulated_stats_. For more accurate bookkeeping
  // the maximum should be updated on every malloc(), which is unacceptable.
  if (max_malloced_memory < stats->malloced) {
    max_malloced_memory = stats->malloced;
  }
}

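// Move the given per-thread stats into the dead-threads accumulator and clear
// the source, so the numbers are not lost when a thread goes away.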
void FlushToDeadThreadStats(MemprofStats *stats) {
  Lock lock(&dead_threads_stats_lock);
  dead_threads_stats.MergeFrom(stats);
  stats->Clear();
}

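// Return the stats object of the current MemprofThread, or the shared
// unknown-thread bucket when no thread object is attached yet.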
MemprofStats &GetCurrentThreadStats() {
  MemprofThread *t = GetCurrentThread();
  return (t) ? t->stats() : unknown_thread_stats;
}

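// Print a snapshot of the accumulated stats together with stack depot and
// internal allocator usage, serialized by print_lock.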
static void PrintAccumulatedStats() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  // Use lock to keep reports from mixing up.
  Lock lock(&print_lock);
  stats.Print();
  StackDepotStats stack_depot_stats = StackDepotGetStats();
  Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
         stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
  PrintInternalAllocatorStats();
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

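// Bytes currently held by user code: malloced minus freed, clamped to a
// positive value because the accumulated stats are updated racily.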
uptr __sanitizer_get_current_allocated_bytes() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  uptr malloced = stats.malloced;
  uptr freed = stats.freed;
  // Return sane value if malloced < freed due to racy
  // way we update accumulated stats.
  return (malloced > freed) ? malloced - freed : 1;
}

uptr __sanitizer_get_heap_size() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  return stats.mmaped - stats.munmaped;
}

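// Free bytes: memory that is mapped (or has been returned to the allocator)
// but is not currently handed out to user code.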
uptr __sanitizer_get_free_bytes() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  uptr total_free = stats.mmaped - stats.munmaped + stats.really_freed;
  uptr total_used = stats.malloced;
  // Return sane value if total_free < total_used due to racy
  // way we update accumulated stats.
  return (total_free > total_used) ? total_free - total_used : 1;
}

uptr __sanitizer_get_unmapped_bytes() { return 0; }

void __memprof_print_accumulated_stats() { PrintAccumulatedStats(); }