//===-- memprof_mapping.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Defines MemProf memory mapping.
//===----------------------------------------------------------------------===//
#ifndef MEMPROF_MAPPING_H
#define MEMPROF_MAPPING_H

#include "memprof_internal.h"

static const u64 kDefaultShadowScale = 3;
#define SHADOW_SCALE kDefaultShadowScale

#define SHADOW_OFFSET __memprof_shadow_memory_dynamic_address

#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEMPROF_ALIGNMENT 32

namespace __memprof {

extern uptr kHighMemEnd; // Initialized in __memprof_init.

} // namespace __memprof

// Size of memory block mapped to a single shadow location
#define MEM_GRANULARITY 64ULL

#define SHADOW_MASK ~(MEM_GRANULARITY - 1)

#define MEM_TO_SHADOW(mem)                                                     \
  ((((mem) & SHADOW_MASK) >> SHADOW_SCALE) + (SHADOW_OFFSET))
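// For example, with the default SHADOW_SCALE of 3 and MEM_GRANULARITY of 64,
// every access in the (purely illustrative) 64-byte block [0x10000, 0x1003f]
// maps to the same 8-byte shadow counter at SHADOW_OFFSET + 0x2000
// (0x10000 >> 3).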

// Histogram shadow memory is laid out differently from the standard
// configuration:

//             8 bytes
//          +---+---+---+   +---+---+---+   +---+---+---+
//  Memory  |     a     |   |     b     |   |     c     |
//          +---+---+---+   +---+---+---+   +---+---+---+

//          +---+           +---+           +---+
//  Shadow  | a |           | b |           | c |
//          +---+           +---+           +---+
//          1 byte
//
// That is, there is one 1-byte counter for every 8 bytes of memory.
// HISTOGRAM_MEM_TO_SHADOW translates a memory address to the address of its
// corresponding shadow counter. The same data is still provided in the MIB
// whether histograms are used or not. The total access count for an
// allocation is computed by summing up all of its individual 1-byte counters.
// This can incur an accuracy penalty, since each counter saturates at
// HISTOGRAM_MAX_COUNTER.

#define HISTOGRAM_GRANULARITY 8ULL

#define HISTOGRAM_MAX_COUNTER 255U

#define HISTOGRAM_SHADOW_MASK ~(HISTOGRAM_GRANULARITY - 1)

#define HISTOGRAM_MEM_TO_SHADOW(mem)                                           \
  ((((mem) & HISTOGRAM_SHADOW_MASK) >> SHADOW_SCALE) + (SHADOW_OFFSET))
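// For example (addresses purely illustrative), accesses in [0x10000, 0x10007]
// all bump the 1-byte counter at SHADOW_OFFSET + 0x2000, while accesses in
// the next granule [0x10008, 0x1000f] bump the adjacent counter at
// SHADOW_OFFSET + 0x2001. With the default scale, SHADOW_ENTRY_SIZE below
// works out to 64 >> 3 = 8 bytes, i.e. one u64 counter per 64-byte block in
// the non-histogram configuration.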

#define SHADOW_ENTRY_SIZE (MEM_GRANULARITY >> SHADOW_SCALE)

#define kLowMemBeg 0
#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)

#define kLowShadowBeg SHADOW_OFFSET
#define kLowShadowEnd (MEM_TO_SHADOW(kLowMemEnd) + SHADOW_ENTRY_SIZE - 1)

#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1 + SHADOW_ENTRY_SIZE - 1)

#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
#define kHighShadowEnd (MEM_TO_SHADOW(kHighMemEnd) + SHADOW_ENTRY_SIZE - 1)

// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
#define kZeroBaseShadowStart 0
#define kZeroBaseMaxShadowStart (1 << 18)

#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart)
#define kShadowGapEnd (kHighShadowBeg - 1)
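
// Derived from the definitions above: with a non-zero dynamic SHADOW_OFFSET
// the address space is laid out, from low to high addresses, as
//   [kLowMemBeg, kLowMemEnd]          low application memory
//   [kLowShadowBeg, kLowShadowEnd]    shadow of the low memory range
//   [kShadowGapBeg, kShadowGapEnd]    gap between the shadow regions (only
//                                     treated as addressable when the
//                                     protect_shadow_gap flag is 0; see
//                                     AddrIsInMem below)
//   [kHighShadowBeg, kHighShadowEnd]  shadow of the high memory range
//   [kHighMemBeg, kHighMemEnd]        high application memory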

namespace __memprof {

inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
inline bool AddrIsInLowMem(uptr a) { return a <= kLowMemEnd; }

inline bool AddrIsInLowShadow(uptr a) {
  return a >= kLowShadowBeg && a <= kLowShadowEnd;
}

inline bool AddrIsInHighMem(uptr a) {
  return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
}

inline bool AddrIsInHighShadow(uptr a) {
  return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
}

inline bool AddrIsInShadowGap(uptr a) {
  // In zero-based shadow mode we treat addresses near zero as addresses
  // in the shadow gap as well.
  if (SHADOW_OFFSET == 0)
    return a <= kShadowGapEnd;
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}

inline bool AddrIsInMem(uptr a) {
  return AddrIsInLowMem(a) || AddrIsInHighMem(a) ||
         (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}

inline uptr MemToShadow(uptr p) {
  CHECK(AddrIsInMem(p));
  return MEM_TO_SHADOW(p);
}

inline bool AddrIsInShadow(uptr a) {
  return AddrIsInLowShadow(a) || AddrIsInHighShadow(a);
}

inline bool AddrIsAlignedByGranularity(uptr a) {
  return (a & (SHADOW_GRANULARITY - 1)) == 0;
}

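// Bump the 8-byte access counter shared by every address in the 64-byte
// (MEM_GRANULARITY) block containing `a`.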
inline void RecordAccess(uptr a) {
  // If we use a different shadow entry size then the type below needs
  // adjustment.
  CHECK_EQ(SHADOW_ENTRY_SIZE, 8);
  u64 *shadow_address = (u64 *)MEM_TO_SHADOW(a);
  (*shadow_address)++;
}

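// Bump the 1-byte counter for the 8-byte (HISTOGRAM_GRANULARITY) granule
// containing `a`, saturating at HISTOGRAM_MAX_COUNTER.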
inline void RecordAccessHistogram(uptr a) {
  CHECK_EQ(SHADOW_ENTRY_SIZE, 8);
  u8 *shadow_address = (u8 *)HISTOGRAM_MEM_TO_SHADOW(a);
  if (*shadow_address < HISTOGRAM_MAX_COUNTER) {
    (*shadow_address)++;
  }
}
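
// Illustrative sketch only (not part of the MemProf runtime interface): with
// histograms enabled, the total access count for a memory range could be
// recovered by summing its 1-byte shadow counters, as described above. The
// function name and signature here are hypothetical.
inline u64 SumHistogramCountersForRange(uptr beg, uptr size) {
  u64 total = 0;
  // Every address inside an 8-byte granule shares one counter, so walk the
  // range one HISTOGRAM_GRANULARITY-sized granule at a time.
  for (uptr p = beg & HISTOGRAM_SHADOW_MASK; p < beg + size;
       p += HISTOGRAM_GRANULARITY)
    total += *(u8 *)HISTOGRAM_MEM_TO_SHADOW(p);
  return total;
}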

} // namespace __memprof

#endif // MEMPROF_MAPPING_H