1 | //=-- lsan_allocator.cpp --------------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is a part of LeakSanitizer. |
10 | // See lsan_allocator.h for details. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "lsan_allocator.h" |
15 | |
16 | #include "sanitizer_common/sanitizer_allocator.h" |
17 | #include "sanitizer_common/sanitizer_allocator_checks.h" |
18 | #include "sanitizer_common/sanitizer_allocator_interface.h" |
19 | #include "sanitizer_common/sanitizer_allocator_report.h" |
20 | #include "sanitizer_common/sanitizer_errno.h" |
21 | #include "sanitizer_common/sanitizer_internal_defs.h" |
22 | #include "sanitizer_common/sanitizer_stackdepot.h" |
23 | #include "sanitizer_common/sanitizer_stacktrace.h" |
24 | #include "lsan_common.h" |
25 | |
26 | extern "C" void *memset(void *ptr, int value, uptr num); |
27 | |
28 | namespace __lsan { |
29 | #if defined(__i386__) || defined(__arm__) |
30 | static const uptr kMaxAllowedMallocSize = 1ULL << 30; |
31 | #elif defined(__mips64) || defined(__aarch64__) |
32 | static const uptr kMaxAllowedMallocSize = 4ULL << 30; |
33 | #else |
34 | static const uptr kMaxAllowedMallocSize = 1ULL << 40; |
35 | #endif |
36 | |
37 | static Allocator allocator; |
38 | |
39 | static uptr max_malloc_size; |
40 | |
41 | void InitializeAllocator() { |
42 | SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); |
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
48 | else |
49 | max_malloc_size = kMaxAllowedMallocSize; |
50 | } |
51 | |
void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }
53 | |
54 | void AllocatorThreadFinish() { |
  allocator.SwallowCache(GetAllocatorCache());
  allocator.DestroyCache(GetAllocatorCache());
57 | } |
58 | |
59 | static ChunkMetadata *Metadata(const void *p) { |
60 | return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p)); |
61 | } |
62 | |
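// RegisterAllocation/RegisterDeallocation maintain the out-of-line chunk
// metadata. The 'allocated' flag (the first byte of ChunkMetadata) is written
// with a relaxed atomic store last when registering an allocation and cleared
// before the chunk is returned to the allocator, so the leak scanner never
// observes a chunk with partially initialized metadata.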
63 | static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { |
64 | if (!p) return; |
65 | ChunkMetadata *m = Metadata(p); |
66 | CHECK(m); |
67 | m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked; |
68 | m->stack_trace_id = StackDepotPut(stack); |
69 | m->requested_size = size; |
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
  RunMallocHooks(p, size);
72 | } |
73 | |
74 | static void RegisterDeallocation(void *p) { |
75 | if (!p) return; |
76 | ChunkMetadata *m = Metadata(p); |
77 | CHECK(m); |
  RunFreeHooks(p);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
80 | } |
81 | |
82 | static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) { |
83 | if (AllocatorMayReturnNull()) { |
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
85 | return nullptr; |
86 | } |
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
88 | } |
89 | |
90 | void *Allocate(const StackTrace &stack, uptr size, uptr alignment, |
91 | bool cleared) { |
92 | if (size == 0) |
93 | size = 1; |
94 | if (size > max_malloc_size) |
95 | return ReportAllocationSizeTooBig(size, stack); |
96 | if (UNLIKELY(IsRssLimitExceeded())) { |
97 | if (AllocatorMayReturnNull()) |
98 | return nullptr; |
    ReportRssLimitExceeded(&stack);
100 | } |
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
102 | if (UNLIKELY(!p)) { |
103 | SetAllocatorOutOfMemory(); |
104 | if (AllocatorMayReturnNull()) |
105 | return nullptr; |
    ReportOutOfMemory(size, &stack);
107 | } |
108 | // Do not rely on the allocator to clear the memory (it's slow). |
109 | if (cleared && allocator.FromPrimary(p)) |
    memset(p, 0, size);
111 | RegisterAllocation(stack, p, size); |
112 | return p; |
113 | } |
114 | |
115 | static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { |
116 | if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { |
117 | if (AllocatorMayReturnNull()) |
118 | return nullptr; |
    ReportCallocOverflow(nmemb, size, &stack);
120 | } |
121 | size *= nmemb; |
  return Allocate(stack, size, 1, true);
123 | } |
124 | |
125 | void Deallocate(void *p) { |
126 | RegisterDeallocation(p); |
  allocator.Deallocate(GetAllocatorCache(), p);
128 | } |
129 | |
130 | void *Reallocate(const StackTrace &stack, void *p, uptr new_size, |
131 | uptr alignment) { |
132 | if (new_size > max_malloc_size) { |
    ReportAllocationSizeTooBig(new_size, stack);
134 | return nullptr; |
135 | } |
136 | RegisterDeallocation(p); |
137 | void *new_p = |
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
139 | if (new_p) |
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
143 | return new_p; |
144 | } |
145 | |
146 | void GetAllocatorCacheRange(uptr *begin, uptr *end) { |
147 | *begin = (uptr)GetAllocatorCache(); |
148 | *end = *begin + sizeof(AllocatorCache); |
149 | } |
150 | |
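// Returns the start of the allocation containing p, or null if p does not
// point into a live, non-empty chunk.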
151 | static const void *GetMallocBegin(const void *p) { |
152 | if (!p) |
153 | return nullptr; |
154 | void *beg = allocator.GetBlockBegin(p); |
155 | if (!beg) |
156 | return nullptr; |
  ChunkMetadata *m = Metadata(beg);
158 | if (!m) |
159 | return nullptr; |
160 | if (!m->allocated) |
161 | return nullptr; |
162 | if (m->requested_size == 0) |
163 | return nullptr; |
164 | return (const void *)beg; |
165 | } |
166 | |
167 | uptr GetMallocUsableSize(const void *p) { |
168 | if (!p) |
169 | return 0; |
170 | ChunkMetadata *m = Metadata(p); |
171 | if (!m) return 0; |
172 | return m->requested_size; |
173 | } |
174 | |
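// Fast variant that assumes p is the exact start of a live allocation; the
// caller is responsible for checking this (see the DCHECKs in
// __sanitizer_get_allocated_size_fast below).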
175 | uptr GetMallocUsableSizeFast(const void *p) { |
176 | return Metadata(p)->requested_size; |
177 | } |
178 | |
179 | int lsan_posix_memalign(void **memptr, uptr alignment, uptr size, |
180 | const StackTrace &stack) { |
181 | if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { |
182 | if (AllocatorMayReturnNull()) |
183 | return errno_EINVAL; |
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
185 | } |
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
187 | if (UNLIKELY(!ptr)) |
188 | // OOM error is already taken care of by Allocate. |
189 | return errno_ENOMEM; |
190 | CHECK(IsAligned((uptr)ptr, alignment)); |
191 | *memptr = ptr; |
192 | return 0; |
193 | } |
194 | |
195 | void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) { |
196 | if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { |
197 | errno = errno_EINVAL; |
198 | if (AllocatorMayReturnNull()) |
199 | return nullptr; |
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
201 | } |
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
203 | } |
204 | |
205 | void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) { |
206 | if (UNLIKELY(!IsPowerOfTwo(alignment))) { |
207 | errno = errno_EINVAL; |
208 | if (AllocatorMayReturnNull()) |
209 | return nullptr; |
    ReportInvalidAllocationAlignment(alignment, &stack);
211 | } |
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
213 | } |
214 | |
215 | void *lsan_malloc(uptr size, const StackTrace &stack) { |
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
217 | } |
218 | |
219 | void lsan_free(void *p) { |
220 | Deallocate(p); |
221 | } |
222 | |
223 | void lsan_free_sized(void *p, uptr) { Deallocate(p); } |
224 | |
225 | void lsan_free_aligned_sized(void *p, uptr, uptr) { Deallocate(p); } |
226 | |
227 | void *lsan_realloc(void *p, uptr size, const StackTrace &stack) { |
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
229 | } |
230 | |
231 | void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size, |
232 | const StackTrace &stack) { |
233 | if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { |
234 | errno = errno_ENOMEM; |
235 | if (AllocatorMayReturnNull()) |
236 | return nullptr; |
    ReportReallocArrayOverflow(nmemb, size, &stack);
238 | } |
  return lsan_realloc(ptr, nmemb * size, stack);
240 | } |
241 | |
242 | void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) { |
243 | return SetErrnoOnNull(Calloc(nmemb, size, stack)); |
244 | } |
245 | |
246 | void *lsan_valloc(uptr size, const StackTrace &stack) { |
247 | return SetErrnoOnNull( |
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
249 | } |
250 | |
251 | void *lsan_pvalloc(uptr size, const StackTrace &stack) { |
252 | uptr PageSize = GetPageSizeCached(); |
253 | if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { |
254 | errno = errno_ENOMEM; |
255 | if (AllocatorMayReturnNull()) |
256 | return nullptr; |
    ReportPvallocOverflow(size, &stack);
258 | } |
259 | // pvalloc(0) should allocate one page. |
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
262 | } |
263 | |
264 | uptr lsan_mz_size(const void *p) { |
265 | return GetMallocUsableSize(p); |
266 | } |
267 | |
268 | ///// Interface to the common LSan module. ///// |
269 | |
270 | void LockAllocator() { |
271 | allocator.ForceLock(); |
272 | } |
273 | |
274 | void UnlockAllocator() { |
275 | allocator.ForceUnlock(); |
276 | } |
277 | |
278 | void GetAllocatorGlobalRange(uptr *begin, uptr *end) { |
279 | *begin = (uptr)&allocator; |
280 | *end = *begin + sizeof(allocator); |
281 | } |
282 | |
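// Returns the start of the chunk that p points into, or 0 if p does not point
// into a live chunk. A pointer to a zero-sized allocation is accepted via the
// operator new(0) special case (see IsSpecialCaseOfOperatorNew0).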
283 | uptr PointsIntoChunk(void* p) { |
284 | uptr addr = reinterpret_cast<uptr>(p); |
285 | uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p)); |
286 | if (!chunk) return 0; |
287 | // LargeMmapAllocator considers pointers to the meta-region of a chunk to be |
288 | // valid, but we don't want that. |
289 | if (addr < chunk) return 0; |
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
291 | CHECK(m); |
292 | if (!m->allocated) |
293 | return 0; |
294 | if (addr < chunk + m->requested_size) |
295 | return chunk; |
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
297 | return chunk; |
298 | return 0; |
299 | } |
300 | |
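// Chunk metadata is stored out of line, so the user pointer and the chunk
// pointer coincide.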
301 | uptr GetUserBegin(uptr chunk) { |
302 | return chunk; |
303 | } |
304 | |
305 | uptr GetUserAddr(uptr chunk) { |
306 | return chunk; |
307 | } |
308 | |
309 | LsanMetadata::LsanMetadata(uptr chunk) { |
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
311 | CHECK(metadata_); |
312 | } |
313 | |
314 | bool LsanMetadata::allocated() const { |
315 | return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated; |
316 | } |
317 | |
318 | ChunkTag LsanMetadata::tag() const { |
319 | return reinterpret_cast<ChunkMetadata *>(metadata_)->tag; |
320 | } |
321 | |
322 | void LsanMetadata::set_tag(ChunkTag value) { |
323 | reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value; |
324 | } |
325 | |
326 | uptr LsanMetadata::requested_size() const { |
327 | return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size; |
328 | } |
329 | |
330 | u32 LsanMetadata::stack_trace_id() const { |
331 | return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id; |
332 | } |
333 | |
334 | void ForEachChunk(ForEachChunkCallback callback, void *arg) { |
335 | allocator.ForEachChunk(callback, arg); |
336 | } |
337 | |
338 | IgnoreObjectResult IgnoreObject(const void *p) { |
339 | void *chunk = allocator.GetBlockBegin(p); |
340 | if (!chunk || p < chunk) return kIgnoreObjectInvalid; |
  ChunkMetadata *m = Metadata(chunk);
342 | CHECK(m); |
343 | if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) { |
344 | if (m->tag == kIgnored) |
345 | return kIgnoreObjectAlreadyIgnored; |
346 | m->tag = kIgnored; |
347 | return kIgnoreObjectSuccess; |
348 | } else { |
349 | return kIgnoreObjectInvalid; |
350 | } |
351 | } |
352 | |
353 | } // namespace __lsan |
354 | |
355 | using namespace __lsan; |
356 | |
357 | extern "C" { |
358 | SANITIZER_INTERFACE_ATTRIBUTE |
359 | uptr __sanitizer_get_current_allocated_bytes() { |
360 | uptr stats[AllocatorStatCount]; |
  allocator.GetStats(stats);
362 | return stats[AllocatorStatAllocated]; |
363 | } |
364 | |
365 | SANITIZER_INTERFACE_ATTRIBUTE |
366 | uptr __sanitizer_get_heap_size() { |
367 | uptr stats[AllocatorStatCount]; |
  allocator.GetStats(stats);
369 | return stats[AllocatorStatMapped]; |
370 | } |
371 | |
372 | SANITIZER_INTERFACE_ATTRIBUTE |
373 | uptr __sanitizer_get_free_bytes() { return 1; } |
374 | |
375 | SANITIZER_INTERFACE_ATTRIBUTE |
376 | uptr __sanitizer_get_unmapped_bytes() { return 0; } |
377 | |
378 | SANITIZER_INTERFACE_ATTRIBUTE |
379 | uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } |
380 | |
381 | SANITIZER_INTERFACE_ATTRIBUTE |
382 | int __sanitizer_get_ownership(const void *p) { |
383 | return GetMallocBegin(p) != nullptr; |
384 | } |
385 | |
386 | SANITIZER_INTERFACE_ATTRIBUTE |
387 | const void * __sanitizer_get_allocated_begin(const void *p) { |
388 | return GetMallocBegin(p); |
389 | } |
390 | |
391 | SANITIZER_INTERFACE_ATTRIBUTE |
392 | uptr __sanitizer_get_allocated_size(const void *p) { |
393 | return GetMallocUsableSize(p); |
394 | } |
395 | |
396 | SANITIZER_INTERFACE_ATTRIBUTE |
397 | uptr __sanitizer_get_allocated_size_fast(const void *p) { |
398 | DCHECK_EQ(p, __sanitizer_get_allocated_begin(p)); |
399 | uptr ret = GetMallocUsableSizeFast(p); |
400 | DCHECK_EQ(ret, __sanitizer_get_allocated_size(p)); |
401 | return ret; |
402 | } |
403 | |
404 | SANITIZER_INTERFACE_ATTRIBUTE |
405 | void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); } |
406 | |
407 | } // extern "C" |
408 | |