//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_mman.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_report.h"
#include "tsan_rtl.h"

namespace __tsan {

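// Callbacks invoked by the allocator when it maps or unmaps memory obtained
// from the OS. Mapping needs no extra work here; on unmap we release the
// corresponding shadow and meta shadow ranges back to the OS (see OnUnmap).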
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

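// The allocator lives in aligned static storage and is initialized explicitly
// from InitializeAllocator() rather than via a global constructor.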
alignas(64) static char allocator_placeholder[sizeof(Allocator)];
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex stands in for all of the internal allocator's mutexes for the
  // purposes of deadlock detection. The internal allocator uses multiple
  // mutexes; they are locked only occasionally and are spin mutexes, which
  // don't support deadlock detection. So we use this single fake mutex as a
  // substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

alignas(64) static char global_proc_placeholder[sizeof(GlobalProc)];
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

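// Models an internal allocation as a lock/unlock of the fake
// internal_alloc_mtx so that the deadlock detector can see it
// (the real internal allocator mutexes are not tracked, see GlobalProc).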
static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we would destroy the thread state (and unwire the proc) when a
  // thread actually exits (i.e. when we join/wait on it); then we would not
  // need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

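// Fork support: all allocator-related mutexes are acquired before fork() and
// released in both the parent and the child afterwards, so that the child
// does not inherit them in a locked state.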
void AllocatorLockBeforeFork() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
#if !SANITIZER_APPLE
  // OS X allocates from hooks, see 6a3958247a.
  allocator()->ForceLock();
  StackDepotLockBeforeFork();
#endif
}

void AllocatorUnlockAfterFork(bool child) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
#if !SANITIZER_APPLE
  StackDepotUnlockAfterFork(child);
  allocator()->ForceUnlock();
#endif
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

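// Hard cap on a single user allocation (1ull << 40 = 1 TiB). The effective
// limit may be lowered via the max_allocation_size_mb flag (see
// InitializeAllocator below).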
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

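// Reports a malloc/free performed inside a signal handler
// (such calls are async-signal-unsafe).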
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  // Use alloca, because malloc during signal handling can deadlock.
  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
  // Open a new scope: Apple platforms require the locks below to be released
  // before symbolizing in order to avoid a deadlock.
  {
    ThreadRegistryLock l(&ctx->thread_registry);
    new (rep) ScopedReport(ReportTypeSignalUnsafe);
    rep->AddStack(stack, true);
#if SANITIZER_APPLE
  }  // Close this scope to release the locks.
#endif
  OutputReport(thr, *rep);

  // Need to destroy this manually because it was allocated with placement new.
  rep->~ScopedReport();
#if !SANITIZER_APPLE
  }
#endif
}

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(n, size, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

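// Called for every user allocation: registers the heap block in the metamap
// and marks the returned range in shadow (imitating a write when possible).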
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have the trace initialized, we can't imitate writes.
  // In such a case just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

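// Returns the beginning of the heap block containing p,
// or nullptr if p does not point into a live heap allocation.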
static const void *user_alloc_begin(const void *p) {
  if (p == nullptr || !IsAppMem((uptr)p))
    return nullptr;
  void *beg = allocator()->GetBlockBegin(p);
  if (!beg)
    return nullptr;

  MBlock *b = ctx->metamap.GetBlock((uptr)beg);
  if (!b)
    return nullptr;  // Not a valid pointer.

  return (const void *)beg;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

uptr user_alloc_usable_size_fast(const void *p) {
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  // Static objects may have malloc'd before tsan completes
  // initialization, and may believe returned ptrs to be valid.
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  RunFreeHooks(ptr);
}

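// Allocation/deallocation for the runtime's own needs. This memory comes from
// the internal allocator and is never handed out to the instrumented program.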
void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

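// Implementation of the __sanitizer_* allocator introspection interface
// (see sanitizer_common/sanitizer_allocator_interface.h).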
extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return user_alloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = user_alloc_usable_size_fast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() {
  allocator()->ForceReleaseToOS();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"