1//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_PREFIX
10#error "Define SCUDO_PREFIX prior to including this file!"
11#endif
12
13// malloc-type functions have to be aligned to std::max_align_t. This is
14// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
15// do not have to abide by the same requirement.
16#ifndef SCUDO_MALLOC_ALIGNMENT
17#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
18#endif
19
20static void reportAllocation(void *ptr, size_t size) {
21 if (SCUDO_ENABLE_HOOKS)
22 if (__scudo_allocate_hook && ptr)
23 __scudo_allocate_hook(ptr, size);
24}
25static void reportDeallocation(void *ptr) {
26 if (SCUDO_ENABLE_HOOKS)
27 if (__scudo_deallocate_hook)
28 __scudo_deallocate_hook(ptr);
29}
30static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
31 DCHECK_NE(new_ptr, nullptr);
32
33 if (SCUDO_ENABLE_HOOKS) {
34 if (__scudo_realloc_allocate_hook)
35 __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
36 else if (__scudo_allocate_hook)
37 __scudo_allocate_hook(ptr: new_ptr, size);
38 }
39}
40static void reportReallocDeallocation(void *old_ptr) {
41 if (SCUDO_ENABLE_HOOKS) {
42 if (__scudo_realloc_deallocate_hook)
43 __scudo_realloc_deallocate_hook(old_ptr);
44 else if (__scudo_deallocate_hook)
45 __scudo_deallocate_hook(ptr: old_ptr);
46 }
47}
48
49extern "C" {
50
51INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
52 scudo::uptr Product;
53 if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
54 if (SCUDO_ALLOCATOR.canReturnNull()) {
55 errno = ENOMEM;
56 return nullptr;
57 }
58 scudo::reportCallocOverflow(Count: nmemb, Size: size);
59 }
60 void *Ptr = SCUDO_ALLOCATOR.allocate(Size: Product, Origin: scudo::Chunk::Origin::Malloc,
61 SCUDO_MALLOC_ALIGNMENT, ZeroContents: true);
62 reportAllocation(ptr: Ptr, size: Product);
63 return scudo::setErrnoOnNull(Ptr);
64}
65
66INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
67 reportDeallocation(ptr);
68 SCUDO_ALLOCATOR.deallocate(Ptr: ptr, Origin: scudo::Chunk::Origin::Malloc);
69}
70
71INTERFACE WEAK void SCUDO_PREFIX(free_sized)(void *ptr, size_t size) {
72 reportDeallocation(ptr);
73 SCUDO_ALLOCATOR.deallocateSized(Ptr: ptr, Origin: scudo::Chunk::Origin::Malloc, DeleteSize: size);
74}
75
76INTERFACE WEAK void
77SCUDO_PREFIX(free_aligned_sized)(void *ptr, size_t alignment, size_t size) {
78 reportDeallocation(ptr);
79 SCUDO_ALLOCATOR.deallocateSizedAligned(Ptr: ptr, Origin: scudo::Chunk::Origin::Malloc,
80 DeleteSize: size, DeleteAlignment: alignment);
81}
82
83INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
84 struct SCUDO_MALLINFO Info = {};
85 scudo::StatCounters Stats;
86 SCUDO_ALLOCATOR.getStats(S: Stats);
87 // Space allocated in mmapped regions (bytes)
88 Info.hblkhd = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatMapped]);
89 // Maximum total allocated space (bytes)
90 Info.usmblks = Info.hblkhd;
91 // Space in freed fastbin blocks (bytes)
92 Info.fsmblks = static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatFree]);
93 // Total allocated space (bytes)
94 Info.uordblks =
95 static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
96 // Total free space (bytes)
97 Info.fordblks = Info.fsmblks;
98 return Info;
99}
100
101// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
102#if !SCUDO_ANDROID
103INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
104 struct __scudo_mallinfo2 Info = {};
105 scudo::StatCounters Stats;
106 SCUDO_ALLOCATOR.getStats(S: Stats);
107 // Space allocated in mmapped regions (bytes)
108 Info.hblkhd = Stats[scudo::StatMapped];
109 // Maximum total allocated space (bytes)
110 Info.usmblks = Info.hblkhd;
111 // Space in freed fastbin blocks (bytes)
112 Info.fsmblks = Stats[scudo::StatFree];
113 // Total allocated space (bytes)
114 Info.uordblks = Stats[scudo::StatAllocated];
115 // Total free space (bytes)
116 Info.fordblks = Info.fsmblks;
117 return Info;
118}
119#endif
120
121INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
122 void *Ptr = SCUDO_ALLOCATOR.allocate(Size: size, Origin: scudo::Chunk::Origin::Malloc,
123 SCUDO_MALLOC_ALIGNMENT);
124 reportAllocation(ptr: Ptr, size);
125 return scudo::setErrnoOnNull(Ptr);
126}
127
128#if SCUDO_ANDROID
129INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
130#else
131INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
132#endif
133 return SCUDO_ALLOCATOR.getUsableSize(Ptr: ptr);
134}
135
136INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
137 // Android rounds up the alignment to a power of two if it isn't one.
138 if (SCUDO_ANDROID) {
139 if (UNLIKELY(!alignment)) {
140 alignment = 1U;
141 } else {
142 if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
143 alignment = scudo::roundUpPowerOfTwo(Size: alignment);
144 }
145 } else {
146 if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
147 if (SCUDO_ALLOCATOR.canReturnNull()) {
148 errno = EINVAL;
149 return nullptr;
150 }
151 scudo::reportAlignmentNotPowerOfTwo(Alignment: alignment);
152 }
153 }
154 void *Ptr =
155 SCUDO_ALLOCATOR.allocate(Size: size, Origin: scudo::Chunk::Origin::Memalign, Alignment: alignment);
156 reportAllocation(ptr: Ptr, size);
157 return Ptr;
158}
159
160INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
161 size_t size) {
162 if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
163 if (!SCUDO_ALLOCATOR.canReturnNull())
164 scudo::reportInvalidPosixMemalignAlignment(Alignment: alignment);
165 return EINVAL;
166 }
167 void *Ptr =
168 SCUDO_ALLOCATOR.allocate(Size: size, Origin: scudo::Chunk::Origin::Memalign, Alignment: alignment);
169 if (UNLIKELY(!Ptr))
170 return ENOMEM;
171 reportAllocation(ptr: Ptr, size);
172
173 *memptr = Ptr;
174 return 0;
175}
176
177INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
178 const scudo::uptr PageSize = scudo::getPageSizeCached();
179 if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
180 if (SCUDO_ALLOCATOR.canReturnNull()) {
181 errno = ENOMEM;
182 return nullptr;
183 }
184 scudo::reportPvallocOverflow(Size: size);
185 }
186 // pvalloc(0) should allocate one page.
187 void *Ptr =
188 SCUDO_ALLOCATOR.allocate(Size: size ? scudo::roundUp(X: size, Boundary: PageSize) : PageSize,
189 Origin: scudo::Chunk::Origin::Memalign, Alignment: PageSize);
190 reportAllocation(ptr: Ptr, size: scudo::roundUp(X: size, Boundary: PageSize));
191
192 return scudo::setErrnoOnNull(Ptr);
193}
194
195INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
196 if (!ptr) {
197 void *Ptr = SCUDO_ALLOCATOR.allocate(Size: size, Origin: scudo::Chunk::Origin::Malloc,
198 SCUDO_MALLOC_ALIGNMENT);
199 reportAllocation(ptr: Ptr, size);
200 return scudo::setErrnoOnNull(Ptr);
201 }
202 if (size == 0) {
203 reportDeallocation(ptr);
204 SCUDO_ALLOCATOR.deallocate(Ptr: ptr, Origin: scudo::Chunk::Origin::Malloc);
205 return nullptr;
206 }
207
208 // Given that the reporting of deallocation and allocation are not atomic, we
209 // always pretend the old pointer will be released so that the user doesn't
210 // need to worry about the false double-use case from the view of hooks.
211 //
212 // For example, assume that `realloc` releases the old pointer and allocates a
213 // new pointer. Before the reporting of both operations has been done, another
214 // thread may get the old pointer from `malloc`. It may be misinterpreted as
215 // double-use if it's not handled properly on the hook side.
216 reportReallocDeallocation(old_ptr: ptr);
217 void *NewPtr = SCUDO_ALLOCATOR.reallocate(OldPtr: ptr, NewSize: size, SCUDO_MALLOC_ALIGNMENT);
218 if (NewPtr != nullptr) {
219 // Note that even if NewPtr == ptr, the size has changed. We still need to
220 // report the new size.
221 reportReallocAllocation(/*OldPtr=*/old_ptr: ptr, new_ptr: NewPtr, size);
222 } else {
223 // If `realloc` fails, the old pointer is not released. Report the old
224 // pointer as allocated again.
225 reportReallocAllocation(/*OldPtr=*/old_ptr: ptr, /*NewPtr=*/new_ptr: ptr,
226 SCUDO_ALLOCATOR.getAllocSize(Ptr: ptr));
227 }
228
229 return scudo::setErrnoOnNull(NewPtr);
230}
231
232INTERFACE WEAK void *SCUDO_PREFIX(reallocarray)(void *ptr, size_t nmemb,
233 size_t size) {
234 scudo::uptr Product;
235 if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
236 if (SCUDO_ALLOCATOR.canReturnNull()) {
237 errno = ENOMEM;
238 return nullptr;
239 }
240 scudo::reportReallocarrayOverflow(Count: nmemb, Size: size);
241 }
242 return SCUDO_PREFIX(realloc)(ptr, size: Product);
243}
244
245INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
246 void *Ptr = SCUDO_ALLOCATOR.allocate(Size: size, Origin: scudo::Chunk::Origin::Memalign,
247 Alignment: scudo::getPageSizeCached());
248 reportAllocation(ptr: Ptr, size);
249
250 return scudo::setErrnoOnNull(Ptr);
251}
252
253INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
254 uintptr_t base, size_t size,
255 void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
256 SCUDO_ALLOCATOR.iterateOverChunks(Base: base, Size: size, Callback: callback, Arg: arg);
257 return 0;
258}
259
260INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
261
262INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
263 SCUDO_ALLOCATOR.disable();
264}
265
// Post-initialization hook: finishes GWP-ASan setup and registers fork
// handlers so the allocator is quiesced across fork(): disabled in the
// prepare handler, re-enabled in both the parent and the child.
void SCUDO_PREFIX(malloc_postinit)() {
  SCUDO_ALLOCATOR.initGwpAsan();
  pthread_atfork(SCUDO_PREFIX(malloc_disable), SCUDO_PREFIX(malloc_enable),
                 SCUDO_PREFIX(malloc_enable));
}
271
272INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
273 if (param == M_DECAY_TIME) {
274 if (SCUDO_ANDROID) {
275 // Before changing the interval, reset the memory usage status by doing a
276 // M_PURGE call so that we can minimize the impact of any unreleased pages
277 // introduced by interval transition.
278 SCUDO_ALLOCATOR.releaseToOS(ReleaseType: scudo::ReleaseToOS::Force);
279
280 // The values allowed on Android are {-1, 0, 1}. "1" means the longest
281 // interval.
282 CHECK(value >= -1 && value <= 1);
283 if (value == 1)
284 value = INT32_MAX;
285 }
286
287 SCUDO_ALLOCATOR.setOption(O: scudo::Option::ReleaseInterval,
288 Value: static_cast<scudo::sptr>(value));
289 return 1;
290 } else if (param == M_PURGE) {
291 SCUDO_ALLOCATOR.releaseToOS(ReleaseType: scudo::ReleaseToOS::Force);
292 return 1;
293 } else if (param == M_PURGE_FAST) {
294 SCUDO_ALLOCATOR.releaseToOS(ReleaseType: scudo::ReleaseToOS::ForceFast);
295 return 1;
296 } else if (param == M_PURGE_ALL) {
297 SCUDO_ALLOCATOR.releaseToOS(ReleaseType: scudo::ReleaseToOS::ForceAll);
298 return 1;
299 } else if (param == M_LOG_STATS) {
300 SCUDO_ALLOCATOR.printStats();
301 SCUDO_ALLOCATOR.printFragmentationInfo();
302 return 1;
303 } else {
304 scudo::Option option;
305 switch (param) {
306 case M_MEMTAG_TUNING:
307 option = scudo::Option::MemtagTuning;
308 break;
309 case M_THREAD_DISABLE_MEM_INIT:
310 option = scudo::Option::ThreadDisableMemInit;
311 break;
312 case M_CACHE_COUNT_MAX:
313 option = scudo::Option::MaxCacheEntriesCount;
314 break;
315 case M_CACHE_SIZE_MAX:
316 option = scudo::Option::MaxCacheEntrySize;
317 break;
318 case M_TSDS_COUNT_MAX:
319 option = scudo::Option::MaxTSDsCount;
320 break;
321 default:
322 return 0;
323 }
324 return SCUDO_ALLOCATOR.setOption(O: option, Value: static_cast<scudo::sptr>(value));
325 }
326}
327
328INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
329 size_t size) {
330 if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
331 if (SCUDO_ALLOCATOR.canReturnNull()) {
332 errno = EINVAL;
333 return nullptr;
334 }
335 scudo::reportInvalidAlignedAllocAlignment(Size: alignment, Alignment: size);
336 }
337
338 void *Ptr =
339 SCUDO_ALLOCATOR.allocate(Size: size, Origin: scudo::Chunk::Origin::Memalign, Alignment: alignment);
340 reportAllocation(ptr: Ptr, size);
341
342 return scudo::setErrnoOnNull(Ptr);
343}
344
345INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
346 const scudo::uptr max_size =
347 decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
348 auto *sizes = static_cast<scudo::uptr *>(
349 SCUDO_PREFIX(calloc)(nmemb: max_size, size: sizeof(scudo::uptr)));
350 auto callback = [](uintptr_t, size_t size, void *arg) {
351 auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
352 if (size < max_size)
353 sizes[size]++;
354 };
355
356 SCUDO_ALLOCATOR.disable();
357 SCUDO_ALLOCATOR.iterateOverChunks(Base: 0, Size: -1ul, Callback: callback, Arg: sizes);
358 SCUDO_ALLOCATOR.enable();
359
360 fputs(s: "<malloc version=\"scudo-1\">\n", stream: stream);
361 for (scudo::uptr i = 0; i != max_size; ++i)
362 if (sizes[i])
363 fprintf(stream: stream, format: "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
364 fputs(s: "</malloc>\n", stream: stream);
365 SCUDO_PREFIX(free)(ptr: sizes);
366 return 0;
367}
368
369// Disable memory tagging for the heap. The caller must disable memory tag
370// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
371// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
  // One-way switch; the caller's obligations are described in the comment
  // above this function.
  SCUDO_ALLOCATOR.disableMemoryTagging();
}
375
376// Sets whether scudo records stack traces and other metadata for allocations
377// and deallocations. This function only has an effect if the allocator and
378// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
  // Straight passthrough; the allocator decides whether tracking can actually
  // take effect (see the memory-tagging caveat in the comment above).
  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}
383
384// Sets whether scudo zero-initializes all allocated memory.
385INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
386 SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
387 : scudo::NoFill);
388}
389
390// Sets whether scudo pattern-initializes all allocated memory.
391INTERFACE WEAK void
392SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
393 SCUDO_ALLOCATOR.setFillContents(
394 pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
395}
396
397// Sets whether scudo adds a small amount of slack at the end of large
398// allocations, before the guard page. This can be enabled to work around buggy
399// applications that read a few bytes past the end of their allocation.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
  // Straight passthrough of the flag described in the comment above.
  SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
}
404
405} // extern "C"
406