//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Process.h"

namespace llvm {

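// Returns true if the given memory group already contains a single free block
// large enough to satisfy a request of Size bytes.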
bool SectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
                                    uintptr_t Size) const {
  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= Size)
      return true;
  }
  return false;
}

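// Reserves a single contiguous mapping large enough for the code, read-only
// data, and read-write data of an upcoming object, so that the three groups
// end up adjacent rather than spread across disparate mappings (which, as
// noted below, can violate the ARM ABI).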
void SectionMemoryManager::reserveAllocationSpace(
    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
    return;

  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  // Code alignment needs to be at least the stub alignment; however, we don't
  // have an easy way to get that here, so as a workaround we assume it's 8,
  // which is the largest value observed across all platforms.
  constexpr uint64_t StubAlign = 8;
  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));

  // Get space required for each section. Use the same calculation as
  // allocateSection because we need to be able to satisfy it.
  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
  uint64_t RequiredRODataSize =
      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
  uint64_t RequiredRWDataSize =
      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();

  if (hasSpace(CodeMem, RequiredCodeSize) &&
      hasSpace(RODataMem, RequiredRODataSize) &&
      hasSpace(RWDataMem, RequiredRWDataSize)) {
    // Sufficient space in contiguous block already available.
    return;
  }

  // MemoryManager does not have functions for releasing memory after it's
  // allocated. Normally it tries to use any excess blocks that were allocated
  // due to page alignment, but if we have insufficient free memory for the
  // request, this can lead to allocating disparate memory that can violate the
  // ARM ABI. Clear free memory so only the new allocations are used, but do
  // not release allocated memory as it may still be in use.
  CodeMem.FreeMem.clear();
  RODataMem.FreeMem.clear();
  RWDataMem.FreeMem.clear();

  // Round up to the nearest page size. Blocks must be page-aligned.
  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
  uint64_t RequiredSize =
      RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;

  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      AllocationPurpose::RWData, RequiredSize, nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    return;
  }
  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
  CodeMem.AllocatedMem.push_back(MB);
  uintptr_t Addr = (uintptr_t)MB.base();
  FreeMemBlock FreeMB;
  FreeMB.PendingPrefixIndex = (unsigned)-1;

  if (CodeSize > 0) {
    assert(isAddrAligned(CodeAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
    CodeMem.FreeMem.push_back(FreeMB);
    Addr += RequiredCodeSize;
  }

  if (RODataSize > 0) {
    assert(isAddrAligned(RODataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
    RODataMem.FreeMem.push_back(FreeMB);
    Addr += RequiredRODataSize;
  }

  if (RWDataSize > 0) {
    assert(isAddrAligned(RWDataAlign, (void *)Addr));
    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
    RWDataMem.FreeMem.push_back(FreeMB);
  }
}

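// Allocates space for a data section, routing read-only sections to the
// ROData group and writable sections to the RWData group.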
uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
                           Size, Alignment);
  return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
                         Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

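  // Over-allocate by one extra alignment quantum so that an aligned chunk of
  // Size bytes still fits even if the block we receive is not aligned to the
  // requested boundary.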
  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // been initialized.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // allocateMappedMemory may allocate much more memory than we need. In that
  // case, store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return the aligned address
  return (uint8_t *)Addr;
}

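// Applies the final page protections to everything allocated so far: code
// becomes read+execute and read-only data becomes read-only, after which the
// instruction cache is invalidated. Returns true (and fills *ErrMsg, if
// provided) on failure, false on success.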
bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions.

  // Some platforms with separate data and instruction caches require an
  // explicit cache flush; otherwise JIT code manipulations (like resolved
  // relocations) reach the data cache but not the instruction cache.
  invalidateInstructionCache();

  return false;
}

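// Shrinks the given block to the largest page-aligned region it fully
// contains, so that a free block never overlaps a page whose permissions
// have already been finalized.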
static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSizeEstimate();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.allocatedSize();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.allocatedSize() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() &&
         Trimmed.allocatedSize() <= M.allocatedSize());

  return Trimmed;
}

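// Applies the requested protection flags to every pending block in the group,
// then clears the pending list and trims the group's free blocks back to page
// boundaries so that none of them overlaps a freshly protected page.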
std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these pointers are now invalid
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty
  erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
    return FreeMB.Free.allocatedSize() == 0;
  });

  return std::error_code();
}

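// Flushes the instruction cache for all pending code blocks; needed on
// targets with separate instruction and data caches so that freshly written
// code is visible to execution.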
void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(),
                                            Block.allocatedSize());
}

SectionMemoryManager::~SectionMemoryManager() {
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      MMapper->releaseMappedMemory(Block);
  }
}

SectionMemoryManager::MemoryMapper::~MemoryMapper() = default;

void SectionMemoryManager::anchor() {}

namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};
} // namespace

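// If the caller does not supply a MemoryMapper, fall back to the default
// sys::Memory-based mapper above and retain ownership of it in OwnedMMapper.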
SectionMemoryManager::SectionMemoryManager(MemoryMapper *UnownedMM,
                                           bool ReserveAlloc)
    : MMapper(UnownedMM), OwnedMMapper(nullptr),
      ReserveAllocation(ReserveAlloc) {
  if (!MMapper) {
    OwnedMMapper = std::make_unique<DefaultMMapper>();
    MMapper = OwnedMMapper.get();
  }
}
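
// Minimal usage sketch (illustrative only, not part of this file): MCJIT
// clients typically install this manager when building the execution engine,
// e.g.
//   EngineBuilder(std::move(TheModule))
//       .setMCJITMemoryManager(std::make_unique<SectionMemoryManager>())
//       .create();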

} // namespace llvm