//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

14#define DEBUG_TYPE "jitlink"
15
16using namespace llvm;
17
18namespace llvm {
19namespace jitlink {
20
21JITLinkMemoryManager::~JITLinkMemoryManager() = default;
22JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
23
24BasicLayout::BasicLayout(LinkGraph &G) : G(G) {
25
26 for (auto &Sec : G.sections()) {
27 // Skip empty sections, and sections with NoAlloc lifetime policies.
28 if (Sec.blocks().empty() ||
29 Sec.getMemLifetime() == orc::MemLifetime::NoAlloc)
30 continue;
31
32 auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemLifetime()}];
33 for (auto *B : Sec.blocks())
34 if (LLVM_LIKELY(!B->isZeroFill()))
35 Seg.ContentBlocks.push_back(x: B);
36 else
37 Seg.ZeroFillBlocks.push_back(x: B);
38 }
39
40 // Build Segments map.
41 auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
42 // Sort by section, address and size
43 if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
44 return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
45 if (LHS->getAddress() != RHS->getAddress())
46 return LHS->getAddress() < RHS->getAddress();
47 return LHS->getSize() < RHS->getSize();
48 };
49
50 LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
51 for (auto &KV : Segments) {
52 auto &Seg = KV.second;
53
54 llvm::sort(C&: Seg.ContentBlocks, Comp: CompareBlocks);
55 llvm::sort(C&: Seg.ZeroFillBlocks, Comp: CompareBlocks);
56
57 for (auto *B : Seg.ContentBlocks) {
58 Seg.ContentSize = alignToBlock(Addr: Seg.ContentSize, B: *B);
59 Seg.ContentSize += B->getSize();
60 Seg.Alignment = std::max(a: Seg.Alignment, b: Align(B->getAlignment()));
61 }
62
63 uint64_t SegEndOffset = Seg.ContentSize;
64 for (auto *B : Seg.ZeroFillBlocks) {
65 SegEndOffset = alignToBlock(Addr: SegEndOffset, B: *B);
66 SegEndOffset += B->getSize();
67 Seg.Alignment = std::max(a: Seg.Alignment, b: Align(B->getAlignment()));
68 }
69 Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;
70
71 LLVM_DEBUG({
72 dbgs() << " Seg " << KV.first
73 << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
74 << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
75 << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
76 });
77 }
78}
79
80Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
81BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
82 ContiguousPageBasedLayoutSizes SegsSizes;
83
84 for (auto &KV : segments()) {
85 auto &AG = KV.first;
86 auto &Seg = KV.second;
87
88 if (Seg.Alignment > PageSize)
89 return make_error<StringError>(Args: "Segment alignment greater than page size",
90 Args: inconvertibleErrorCode());
91
92 uint64_t SegSize = alignTo(Value: Seg.ContentSize + Seg.ZeroFillSize, Align: PageSize);
93 if (AG.getMemLifetime() == orc::MemLifetime::Standard)
94 SegsSizes.StandardSegs += SegSize;
95 else
96 SegsSizes.FinalizeSegs += SegSize;
97 }
98
99 return SegsSizes;
100}
101
102Error BasicLayout::apply() {
103 for (auto &KV : Segments) {
104 auto &Seg = KV.second;
105
106 assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
107 "Empty section recorded?");
108
109 for (auto *B : Seg.ContentBlocks) {
110 // Align addr and working-mem-offset.
111 Seg.Addr = alignToBlock(Addr: Seg.Addr, B: *B);
112 Seg.NextWorkingMemOffset = alignToBlock(Addr: Seg.NextWorkingMemOffset, B: *B);
113
114 // Update block addr.
115 B->setAddress(Seg.Addr);
116 Seg.Addr += B->getSize();
117
118 // Copy content to working memory, then update content to point at working
119 // memory.
120 memcpy(dest: Seg.WorkingMem + Seg.NextWorkingMemOffset, src: B->getContent().data(),
121 n: B->getSize());
122 B->setMutableContent(
123 {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
124 Seg.NextWorkingMemOffset += B->getSize();
125 }
126
127 for (auto *B : Seg.ZeroFillBlocks) {
128 // Align addr.
129 Seg.Addr = alignToBlock(Addr: Seg.Addr, B: *B);
130 // Update block addr.
131 B->setAddress(Seg.Addr);
132 Seg.Addr += B->getSize();
133 }
134
135 Seg.ContentBlocks.clear();
136 Seg.ZeroFillBlocks.clear();
137 }
138
139 return Error::success();
140}
141
142orc::shared::AllocActions &BasicLayout::graphAllocActions() {
143 return G.allocActions();
144}
145
146void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
147 std::shared_ptr<orc::SymbolStringPool> SSP,
148 Triple TT, const JITLinkDylib *JD,
149 SegmentMap Segments,
150 OnCreatedFunction OnCreated) {
151
152 static_assert(orc::AllocGroup::NumGroups == 32,
153 "AllocGroup has changed. Section names below must be updated");
154 StringRef AGSectionNames[] = {
155 "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
156 "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
157 "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
158 "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};
159
160 auto G =
161 std::make_unique<LinkGraph>(args: "", args: std::move(SSP), args: std::move(TT),
162 args: SubtargetFeatures(), args&: getGenericEdgeKindName);
163 orc::AllocGroupSmallMap<Block *> ContentBlocks;
164
165 orc::ExecutorAddr NextAddr(0x100000);
166 for (auto &KV : Segments) {
167 auto &AG = KV.first;
168 auto &Seg = KV.second;
169
170 assert(AG.getMemLifetime() != orc::MemLifetime::NoAlloc &&
171 "NoAlloc segments are not supported by SimpleSegmentAlloc");
172
173 auto AGSectionName =
174 AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
175 static_cast<bool>(AG.getMemLifetime()) << 3];
176
177 auto &Sec = G->createSection(Name: AGSectionName, Prot: AG.getMemProt());
178 Sec.setMemLifetime(AG.getMemLifetime());
179
180 if (Seg.ContentSize != 0) {
181 NextAddr =
182 orc::ExecutorAddr(alignTo(Size: NextAddr.getValue(), A: Seg.ContentAlign));
183 auto &B =
184 G->createMutableContentBlock(Parent&: Sec, MutableContent: G->allocateBuffer(Size: Seg.ContentSize),
185 Address: NextAddr, Alignment: Seg.ContentAlign.value(), AlignmentOffset: 0);
186 ContentBlocks[AG] = &B;
187 NextAddr += Seg.ContentSize;
188 }
189 }
190
191 // GRef declared separately since order-of-argument-eval isn't specified.
192 auto &GRef = *G;
193 MemMgr.allocate(JD, G&: GRef,
194 OnAllocated: [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
195 OnCreated = std::move(OnCreated)](
196 JITLinkMemoryManager::AllocResult Alloc) mutable {
197 if (!Alloc)
198 OnCreated(Alloc.takeError());
199 else
200 OnCreated(SimpleSegmentAlloc(std::move(G),
201 std::move(ContentBlocks),
202 std::move(*Alloc)));
203 });
204}
205
206Expected<SimpleSegmentAlloc> SimpleSegmentAlloc::Create(
207 JITLinkMemoryManager &MemMgr, std::shared_ptr<orc::SymbolStringPool> SSP,
208 Triple TT, const JITLinkDylib *JD, SegmentMap Segments) {
209 std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
210 auto AllocF = AllocP.get_future();
211 Create(MemMgr, SSP: std::move(SSP), TT: std::move(TT), JD, Segments: std::move(Segments),
212 OnCreated: [&](Expected<SimpleSegmentAlloc> Result) {
213 AllocP.set_value(std::move(Result));
214 });
215 return AllocF.get();
216}
217
218SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
219SimpleSegmentAlloc &
220SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
221SimpleSegmentAlloc::~SimpleSegmentAlloc() = default;
222
223SimpleSegmentAlloc::SegmentInfo
224SimpleSegmentAlloc::getSegInfo(orc::AllocGroup AG) {
225 auto I = ContentBlocks.find(G: AG);
226 if (I != ContentBlocks.end()) {
227 auto &B = *I->second;
228 return {.Addr: B.getAddress(), .WorkingMem: B.getAlreadyMutableContent()};
229 }
230 return {};
231}
232
233SimpleSegmentAlloc::SimpleSegmentAlloc(
234 std::unique_ptr<LinkGraph> G,
235 orc::AllocGroupSmallMap<Block *> ContentBlocks,
236 std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
237 : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
238 Alloc(std::move(Alloc)) {}
239
240class InProcessMemoryManager::IPInFlightAlloc
241 : public JITLinkMemoryManager::InFlightAlloc {
242public:
243 IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
244 sys::MemoryBlock StandardSegments,
245 sys::MemoryBlock FinalizationSegments)
246 : MemMgr(MemMgr), G(&G), BL(std::move(BL)),
247 StandardSegments(std::move(StandardSegments)),
248 FinalizationSegments(std::move(FinalizationSegments)) {}
249
250 ~IPInFlightAlloc() {
251 assert(!G && "InFlight alloc neither abandoned nor finalized");
252 }
253
254 void finalize(OnFinalizedFunction OnFinalized) override {
255
256 // Apply memory protections to all segments.
257 if (auto Err = applyProtections()) {
258 OnFinalized(std::move(Err));
259 return;
260 }
261
262 // Run finalization actions.
263 using WrapperFunctionCall = orc::shared::WrapperFunctionCall;
264 runFinalizeActions(
265 AAs&: G->allocActions(),
266 OnComplete: [this, OnFinalized = std::move(OnFinalized)](
267 Expected<std::vector<WrapperFunctionCall>> DeallocActions) mutable {
268 completeFinalization(OnFinalized: std::move(OnFinalized),
269 DeallocActions: std::move(DeallocActions));
270 });
271 }
272
273 void abandon(OnAbandonedFunction OnAbandoned) override {
274 Error Err = Error::success();
275 if (auto EC = sys::Memory::releaseMappedMemory(Block&: FinalizationSegments))
276 Err = joinErrors(E1: std::move(Err), E2: errorCodeToError(EC));
277 if (auto EC = sys::Memory::releaseMappedMemory(Block&: StandardSegments))
278 Err = joinErrors(E1: std::move(Err), E2: errorCodeToError(EC));
279
280#ifndef NDEBUG
281 // Set 'G' to null to flag that we've been successfully finalized.
282 // This allows us to assert at destruction time that a call has been made
283 // to either finalize or abandon.
284 G = nullptr;
285#endif
286
287 OnAbandoned(std::move(Err));
288 }
289
290private:
291 void completeFinalization(
292 OnFinalizedFunction OnFinalized,
293 Expected<std::vector<orc::shared::WrapperFunctionCall>> DeallocActions) {
294
295 if (!DeallocActions)
296 return OnFinalized(DeallocActions.takeError());
297
298 // Release the finalize segments slab.
299 if (auto EC = sys::Memory::releaseMappedMemory(Block&: FinalizationSegments)) {
300 OnFinalized(errorCodeToError(EC));
301 return;
302 }
303
304#ifndef NDEBUG
305 // Set 'G' to null to flag that we've been successfully finalized.
306 // This allows us to assert at destruction time that a call has been made
307 // to either finalize or abandon.
308 G = nullptr;
309#endif
310
311 // Continue with finalized allocation.
312 OnFinalized(MemMgr.createFinalizedAlloc(StandardSegments: std::move(StandardSegments),
313 DeallocActions: std::move(*DeallocActions)));
314 }
315
316 Error applyProtections() {
317 for (auto &KV : BL.segments()) {
318 const auto &AG = KV.first;
319 auto &Seg = KV.second;
320
321 auto Prot = toSysMemoryProtectionFlags(MP: AG.getMemProt());
322
323 uint64_t SegSize =
324 alignTo(Value: Seg.ContentSize + Seg.ZeroFillSize, Align: MemMgr.PageSize);
325 sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
326 if (auto EC = sys::Memory::protectMappedMemory(Block: MB, Flags: Prot))
327 return errorCodeToError(EC);
328 if (Prot & sys::Memory::MF_EXEC)
329 sys::Memory::InvalidateInstructionCache(Addr: MB.base(), Len: MB.allocatedSize());
330 }
331 return Error::success();
332 }
333
334 InProcessMemoryManager &MemMgr;
335 LinkGraph *G;
336 BasicLayout BL;
337 sys::MemoryBlock StandardSegments;
338 sys::MemoryBlock FinalizationSegments;
339};
340
341Expected<std::unique_ptr<InProcessMemoryManager>>
342InProcessMemoryManager::Create() {
343 if (auto PageSize = sys::Process::getPageSize()) {
344 // FIXME: Just check this once on startup.
345 if (!isPowerOf2_64(Value: (uint64_t)*PageSize))
346 return make_error<StringError>(
347 Args: "Could not create InProcessMemoryManager: Page size " +
348 Twine(*PageSize) + " is not a power of 2",
349 Args: inconvertibleErrorCode());
350
351 return std::make_unique<InProcessMemoryManager>(args&: *PageSize);
352 } else
353 return PageSize.takeError();
354}
355
356void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
357 OnAllocatedFunction OnAllocated) {
358 BasicLayout BL(G);
359
360 /// Scan the request and calculate the group and total sizes.
361 /// Check that segment size is no larger than a page.
362 auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
363 if (!SegsSizes) {
364 OnAllocated(SegsSizes.takeError());
365 return;
366 }
367
368 /// Check that the total size requested (including zero fill) is not larger
369 /// than a size_t.
370 if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
371 OnAllocated(make_error<JITLinkError>(
372 Args: "Total requested size " + formatv(Fmt: "{0:x}", Vals: SegsSizes->total()) +
373 " for graph " + G.getName() + " exceeds address space"));
374 return;
375 }
376
377 // Allocate one slab for the whole thing (to make sure everything is
378 // in-range), then partition into standard and finalization blocks.
379 //
380 // FIXME: Make two separate allocations in the future to reduce
381 // fragmentation: finalization segments will usually be a single page, and
382 // standard segments are likely to be more than one page. Where multiple
383 // allocations are in-flight at once (likely) the current approach will leave
384 // a lot of single-page holes.
385 sys::MemoryBlock Slab;
386 sys::MemoryBlock StandardSegsMem;
387 sys::MemoryBlock FinalizeSegsMem;
388 {
389 const sys::Memory::ProtectionFlags ReadWrite =
390 static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
391 sys::Memory::MF_WRITE);
392
393 std::error_code EC;
394 Slab = sys::Memory::allocateMappedMemory(NumBytes: SegsSizes->total(), NearBlock: nullptr,
395 Flags: ReadWrite, EC);
396
397 if (EC) {
398 OnAllocated(errorCodeToError(EC));
399 return;
400 }
401
402 // Zero-fill the whole slab up-front.
403 memset(s: Slab.base(), c: 0, n: Slab.allocatedSize());
404
405 StandardSegsMem = {Slab.base(),
406 static_cast<size_t>(SegsSizes->StandardSegs)};
407 FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
408 static_cast<size_t>(SegsSizes->FinalizeSegs)};
409 }
410
411 auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(Ptr: StandardSegsMem.base());
412 auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(Ptr: FinalizeSegsMem.base());
413
414 LLVM_DEBUG({
415 dbgs() << "InProcessMemoryManager allocated:\n";
416 if (SegsSizes->StandardSegs)
417 dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
418 NextStandardSegAddr + StandardSegsMem.allocatedSize())
419 << " to stardard segs\n";
420 else
421 dbgs() << " no standard segs\n";
422 if (SegsSizes->FinalizeSegs)
423 dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
424 NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
425 << " to finalize segs\n";
426 else
427 dbgs() << " no finalize segs\n";
428 });
429
430 // Build ProtMap, assign addresses.
431 for (auto &KV : BL.segments()) {
432 auto &AG = KV.first;
433 auto &Seg = KV.second;
434
435 auto &SegAddr = (AG.getMemLifetime() == orc::MemLifetime::Standard)
436 ? NextStandardSegAddr
437 : NextFinalizeSegAddr;
438
439 Seg.WorkingMem = SegAddr.toPtr<char *>();
440 Seg.Addr = SegAddr;
441
442 SegAddr += alignTo(Value: Seg.ContentSize + Seg.ZeroFillSize, Align: PageSize);
443 }
444
445 if (auto Err = BL.apply()) {
446 OnAllocated(std::move(Err));
447 return;
448 }
449
450 OnAllocated(std::make_unique<IPInFlightAlloc>(args&: *this, args&: G, args: std::move(BL),
451 args: std::move(StandardSegsMem),
452 args: std::move(FinalizeSegsMem)));
453}
454
455void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
456 OnDeallocatedFunction OnDeallocated) {
457 std::vector<sys::MemoryBlock> StandardSegmentsList;
458 std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;
459
460 {
461 std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
462 for (auto &Alloc : Allocs) {
463 auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
464 StandardSegmentsList.push_back(x: std::move(FA->StandardSegments));
465 DeallocActionsList.push_back(x: std::move(FA->DeallocActions));
466 FA->~FinalizedAllocInfo();
467 FinalizedAllocInfos.Deallocate(E: FA);
468 }
469 }
470
471 Error DeallocErr = Error::success();
472
473 while (!DeallocActionsList.empty()) {
474 auto &DeallocActions = DeallocActionsList.back();
475 auto &StandardSegments = StandardSegmentsList.back();
476
477 /// Run any deallocate calls.
478 while (!DeallocActions.empty()) {
479 if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
480 DeallocErr = joinErrors(E1: std::move(DeallocErr), E2: std::move(Err));
481 DeallocActions.pop_back();
482 }
483
484 /// Release the standard segments slab.
485 if (auto EC = sys::Memory::releaseMappedMemory(Block&: StandardSegments))
486 DeallocErr = joinErrors(E1: std::move(DeallocErr), E2: errorCodeToError(EC));
487
488 DeallocActionsList.pop_back();
489 StandardSegmentsList.pop_back();
490 }
491
492 OnDeallocated(std::move(DeallocErr));
493}
494
495JITLinkMemoryManager::FinalizedAlloc
496InProcessMemoryManager::createFinalizedAlloc(
497 sys::MemoryBlock StandardSegments,
498 std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
499 std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
500 auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
501 new (FA) FinalizedAllocInfo(
502 {.StandardSegments: std::move(StandardSegments), .DeallocActions: std::move(DeallocActions)});
503 return FinalizedAlloc(orc::ExecutorAddr::fromPtr(Ptr: FA));
504}
505
506} // end namespace jitlink
507} // end namespace llvm
508