//===- SimpleExecutorMemoryManager.cpp - Simple executor-side memory mgmt -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h"

#include "llvm/ADT/ScopeExit.h"
#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "orc"

namespace llvm {
namespace orc {
namespace rt_bootstrap {

SimpleExecutorMemoryManager::~SimpleExecutorMemoryManager() {
  assert(Slabs.empty() && "shutdown not called?");
}

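// Reserve a slab of read/write memory and record it so that subsequent
// initialize / release operations can be validated against it.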
Expected<ExecutorAddr> SimpleExecutorMemoryManager::reserve(uint64_t Size) {
  std::error_code EC;
  auto MB = sys::Memory::allocateMappedMemory(
      Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (EC)
    return errorCodeToError(EC);
  std::lock_guard<std::mutex> Lock(M);
  assert(!Slabs.count(MB.base()) && "Duplicate allocation addr");
  Slabs[MB.base()].Size = Size;
  return ExecutorAddr::fromPtr(MB.base());
}

Expected<ExecutorAddr>
SimpleExecutorMemoryManager::initialize(tpctypes::FinalizeRequest &FR) {
  if (FR.Segments.empty()) {
    if (FR.Actions.empty())
      return make_error<StringError>("Finalization request is empty",
                                     inconvertibleErrorCode());
    else
      return make_error<StringError>("Finalization actions attached to empty "
                                     "finalization request",
                                     inconvertibleErrorCode());
  }

  ExecutorAddrRange RR(FR.Segments.front().Addr, FR.Segments.front().Addr);

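  // If anything fails after permissions have been applied, restore the
  // affected blocks to read/write and invalidate the instruction cache for
  // the whole range before returning.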
  std::vector<sys::MemoryBlock> MBsToReset;
  auto ResetMBs = llvm::make_scope_exit([&]() {
    for (auto &MB : MBsToReset)
      sys::Memory::protectMappedMemory(MB, sys::Memory::MF_READ |
                                               sys::Memory::MF_WRITE);
    sys::Memory::InvalidateInstructionCache(RR.Start.toPtr<void *>(),
                                            RR.size());
  });

  // Copy content and apply permissions.
  for (auto &Seg : FR.Segments) {
    RR.Start = std::min(RR.Start, Seg.Addr);
    RR.End = std::max(RR.End, Seg.Addr + Seg.Size);

    // Check segment ranges.
    if (LLVM_UNLIKELY(Seg.Size < Seg.Content.size()))
      return make_error<StringError>(
          formatv("Segment {0:x} content size ({1:x} bytes) "
                  "exceeds segment size ({2:x} bytes)",
                  Seg.Addr.getValue(), Seg.Content.size(), Seg.Size),
          inconvertibleErrorCode());
    ExecutorAddr SegEnd = Seg.Addr + ExecutorAddrDiff(Seg.Size);
    if (LLVM_UNLIKELY(Seg.Addr < RR.Start || SegEnd > RR.End))
      return make_error<StringError>(
          formatv("Segment {0:x} -- {1:x} crosses boundary of "
                  "allocation {2:x} -- {3:x}",
                  Seg.Addr, SegEnd, RR.Start, RR.End),
          inconvertibleErrorCode());

    char *Mem = Seg.Addr.toPtr<char *>();
    if (!Seg.Content.empty())
      memcpy(Mem, Seg.Content.data(), Seg.Content.size());
    memset(Mem + Seg.Content.size(), 0, Seg.Size - Seg.Content.size());
    assert(Seg.Size <= std::numeric_limits<size_t>::max());

    sys::MemoryBlock MB(Mem, Seg.Size);
    if (auto EC = sys::Memory::protectMappedMemory(
            MB, toSysMemoryProtectionFlags(Seg.RAG.Prot)))
      return errorCodeToError(EC);

    MBsToReset.push_back(MB);

    if ((Seg.RAG.Prot & MemProt::Exec) == MemProt::Exec)
      sys::Memory::InvalidateInstructionCache(Mem, Seg.Size);
  }

  auto DeallocActions = runFinalizeActions(FR.Actions);
  if (!DeallocActions)
    return DeallocActions.takeError();

  {
    std::lock_guard<std::mutex> Lock(M);
    auto Region = createRegionInfo(RR, "In initialize");
    if (!Region)
      return Region.takeError();
    Region->DeallocActions = std::move(*DeallocActions);
  }

  // Successful initialization.
  ResetMBs.release();

  return RR.Start;
}

Error SimpleExecutorMemoryManager::deinitialize(
    const std::vector<ExecutorAddr> &InitKeys) {
  Error Err = Error::success();

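  // Visit the keys in reverse order so that deallocation actions run in the
  // opposite order to initialization.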
  for (auto &KeyAddr : llvm::reverse(InitKeys)) {
    std::vector<shared::WrapperFunctionCall> DeallocActions;
    {
      std::scoped_lock<std::mutex> Lock(M);
      auto Slab = getSlabInfo(KeyAddr, "In deinitialize");
      if (!Slab) {
        Err = joinErrors(std::move(Err), Slab.takeError());
        continue;
      }

      auto RI = getRegionInfo(*Slab, KeyAddr, "In deinitialize");
      if (!RI) {
        Err = joinErrors(std::move(Err), RI.takeError());
        continue;
      }

      DeallocActions = std::move(RI->DeallocActions);
    }

    Err = joinErrors(std::move(Err),
                     runDeallocActions(std::move(DeallocActions)));
  }

  return Err;
}

Error SimpleExecutorMemoryManager::release(
    const std::vector<ExecutorAddr> &Bases) {
  Error Err = Error::success();

  // TODO: Prohibit new initializations within the slabs being removed?
  for (auto &Base : llvm::reverse(Bases)) {
    std::vector<shared::WrapperFunctionCall> DeallocActions;
    sys::MemoryBlock MB;

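    // Gather this slab's dealloc actions and memory block while holding the
    // lock, then run the actions and unmap the memory after releasing it.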
    {
      std::scoped_lock<std::mutex> Lock(M);

      auto SlabI = Slabs.find(Base.toPtr<void *>());
      if (SlabI == Slabs.end()) {
        Err = joinErrors(
            std::move(Err),
            make_error<StringError>("In release, " + formatv("{0:x}", Base) +
                                        " is not part of any reserved "
                                        "address range",
                                    inconvertibleErrorCode()));
        continue;
      }

      auto &Slab = SlabI->second;

      for (auto &[Addr, Region] : Slab.Regions)
        llvm::copy(Region.DeallocActions, back_inserter(DeallocActions));

      MB = {Base.toPtr<void *>(), Slab.Size};

      Slabs.erase(SlabI);
    }

    Err = joinErrors(std::move(Err), runDeallocActions(DeallocActions));
    if (auto EC = sys::Memory::releaseMappedMemory(MB))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
  }

  return Err;
}

Error SimpleExecutorMemoryManager::shutdown() {

  // TODO: Prevent new allocations during shutdown.
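  // Collect slab base addresses under the lock, then delegate to release() to
  // run dealloc actions and unmap each slab.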
  std::vector<ExecutorAddr> Bases;
  {
    std::scoped_lock<std::mutex> Lock(M);
    for (auto &[Base, Slab] : Slabs)
      Bases.push_back(ExecutorAddr::fromPtr(Base));
  }

  return release(Bases);
}

void SimpleExecutorMemoryManager::addBootstrapSymbols(
    StringMap<ExecutorAddr> &M) {
  M[rt::SimpleExecutorMemoryManagerInstanceName] = ExecutorAddr::fromPtr(this);
  M[rt::SimpleExecutorMemoryManagerReserveWrapperName] =
      ExecutorAddr::fromPtr(&reserveWrapper);
  M[rt::SimpleExecutorMemoryManagerInitializeWrapperName] =
      ExecutorAddr::fromPtr(&initializeWrapper);
  M[rt::SimpleExecutorMemoryManagerDeinitializeWrapperName] =
      ExecutorAddr::fromPtr(&deinitializeWrapper);
  M[rt::SimpleExecutorMemoryManagerReleaseWrapperName] =
      ExecutorAddr::fromPtr(&releaseWrapper);
}

Expected<SimpleExecutorMemoryManager::SlabInfo &>
SimpleExecutorMemoryManager::getSlabInfo(ExecutorAddr A, StringRef Context) {
  auto MakeBadSlabError = [&]() {
    return make_error<StringError>(
        Context + ", address " + formatv("{0:x}", A) +
            " is not part of any reserved address range",
        inconvertibleErrorCode());
  };

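  // Slabs is keyed by slab base address: upper_bound returns the first slab
  // starting above A, so the preceding entry (if any) is the only candidate
  // that could contain A.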
  auto I = Slabs.upper_bound(A.toPtr<void *>());
  if (I == Slabs.begin())
    return MakeBadSlabError();
  --I;
  if (!ExecutorAddrRange(ExecutorAddr::fromPtr(I->first), I->second.Size)
           .contains(A))
    return MakeBadSlabError();

  return I->second;
}

Expected<SimpleExecutorMemoryManager::SlabInfo &>
SimpleExecutorMemoryManager::getSlabInfo(ExecutorAddrRange R,
                                         StringRef Context) {
  auto MakeBadSlabError = [&]() {
    return make_error<StringError>(
        Context + ", range " + formatv("{0:x}", R) +
            " is not part of any reserved address range",
        inconvertibleErrorCode());
  };

  auto I = Slabs.upper_bound(R.Start.toPtr<void *>());
  if (I == Slabs.begin())
    return MakeBadSlabError();
  --I;
  if (!ExecutorAddrRange(ExecutorAddr::fromPtr(I->first), I->second.Size)
           .contains(R))
    return MakeBadSlabError();

  return I->second;
}

Expected<SimpleExecutorMemoryManager::RegionInfo &>
SimpleExecutorMemoryManager::createRegionInfo(ExecutorAddrRange R,
                                              StringRef Context) {

  auto Slab = getSlabInfo(R, Context);
  if (!Slab)
    return Slab.takeError();

  auto MakeBadRegionError = [&](ExecutorAddrRange Other, bool Prev) {
    return make_error<StringError>(Context + ", region " + formatv("{0:x}", R) +
                                       " overlaps " +
                                       (Prev ? "previous" : "following") +
                                       " region " + formatv("{0:x}", Other),
                                   inconvertibleErrorCode());
  };

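  // Regions within a slab are kept non-overlapping, so it is sufficient to
  // check the nearest existing region on each side of R for overlap.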
  auto I = Slab->Regions.upper_bound(R.Start);
  if (I != Slab->Regions.begin()) {
    auto J = std::prev(I);
    ExecutorAddrRange PrevRange(J->first, J->second.Size);
    if (PrevRange.overlaps(R))
      return MakeBadRegionError(PrevRange, true);
  }
  if (I != Slab->Regions.end()) {
    ExecutorAddrRange NextRange(I->first, I->second.Size);
    if (NextRange.overlaps(R))
      return MakeBadRegionError(NextRange, false);
  }

  auto &RInfo = Slab->Regions[R.Start];
  RInfo.Size = R.size();
  return RInfo;
}

Expected<SimpleExecutorMemoryManager::RegionInfo &>
SimpleExecutorMemoryManager::getRegionInfo(SlabInfo &Slab, ExecutorAddr A,
                                           StringRef Context) {
  auto I = Slab.Regions.find(A);
  if (I == Slab.Regions.end())
    return make_error<StringError>(
        Context + ", address " + formatv("{0:x}", A) +
            " does not correspond to the start of any initialized region",
        inconvertibleErrorCode());

  return I->second;
}

Expected<SimpleExecutorMemoryManager::RegionInfo &>
SimpleExecutorMemoryManager::getRegionInfo(ExecutorAddr A, StringRef Context) {
  auto Slab = getSlabInfo(A, Context);
  if (!Slab)
    return Slab.takeError();

  return getRegionInfo(*Slab, A, Context);
}

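// The static *Wrapper functions below are exposed via addBootstrapSymbols.
// Each one deserializes SPS-encoded arguments from the controller and
// forwards the call to the corresponding instance method.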
llvm::orc::shared::CWrapperFunctionBuffer
SimpleExecutorMemoryManager::reserveWrapper(const char *ArgData,
                                            size_t ArgSize) {
  return shared::WrapperFunction<rt::SPSSimpleRemoteMemoryMapReserveSignature>::
      handle(ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorMemoryManager::reserve))
          .release();
}

llvm::orc::shared::CWrapperFunctionBuffer
SimpleExecutorMemoryManager::initializeWrapper(const char *ArgData,
                                               size_t ArgSize) {
  return shared::
      WrapperFunction<rt::SPSSimpleRemoteMemoryMapInitializeSignature>::handle(
          ArgData, ArgSize,
          shared::makeMethodWrapperHandler(
              &SimpleExecutorMemoryManager::initialize))
          .release();
}

llvm::orc::shared::CWrapperFunctionBuffer
SimpleExecutorMemoryManager::deinitializeWrapper(const char *ArgData,
                                                 size_t ArgSize) {
  return shared::WrapperFunction<
             rt::SPSSimpleRemoteMemoryMapDeinitializeSignature>::
      handle(ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorMemoryManager::deinitialize))
          .release();
}

llvm::orc::shared::CWrapperFunctionBuffer
SimpleExecutorMemoryManager::releaseWrapper(const char *ArgData,
                                            size_t ArgSize) {
  return shared::WrapperFunction<rt::SPSSimpleRemoteMemoryMapReleaseSignature>::
      handle(ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorMemoryManager::release))
          .release();
}

} // namespace rt_bootstrap
} // end namespace orc
} // end namespace llvm