//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/WindowsError.h"

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
#include <fcntl.h>
#include <sys/mman.h>
#if defined(__MVS__)
#include "llvm/Support/BLAKE3.h"
#include <sys/shm.h>
#endif
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {

MemoryMapper::~MemoryMapper() = default;

InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
    : PageSize(PageSize) {}

Expected<std::unique_ptr<InProcessMemoryMapper>>
InProcessMemoryMapper::Create() {
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();
  return std::make_unique<InProcessMemoryMapper>(*PageSize);
}

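// Reserve a block of read/write memory in this process and record it so that
// subsequent allocations can be attributed to the reservation.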
void InProcessMemoryMapper::reserve(size_t NumBytes,
                                    OnReservedFunction OnReserved) {
  std::error_code EC;
  auto MB = sys::Memory::allocateMappedMemory(
      NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);

  if (EC)
    return OnReserved(errorCodeToError(EC));

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations[MB.base()].Size = MB.allocatedSize();
  }

  OnReserved(
      ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
}

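// In-process mapping: working memory and target memory are the same, so the
// target address doubles as the pointer to write content to.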
char *InProcessMemoryMapper::prepare(jitlink::LinkGraph &G, ExecutorAddr Addr,
                                     size_t ContentSize) {
  return Addr.toPtr<char *>();
}

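// Zero-fill each segment's tail, apply its final memory protections,
// invalidate the instruction cache for executable segments, run the finalize
// actions, and record the allocation under its lowest address.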
void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                       OnInitializedFunction OnInitialized) {
  ExecutorAddr MinAddr(~0ULL);
  ExecutorAddr MaxAddr(0);

  // FIXME: Release finalize lifetime segments.
  for (auto &Segment : AI.Segments) {
    auto Base = AI.MappingBase + Segment.Offset;
    auto Size = Segment.ContentSize + Segment.ZeroFillSize;

    if (Base < MinAddr)
      MinAddr = Base;

    if (Base + Size > MaxAddr)
      MaxAddr = Base + Size;

    std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
                Segment.ZeroFillSize);

    if (auto EC = sys::Memory::protectMappedMemory(
            {Base.toPtr<void *>(), Size},
            toSysMemoryProtectionFlags(Segment.AG.getMemProt()))) {
      return OnInitialized(errorCodeToError(EC));
    }
    if ((Segment.AG.getMemProt() & MemProt::Exec) == MemProt::Exec)
      sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
  }

  auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
  if (!DeinitializeActions)
    return OnInitialized(DeinitializeActions.takeError());

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    // This is the maximum range whose permissions may have been modified.
    auto &Alloc = Allocations[MinAddr];
    Alloc.Size = MaxAddr - MinAddr;
    Alloc.DeinitializationActions = std::move(*DeinitializeActions);
    Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
  }

  OnInitialized(MinAddr);
}

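// Run the recorded deallocation actions for each base (in reverse order),
// restore read/write protections so the memory can be reused, and drop the
// allocation records.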
void InProcessMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Bases,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  Error AllErr = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : llvm::reverse(Bases)) {

      if (Error Err = shared::runDeallocActions(
              Allocations[Base].DeinitializationActions)) {
        AllErr = joinErrors(std::move(AllErr), std::move(Err));
      }

      // Reset protections to read/write so the area can be reused.
      if (auto EC = sys::Memory::protectMappedMemory(
              {Base.toPtr<void *>(), Allocations[Base].Size},
              sys::Memory::ProtectionFlags::MF_READ |
                  sys::Memory::ProtectionFlags::MF_WRITE)) {
        AllErr = joinErrors(std::move(AllErr), errorCodeToError(EC));
      }

      Allocations.erase(Base);
    }
  }

  OnDeinitialized(std::move(AllErr));
}

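// Deinitialize any remaining allocations within each reservation, then unmap
// the reserved block and forget it.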
void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                    OnReleasedFunction OnReleased) {
  Error Err = Error::success();

  for (auto Base : Bases) {
    std::vector<ExecutorAddr> AllocAddrs;
    size_t Size;
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      auto &R = Reservations[Base.toPtr<void *>()];
      Size = R.Size;
      AllocAddrs.swap(R.Allocations);
    }

    // Deinitialize sub-allocations.
    std::promise<MSVCPError> P;
    auto F = P.get_future();
    deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
    if (Error E = F.get()) {
      Err = joinErrors(std::move(Err), std::move(E));
    }

    // Free the memory.
    auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);

    auto EC = sys::Memory::releaseMappedMemory(MB);
    if (EC) {
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    }

    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations.erase(Base.toPtr<void *>());
  }

  OnReleased(std::move(Err));
}

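// Release all outstanding reservations synchronously before destruction
// completes.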
InProcessMemoryMapper::~InProcessMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  {
    std::lock_guard<std::mutex> Lock(Mutex);

    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations) {
      ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
    }
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  cantFail(F.get());
}

// SharedMemoryMapper

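// SharedMemoryMapper reserves memory in the executor process through the
// shared memory mapper service and maps the backing shared memory object into
// this process as well, so content written locally becomes visible to the
// executor without an explicit copy.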
SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
                                       SymbolAddrs SAs, size_t PageSize)
    : EPC(EPC), SAs(SAs), PageSize(PageSize) {
#if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
  llvm_unreachable("SharedMemoryMapper is not supported on this platform yet");
#endif
}

Expected<std::unique_ptr<SharedMemoryMapper>>
SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();

  return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
#else
  return make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode());
#endif
}

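// Ask the executor-side service to reserve memory backed by a named shared
// memory object, then map that object locally: SysV shared memory on z/OS,
// POSIX shm_open + mmap on other Unixes, and file mappings on Windows.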
void SharedMemoryMapper::reserve(size_t NumBytes,
                                 OnReservedFunction OnReserved) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)

  int SharedMemoryId = -1;
  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
      SAs.Reserve,
      [this, NumBytes, OnReserved = std::move(OnReserved), SharedMemoryId](
          Error SerializationErr,
          Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnReserved(std::move(SerializationErr));
        }

        if (!Result)
          return OnReserved(Result.takeError());

        ExecutorAddr RemoteAddr;
        std::string SharedMemoryName;
        std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);

        void *LocalAddr = nullptr;

#if defined(LLVM_ON_UNIX)

#if defined(__MVS__)
        ArrayRef<uint8_t> Data(
            reinterpret_cast<const uint8_t *>(SharedMemoryName.c_str()),
            SharedMemoryName.size());
        auto HashedName = BLAKE3::hash<sizeof(key_t)>(Data);
        key_t Key = *reinterpret_cast<key_t *>(HashedName.data());
        SharedMemoryId =
            shmget(Key, NumBytes, IPC_CREAT | __IPC_SHAREAS | 0700);
        if (SharedMemoryId < 0) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }
        LocalAddr = shmat(SharedMemoryId, nullptr, 0);
        if (LocalAddr == reinterpret_cast<void *>(-1)) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }
#else
        int SharedMemoryFile =
            shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
        if (SharedMemoryFile < 0) {
          return OnReserved(errorCodeToError(errnoAsErrorCode()));
        }

        // This prevents other processes from accessing it by name.
        shm_unlink(SharedMemoryName.c_str());

        LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                         SharedMemoryFile, 0);
        if (LocalAddr == MAP_FAILED) {
          return OnReserved(errorCodeToError(errnoAsErrorCode()));
        }

        close(SharedMemoryFile);
#endif

#elif defined(_WIN32)

        std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
                                          SharedMemoryName.end());
        HANDLE SharedMemoryFile = OpenFileMappingW(
            FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
        if (!SharedMemoryFile)
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));

        LocalAddr =
            MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        if (!LocalAddr) {
          CloseHandle(SharedMemoryFile);
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
        }

        CloseHandle(SharedMemoryFile);

#endif
        {
          std::lock_guard<std::mutex> Lock(Mutex);
          Reservations.insert(
              {RemoteAddr, {LocalAddr, NumBytes, SharedMemoryId}});
        }

        OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
      },
      SAs.Instance, static_cast<uint64_t>(NumBytes));

#else
  OnReserved(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

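// Translate a target (executor) address into the corresponding address inside
// the local mapping of the containing reservation.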
char *SharedMemoryMapper::prepare(jitlink::LinkGraph &G, ExecutorAddr Addr,
                                  size_t ContentSize) {
  auto R = Reservations.upper_bound(Addr);
  assert(R != Reservations.begin() && "Attempt to prepare unreserved range");
  R--;

  ExecutorAddrDiff Offset = Addr - R->first;

  return static_cast<char *>(R->second.LocalAddr) + Offset;
}

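// Zero-fill is applied through the local mapping; a finalize request carrying
// each segment's protections, lifetime, and the allocation actions is then
// sent to the executor-side service.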
void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                    OnInitializedFunction OnInitialized) {
  auto Reservation = Reservations.upper_bound(AI.MappingBase);
  assert(Reservation != Reservations.begin() &&
         "Attempt to initialize unreserved range");
  Reservation--;

  auto AllocationOffset = AI.MappingBase - Reservation->first;

  tpctypes::SharedMemoryFinalizeRequest FR;

  AI.Actions.swap(FR.Actions);

  FR.Segments.reserve(AI.Segments.size());

  for (auto Segment : AI.Segments) {
    char *Base = static_cast<char *>(Reservation->second.LocalAddr) +
                 AllocationOffset + Segment.Offset;
    std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);

    tpctypes::SharedMemorySegFinalizeRequest SegReq;
    SegReq.RAG = {Segment.AG.getMemProt(),
                  Segment.AG.getMemLifetime() == MemLifetime::Finalize};
    SegReq.Addr = AI.MappingBase + Segment.Offset;
    SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;

    FR.Segments.push_back(SegReq);
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
      SAs.Initialize,
      [OnInitialized = std::move(OnInitialized)](
          Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnInitialized(std::move(SerializationErr));
        }

        OnInitialized(std::move(Result));
      },
      SAs.Instance, Reservation->first, std::move(FR));
}

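// Deinitialization runs entirely in the executor process via the service.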
void SharedMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Allocations,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
      SAs.Deinitialize,
      [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
                                                     Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnDeinitialized(std::move(SerializationErr));
        }

        OnDeinitialized(std::move(Result));
      },
      SAs.Instance, Allocations);
}

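// Unmap the local views of the given reservations, then ask the executor-side
// service to release the target-side mappings.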
void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                 OnReleasedFunction OnReleased) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  Error Err = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

#if defined(LLVM_ON_UNIX)

#if defined(__MVS__)
      if (shmdt(Reservations[Base].LocalAddr) < 0 ||
          shmctl(Reservations[Base].SharedMemoryId, IPC_RMID, NULL) < 0)
        Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
#else
      if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
        Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
#endif

#elif defined(_WIN32)

      if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
        Err = joinErrors(std::move(Err),
                         errorCodeToError(mapWindowsError(GetLastError())));

#endif

      Reservations.erase(Base);
    }
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
      SAs.Release,
      [OnReleased = std::move(OnReleased),
       Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnReleased(
              joinErrors(std::move(Err), std::move(SerializationErr)));
        }

        return OnReleased(joinErrors(std::move(Err), std::move(Result)));
      },
      SAs.Instance, Bases);
#else
  OnReleased(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

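// Only the local mappings are detached here; no call is made back into the
// executor-side service.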
SharedMemoryMapper::~SharedMemoryMapper() {
  std::lock_guard<std::mutex> Lock(Mutex);
  for (const auto &R : Reservations) {

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)

#if defined(__MVS__)
    shmdt(R.second.LocalAddr);
#else
    munmap(R.second.LocalAddr, R.second.Size);
#endif

#elif defined(_WIN32)

    UnmapViewOfFile(R.second.LocalAddr);

#else

    (void)R;

#endif
  }
}

} // namespace orc

} // namespace llvm