//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Valgrind.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#ifdef __Fuchsia__
#include <zircon/syscalls.h>
#endif

#if defined(__APPLE__)
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void *);
#endif

static int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__) || defined(__powerpc__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function
    // InvalidateInstructionCache uses instructions dcbf and icbi, both of
    // which are treated by the processor as loads. If the page has no read
    // permissions, executing these instructions will result in a
    // segmentation fault.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}
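
// For example, getPosixProtectionFlags(Memory::MF_READ | Memory::MF_WRITE)
// yields PROT_READ | PROT_WRITE, while a bare MF_EXEC request is widened to
// PROT_READ | PROT_EXEC on FreeBSD and PowerPC for the reasons given above.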

namespace llvm {
namespace sys {

MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
                                         const MemoryBlock *const NearBlock,
                                         unsigned PFlags, std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  // On platforms that have it, we can use MAP_ANON to get a memory-mapped
  // page without file backing, but we need a fallback of opening /dev/zero
  // for strictly POSIX platforms instead.
  int fd;
#if defined(MAP_ANON)
  fd = -1;
#else
  fd = open("/dev/zero", O_RDWR);
  if (fd == -1) {
    EC = errnoAsErrorCode();
    return MemoryBlock();
  }
#endif

  int MMFlags = MAP_PRIVATE;
#if defined(MAP_ANON)
  MMFlags |= MAP_ANON;
#endif
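
  // Translate the requested MF_* flags into the POSIX PROT_* bits handed to
  // mmap(). On NetBSD, PROT_MPROTECT() additionally declares the maximum
  // protection this mapping may later be raised to via mprotect(); without
  // it, a later request to make the pages executable can be refused.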
  int Protect = getPosixProtectionFlags(PFlags);

#if defined(__NetBSD__) && defined(PROT_MPROTECT)
  Protect |= PROT_MPROTECT(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                    NearBlock->allocatedSize()
                              : 0;
  static const size_t PageSize = Process::getPageSizeEstimate();
  const size_t NumPages = (NumBytes + PageSize - 1) / PageSize;
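
  // For example, with a 4096-byte page size a request for 10000 bytes gives
  // NumPages = 3 (a 12288-byte mapping), and a near hint of 0x1234 is rounded
  // up to the next page boundary, 0x2000, below.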
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  // FIXME: Handle huge page requests (MF_HUGE_HINT).
  void *Addr = ::mmap(reinterpret_cast<void *>(Start), PageSize * NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) { // Try again without a near hint
#if !defined(MAP_ANON)
      close(fd);
#endif
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);
    }

    EC = errnoAsErrorCode();
#if !defined(MAP_ANON)
    close(fd);
#endif
    return MemoryBlock();
  }

#if !defined(MAP_ANON)
  close(fd);
#endif

  MemoryBlock Result;
  Result.Address = Addr;
  Result.AllocatedSize = PageSize * NumPages;
  Result.Flags = PFlags;

  // Rely on protectMappedMemory to invalidate instruction cache.
  if (PFlags & MF_EXEC) {
    EC = Memory::protectMappedMemory(Result, PFlags);
    if (EC != std::error_code())
      return MemoryBlock();
  }

  return Result;
}
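
// Typical usage of the routines in this file (an illustrative sketch, not a
// documented contract): allocate writable memory, emit code into it, then
// flip it to executable; protectMappedMemory() flushes the instruction cache
// when MF_EXEC is requested.
//
//   std::error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       CodeSize, /*NearBlock=*/nullptr,
//       Memory::MF_READ | Memory::MF_WRITE, EC);
//   if (!EC) {
//     // ... copy the generated code into MB.base() ...
//     EC = Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
//     // ... run the code, then hand the block back ...
//     Memory::releaseMappedMemory(MB);
//   }
//
// CodeSize above is a placeholder for the caller's byte count.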

std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.AllocatedSize == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.AllocatedSize))
    return errnoAsErrorCode();

  M.Address = nullptr;
  M.AllocatedSize = 0;

  return std::error_code();
}

std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
                                            unsigned Flags) {
  static const Align PageSize = Align(Process::getPageSizeEstimate());
  if (M.Address == nullptr || M.AllocatedSize == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);
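  // Round the block's start down to its containing page boundary and its end
  // up to the next page boundary, since mprotect() operates on whole pages.
  // alignAddr() rounds up, so stepping back by PageSize - 1 first yields the
  // rounded-down start address.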
  uintptr_t Start =
      alignAddr((const uint8_t *)M.Address - PageSize.value() + 1, PageSize);
  uintptr_t End =
      alignAddr((const uint8_t *)M.Address + M.AllocatedSize, PageSize);

  bool InvalidateCache = (Flags & MF_EXEC);

#if defined(__arm__) || defined(__aarch64__)
  // Certain ARM implementations treat the icache-clear instruction as a
  // memory read, and the CPU segfaults when trying to clear the cache on a
  // !PROT_READ page. Therefore we need to temporarily add PROT_READ for the
  // sake of flushing the instruction caches.
  if (InvalidateCache && !(Protect & PROT_READ)) {
    int Result = ::mprotect((void *)Start, End - Start, Protect | PROT_READ);
    if (Result != 0)
      return errnoAsErrorCode();

    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
    InvalidateCache = false;
  }
#endif

  int Result = ::mprotect((void *)Start, End - Start, Protect);

  if (Result != 0)
    return errnoAsErrorCode();

  if (InvalidateCache)
    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

  return std::error_code();
}

/// InvalidateInstructionCache - Before the JIT can run a block of code that
/// has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {

  // icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#if (defined(__powerpc__) || defined(__arm__) || defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#endif

#elif defined(__Fuchsia__)

  zx_status_t Status = zx_cache_flush(Addr, Len, ZX_CACHE_FLUSH_INSN);
  assert(Status == ZX_OK && "cannot invalidate instruction cache");

#else

#if defined(__powerpc__) && defined(__GNUC__)
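  // Flush the freshly written code out of the data cache with dcbf, then
  // discard the matching instruction cache lines with icbi; sync waits for
  // the flushes to finish before the invalidation, and isync discards any
  // already-fetched instructions afterwards. A 32-byte cache line is assumed,
  // which is safe (merely redundant) on cores with larger lines.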
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t)Addr) & Mask;
  const intptr_t EndLine = ((intptr_t)Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#elif (defined(__arm__) || defined(__aarch64__) || defined(__loongarch__) || \
       defined(__mips__)) &&                                                 \
    defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#endif

#endif // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm