//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void *);
#endif

namespace {

int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
       llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function InvalidateInstruction-
    // Cache uses instructions dcbf and icbi, both of which are treated by
    // the processor as loads. If the page has no read permissions,
    // executing these instructions will result in a segmentation fault.
    // Somehow, this problem is not present on Linux, but it does happen
    // on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // namespace

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = process::get_self()->page_size();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = process::get_self()->page_size();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

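  // On Darwin ARM targets the block is mapped read/execute only and then
  // flipped to read/write with vm_protect() below, presumably because those
  // kernels refuse mappings that are simultaneously writable and executable;
  // setExecutable() switches the block back to read/execute before the
  // emitted code is run.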
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return AllocateRWX(NumBytes, nullptr, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#elif defined(__arm__) || defined(__aarch64__)
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  return true;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
#    if defined(ANDROID)
  // The declaration of "cacheflush" in Android bionic:
  // extern int cacheflush(long start, long end, long flags);
  const char *End = Start + Len;
  long LStart = reinterpret_cast<long>(const_cast<char *>(Start));
  long LEnd = reinterpret_cast<long>(const_cast<char *>(End));
  cacheflush(LStart, LEnd, BCACHE);
#    else
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#    endif
#  endif

#endif // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm