//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Valgrind.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
# if defined(__OpenBSD__)
#  include <mips64/sysarch.h>
# else
#  include <sys/cachectl.h>
# endif
#endif

// Provided by libkern on Mac OS X; declared here so the Apple path in
// InvalidateInstructionCache below can call it.
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);

namespace {

/// Map the llvm::sys::Memory::MF_* protection flags onto the equivalent
/// POSIX PROT_* flags for mmap/mprotect.
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
       llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences.  InvalidateInstructionCache uses
    // the instructions dcbf and icbi, both of which are treated by the
    // processor as loads.  If the page has no read permissions, executing
    // these instructions results in a segmentation fault.  This problem
    // does not appear on Linux, but it does happen on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // namespace

namespace llvm {
namespace sys {

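/// Allocate (at least) \p NumBytes of page-rounded memory with the
/// protection requested in \p PFlags, preferring (but not requiring) an
/// address just past \p NearBlock when one is given.  On failure, \p EC is
/// set and a default-constructed MemoryBlock is returned.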
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = process::get_self()->page_size();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above.

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);

    EC = error_code(errno, system_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (0 != ::munmap(M.Address, M.Size))
    return error_code(errno, system_category());

  M.Address = 0;
  M.Size = 0;

  return error_code::success();
}

error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (!Flags)
    return error_code(EINVAL, generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return error_code(errno, system_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return error_code::success();
}
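
// Illustrative usage only (a sketch, not part of this file's API): a
// typical JIT-style sequence using the error_code-based functions above.
// 'emitCodeInto' is a hypothetical placeholder for the caller's emitter.
//
//   error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       Size, 0, Memory::MF_READ | Memory::MF_WRITE, EC);
//   if (EC) { /* report and bail out */ }
//   emitCodeInto(MB.base(), MB.size());   // hypothetical emitter
//   EC = Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
//   // protectMappedMemory already invalidated the icache for MF_EXEC.
//   Memory::releaseMappedMemory(MB);
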
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions.  This is typically used for JIT applications where we want
/// to emit code to the memory and then jump to it.  Getting this type of
/// memory is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = process::get_self()->page_size();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : 0;

#if defined(__APPLE__) && defined(__arm__)
  // Darwin on ARM refuses pages that are both writable and executable, so
  // map RX here and toggle the block to RW below with vm_protect.
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint, keeping ErrMsg so a
                   // second failure is still reported.
      return AllocateRWX(NumBytes, 0, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && defined(__arm__)
  // Flip the fresh mapping to RX (VM_PROT_COPY allows the protection change
  // on this mapping), then to RW so the caller can fill the block with code;
  // setExecutable flips it back to RX later.
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

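// The four helpers below exist for the Darwin/ARM W^X policy: the kernel
// there refuses pages that are simultaneously writable and executable, so
// JIT clients must bounce a block between RW (to emit) and RX (to run) with
// vm_protect; setWritable and setExecutable also invalidate the instruction
// cache around the transition.  On every other host these calls are no-ops
// that simply report success.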
bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

# if (defined(__POWERPC__) || defined (__ppc__) || \
      defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif

#else

# if (defined(__POWERPC__) || defined (__ppc__) || \
      defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  // Round the range outward to whole cache lines; e.g. with LineSize 32,
  // Addr 0x1005 and Len 0x40 flush lines 0x1000, 0x1020, and 0x1040.
  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // Flush the data cache to memory (dcbf), then wait for completion (sync).
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  // Invalidate the corresponding instruction-cache lines (icbi) and discard
  // any prefetched instructions (isync).
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
# elif defined(__arm__) && defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
# elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
#  if defined(ANDROID)
  // The declaration of "cacheflush" in Android bionic:
  //   extern int cacheflush(long start, long end, long flags);
  const char *End = Start + Len;
  long LStart = reinterpret_cast<long>(const_cast<char *>(Start));
  long LEnd = reinterpret_cast<long>(const_cast<char *>(End));
  cacheflush(LStart, LEnd, BCACHE);
#  else
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#  endif
# endif

#endif // end #if defined(__APPLE__)

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm
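
// Illustrative usage only (a sketch, never compiled): the classic JIT flow
// through the legacy RWX API above.  'Emitter' is a hypothetical stand-in
// for the caller's code generator.
//
//   std::string Err;
//   MemoryBlock MB = Memory::AllocateRWX(Size, 0, &Err);
//   Memory::setWritable(MB, &Err);    // no-op except on Darwin/ARM
//   Emitter.writeCodeTo(MB.base(), MB.size());   // hypothetical
//   Memory::setExecutable(MB, &Err);  // RW -> RX on Darwin/ARM
//   Memory::InvalidateInstructionCache(MB.base(), MB.size());
//   // ... call into the block, then:
//   Memory::ReleaseRWX(MB, &Err);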