//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//

#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Process.h"

#include <algorithm>

namespace llvm {

uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName,
                                                   bool IsReadOnly) {
  if (IsReadOnly)
    return allocateSection(RODataMem, Size, Alignment);
  return allocateSection(RWDataMem, Size, Alignment);
}

uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
                                                   unsigned Alignment,
                                                   unsigned SectionID,
                                                   StringRef SectionName) {
  return allocateSection(CodeMem, Size, Alignment);
}

uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
                                               uintptr_t Size,
                                               unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  // Round the size up to a multiple of the alignment, plus one extra
  // alignment unit so the base address can be aligned up without running
  // past the end of the block.
  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.size() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending.
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one.
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        // Extend the existing pending block to also cover this allocation.
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block.
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(RequiredSize,
                                                          &MemGroup.Near,
                                                          sys::Memory::MF_READ |
                                                            sys::Memory::MF_WRITE,
                                                          ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
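    // Until that happens, returning nullptr is the only failure signal
    // available here; the RuntimeDyld caller treats a null section address
    // as a fatal allocation failure.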
    return nullptr;
  }

  // Save this address as the basis for our next request.
  MemGroup.Near = MB;

  // Remember that we allocated this memory.
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.size();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending.
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block. Anything
  // smaller than the default alignment (16) can never satisfy a request,
  // so don't bother tracking it.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return the aligned address.
  return (uint8_t *)Addr;
}

bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
  // FIXME: Should in-progress permissions be reverted if an error occurs?
  std::error_code ec;

  // Make code memory executable.
  ec = applyMemoryGroupPermissions(CodeMem,
                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Don't allow free memory blocks to be used after setting protection flags.
  RODataMem.FreeMem.clear();

  // Make read-only data memory read-only.
  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
  if (ec) {
    if (ErrMsg) {
      *ErrMsg = ec.message();
    }
    return true;
  }

  // Read-write data memory already has the correct permissions.

  // Some platforms with separate data and instruction caches require an
  // explicit cache flush; otherwise JIT code manipulations (like resolved
  // relocations) will reach the data cache but not the instruction cache.
  invalidateInstructionCache();

  return false;
}

static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
  static const size_t PageSize = sys::Process::getPageSize();

  size_t StartOverlap =
      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;

  size_t TrimmedSize = M.size();
  TrimmedSize -= StartOverlap;
  TrimmedSize -= TrimmedSize % PageSize;

  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
                           TrimmedSize);

  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
  assert((Trimmed.size() % PageSize) == 0);
  assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());

  return Trimmed;
}

std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                  unsigned Permissions) {
  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
    if (std::error_code EC = sys::Memory::protectMappedMemory(MB, Permissions))
      return EC;

  MemGroup.PendingMem.clear();

  // Now go through free blocks and trim any of them that don't span the entire
  // page because one of the pending blocks may have overlapped it.
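  //
  // Illustrative sketch (one cell per page):
  //
  //          page N               page N+1
  //   +------------------+------------------+
  //   | pending | free       free           |
  //   +------------------+------------------+
  //
  // Page N was just protected together with the pending block, so the prefix
  // of the free block that lies on page N is no longer safely writable.
  // trimBlockToPageSize rounds the free block's base up and its size down to
  // page boundaries, keeping only pages the protection pass never touched.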
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
    // We cleared the PendingMem list, so all these indices are now invalid.
    FreeMB.PendingPrefixIndex = (unsigned)-1;
  }

  // Remove all blocks which are now empty.
  MemGroup.FreeMem.erase(
      std::remove_if(MemGroup.FreeMem.begin(), MemGroup.FreeMem.end(),
                     [](FreeMemBlock &FreeMB) {
                       return FreeMB.Free.size() == 0;
                     }),
      MemGroup.FreeMem.end());

  return std::error_code();
}

void SectionMemoryManager::invalidateInstructionCache() {
  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
    sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
}

SectionMemoryManager::~SectionMemoryManager() {
  // Release every region we ever mapped, across all three memory groups.
  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
    for (sys::MemoryBlock &Block : Group->AllocatedMem)
      sys::Memory::releaseMappedMemory(Block);
  }
}

} // namespace llvm
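
// Example usage (a sketch): a SectionMemoryManager is typically handed to
// MCJIT through EngineBuilder, which routes RuntimeDyld's section allocations
// into the memory groups above:
//
//   std::unique_ptr<Module> M = /* ... */;
//   ExecutionEngine *EE =
//       EngineBuilder(std::move(M))
//           .setMCJITMemoryManager(llvm::make_unique<SectionMemoryManager>())
//           .create();
//
// Clients that need different behavior (for example, reserving one large
// slab up front) can subclass this class and override the allocate* hooks.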