//===-- Memory.cpp ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/Memory.h"
// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Core/DataBufferHeap.h"
#include "lldb/Core/Log.h"
#include "lldb/Core/State.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// MemoryCache constructor
//----------------------------------------------------------------------
MemoryCache::MemoryCache(Process &process) :
    m_process (process),
    m_cache_line_byte_size (512),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_cache (),
    m_invalid_ranges ()
{
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MemoryCache::~MemoryCache()
{
}

void
MemoryCache::Clear(bool clear_invalid_ranges)
{
    Mutex::Locker locker (m_mutex);
    m_cache.clear();
    if (clear_invalid_ranges)
        m_invalid_ranges.Clear();
}

void
MemoryCache::Flush (addr_t addr, size_t size)
{
    if (size == 0)
        return;

    Mutex::Locker locker (m_mutex);
    if (m_cache.empty())
        return;

    const uint32_t cache_line_byte_size = m_cache_line_byte_size;
    const addr_t end_addr = (addr + size - 1);
    const addr_t first_cache_line_addr = addr - (addr % cache_line_byte_size);
    const addr_t last_cache_line_addr = end_addr - (end_addr % cache_line_byte_size);
    // Watch for overflow where size will cause us to go off the end of the
    // 64 bit address space
    uint32_t num_cache_lines;
    if (last_cache_line_addr >= first_cache_line_addr)
        num_cache_lines = ((last_cache_line_addr - first_cache_line_addr)/cache_line_byte_size) + 1;
    else
        num_cache_lines = (UINT64_MAX - first_cache_line_addr + 1)/cache_line_byte_size;

    uint32_t cache_idx = 0;
    for (addr_t curr_addr = first_cache_line_addr;
         cache_idx < num_cache_lines;
         curr_addr += cache_line_byte_size, ++cache_idx)
    {
        BlockMap::iterator pos = m_cache.find (curr_addr);
        if (pos != m_cache.end())
            m_cache.erase(pos);
    }
}

void
MemoryCache::AddInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        InvalidRanges::Entry range (base_addr, byte_size);
        m_invalid_ranges.Append(range);
        m_invalid_ranges.Sort();
    }
}

bool
MemoryCache::RemoveInvalidRange (lldb::addr_t base_addr, lldb::addr_t byte_size)
{
    if (byte_size > 0)
    {
        Mutex::Locker locker (m_mutex);
        const uint32_t idx = m_invalid_ranges.FindEntryIndexThatContains(base_addr);
        if (idx != UINT32_MAX)
        {
            const InvalidRanges::Entry *entry = m_invalid_ranges.GetEntryAtIndex (idx);
            if (entry->GetRangeBase() == base_addr && entry->GetByteSize() == byte_size)
                return m_invalid_ranges.RemoveEntryAtIndex (idx);
        }
    }
    return false;
}

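
//----------------------------------------------------------------------
// Read dst_len bytes starting at addr. The request is satisfied from
// cached cache lines where possible; on a miss, one whole cache line is
// read from the inferior and added to m_cache before the loop retries.
// Worked example with illustrative numbers (assuming the default 512
// byte (0x200) cache line): an 8 byte read at 0x11FC copies 4 bytes
// from the line at 0x1000 (starting at cache_offset 0x1FC) and 4 bytes
// from the line at 0x1200. If the current cache line lies inside one of
// m_invalid_ranges, the read stops and returns the bytes copied so far.
//----------------------------------------------------------------------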
size_t
MemoryCache::Read (addr_t addr,
                   void *dst,
                   size_t dst_len,
                   Error &error)
{
    size_t bytes_left = dst_len;
    if (dst && bytes_left > 0)
    {
        const uint32_t cache_line_byte_size = m_cache_line_byte_size;
        uint8_t *dst_buf = (uint8_t *)dst;
        addr_t curr_addr = addr - (addr % cache_line_byte_size);
        addr_t cache_offset = addr - curr_addr;
        Mutex::Locker locker (m_mutex);

        while (bytes_left > 0)
        {
            if (m_invalid_ranges.FindEntryThatContains(curr_addr))
            {
                error.SetErrorStringWithFormat("memory read failed for 0x%" PRIx64, curr_addr);
                return dst_len - bytes_left;
            }

            BlockMap::const_iterator pos = m_cache.find (curr_addr);
            BlockMap::const_iterator end = m_cache.end ();

            if (pos != end)
            {
                size_t curr_read_size = cache_line_byte_size - cache_offset;
                if (curr_read_size > bytes_left)
                    curr_read_size = bytes_left;

                memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes() + cache_offset, curr_read_size);

                bytes_left -= curr_read_size;
                curr_addr += curr_read_size + cache_offset;
                cache_offset = 0;

                if (bytes_left > 0)
                {
                    // Get sequential cache page hits
                    for (++pos; (pos != end) && (bytes_left > 0); ++pos)
                    {
                        assert ((curr_addr % cache_line_byte_size) == 0);

                        if (pos->first != curr_addr)
                            break;

                        curr_read_size = pos->second->GetByteSize();
                        if (curr_read_size > bytes_left)
                            curr_read_size = bytes_left;

                        memcpy (dst_buf + dst_len - bytes_left, pos->second->GetBytes(), curr_read_size);

                        bytes_left -= curr_read_size;
                        curr_addr += curr_read_size;

                        // We have a cache page that succeeded in reading some
                        // bytes, but not an entire page. If this happens, we
                        // must cap off how much data we are able to read...
                        if (pos->second->GetByteSize() != cache_line_byte_size)
                            return dst_len - bytes_left;
                    }
                }
            }

            // We need to read from the process

            if (bytes_left > 0)
            {
                assert ((curr_addr % cache_line_byte_size) == 0);
                std::unique_ptr<DataBufferHeap> data_buffer_heap_ap(new DataBufferHeap (cache_line_byte_size, 0));
                size_t process_bytes_read = m_process.ReadMemoryFromInferior (curr_addr,
                                                                              data_buffer_heap_ap->GetBytes(),
                                                                              data_buffer_heap_ap->GetByteSize(),
                                                                              error);
                if (process_bytes_read == 0)
                    return dst_len - bytes_left;

                if (process_bytes_read != cache_line_byte_size)
                    data_buffer_heap_ap->SetByteSize (process_bytes_read);
                m_cache[curr_addr] = DataBufferSP (data_buffer_heap_ap.release());
                // We have read data and put it into the cache, continue through the
                // loop again to get the data out of the cache...
            }
        }
    }

    return dst_len - bytes_left;
}

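
//----------------------------------------------------------------------
// An AllocatedBlock tracks one allocation made in the inferior and
// carves it into fixed-size chunks. m_offset_to_chunk_size maps the
// byte offset of each reservation within the block to the number of
// chunks reserved at that offset; the gaps between entries are free.
// For example (illustrative numbers): a 4096 byte block with 16 byte
// chunks holds 256 chunks, and an entry of {32 : 4} means 64 bytes are
// reserved starting at offset 32.
//----------------------------------------------------------------------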
AllocatedBlock::AllocatedBlock (lldb::addr_t addr,
                                uint32_t byte_size,
                                uint32_t permissions,
                                uint32_t chunk_size) :
    m_addr (addr),
    m_byte_size (byte_size),
    m_permissions (permissions),
    m_chunk_size (chunk_size),
    m_offset_to_chunk_size ()
//    m_allocated (byte_size / chunk_size)
{
    assert (byte_size > chunk_size);
}

AllocatedBlock::~AllocatedBlock ()
{
}

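//----------------------------------------------------------------------
// Reserve room for 'size' bytes using a first-fit scan over the sorted
// m_offset_to_chunk_size map: take the block start if the map is empty
// (case [1] in the logging below), otherwise the first gap between
// reservations (case [2]), or the tail after the last reservation
// (case [3]). Worked example with illustrative numbers: in a 4096 byte
// block with 16 byte chunks and existing reservations {0 : 2, 64 : 1},
// a 20 byte request needs 2 chunks and lands in the 32 byte gap at
// offset 32, yielding m_addr + 32.
//----------------------------------------------------------------------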
lldb::addr_t
AllocatedBlock::ReserveBlock (uint32_t size)
{
    addr_t addr = LLDB_INVALID_ADDRESS;
    if (size <= m_byte_size)
    {
        const uint32_t needed_chunks = CalculateChunksNeededForSize (size);
        Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));

        if (m_offset_to_chunk_size.empty())
        {
            m_offset_to_chunk_size[0] = needed_chunks;
            if (log)
                log->Printf ("[1] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, 0, needed_chunks, m_chunk_size);
            addr = m_addr;
        }
        else
        {
            uint32_t last_offset = 0;
            OffsetToChunkSize::const_iterator pos = m_offset_to_chunk_size.begin();
            OffsetToChunkSize::const_iterator end = m_offset_to_chunk_size.end();
            while (pos != end)
            {
                if (pos->first > last_offset)
                {
                    const uint32_t bytes_available = pos->first - last_offset;
                    const uint32_t num_chunks = CalculateChunksNeededForSize (bytes_available);
                    if (num_chunks >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[2] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }

                last_offset = pos->first + pos->second * m_chunk_size;

                if (++pos == end)
                {
                    // Last entry...
                    const uint32_t chunks_left = CalculateChunksNeededForSize (m_byte_size - last_offset);
                    if (chunks_left >= needed_chunks)
                    {
                        m_offset_to_chunk_size[last_offset] = needed_chunks;
                        if (log)
                            log->Printf ("[3] AllocatedBlock::ReserveBlock (size = %u (0x%x)) => offset = 0x%x, %u %u byte chunks", size, size, last_offset, needed_chunks, m_chunk_size);
                        addr = m_addr + last_offset;
                        break;
                    }
                }
            }
        }
//        const uint32_t total_chunks = m_allocated.size ();
//        uint32_t unallocated_idx = 0;
//        uint32_t allocated_idx = m_allocated.find_first();
//        uint32_t first_chunk_idx = UINT32_MAX;
//        uint32_t num_chunks;
//        while (1)
//        {
//            if (allocated_idx == UINT32_MAX)
//            {
//                // No more bits are set starting from unallocated_idx, so we
//                // either have enough chunks for the request, or we don't.
//                // Either way we break out of the while loop...
//                num_chunks = total_chunks - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                    first_chunk_idx = unallocated_idx;
//                break;
//            }
//            else if (allocated_idx > unallocated_idx)
//            {
//                // We have some allocated chunks, check if there are enough
//                // free chunks to satisfy the request?
//                num_chunks = allocated_idx - unallocated_idx;
//                if (needed_chunks <= num_chunks)
//                {
//                    // Yep, we have enough!
//                    first_chunk_idx = unallocated_idx;
//                    break;
//                }
//            }
//
//            while (unallocated_idx < total_chunks)
//            {
//                if (m_allocated[unallocated_idx])
//                    ++unallocated_idx;
//                else
//                    break;
//            }
//
//            if (unallocated_idx >= total_chunks)
//                break;
//
//            allocated_idx = m_allocated.find_next(unallocated_idx);
//        }
//
//        if (first_chunk_idx != UINT32_MAX)
//        {
//            const uint32_t end_bit_idx = unallocated_idx + needed_chunks;
//            for (uint32_t idx = first_chunk_idx; idx < end_bit_idx; ++idx)
//                m_allocated.set(idx);
//            return m_addr + m_chunk_size * first_chunk_idx;
//        }
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::ReserveBlock (size = %u (0x%x)) => 0x%16.16" PRIx64, size, size, (uint64_t)addr);
    return addr;
}

bool
AllocatedBlock::FreeBlock (addr_t addr)
{
    uint32_t offset = addr - m_addr;
    OffsetToChunkSize::iterator pos = m_offset_to_chunk_size.find (offset);
    bool success = false;
    if (pos != m_offset_to_chunk_size.end())
    {
        m_offset_to_chunk_size.erase (pos);
        success = true;
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS | LIBLLDB_LOG_VERBOSE));
    if (log)
        log->Printf ("AllocatedBlock::FreeBlock (addr = 0x%16.16" PRIx64 ") => %i", (uint64_t)addr, success);
    return success;
}


AllocatedMemoryCache::AllocatedMemoryCache (Process &process) :
    m_process (process),
    m_mutex (Mutex::eMutexTypeRecursive),
    m_memory_map()
{
}

AllocatedMemoryCache::~AllocatedMemoryCache ()
{
}


void
AllocatedMemoryCache::Clear()
{
    Mutex::Locker locker (m_mutex);
    if (m_process.IsAlive())
    {
        PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
        for (pos = m_memory_map.begin(); pos != end; ++pos)
            m_process.DoDeallocateMemory(pos->second->GetBaseAddress());
    }
    m_memory_map.clear();
}


AllocatedMemoryCache::AllocatedBlockSP
AllocatedMemoryCache::AllocatePage (uint32_t byte_size,
                                    uint32_t permissions,
                                    uint32_t chunk_size,
                                    Error &error)
{
    AllocatedBlockSP block_sp;
    const size_t page_size = 4096;
    const size_t num_pages = (byte_size + page_size - 1) / page_size;
    const size_t page_byte_size = num_pages * page_size;

    addr_t addr = m_process.DoAllocateMemory(page_byte_size, permissions, error);

    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
    {
        log->Printf ("Process::DoAllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16" PRIx64,
                     page_byte_size,
                     GetPermissionsAsCString(permissions),
                     (uint64_t)addr);
    }

    if (addr != LLDB_INVALID_ADDRESS)
    {
        block_sp.reset (new AllocatedBlock (addr, page_byte_size, permissions, chunk_size));
        m_memory_map.insert (std::make_pair (permissions, block_sp));
    }
    return block_sp;
}

lldb::addr_t
AllocatedMemoryCache::AllocateMemory (size_t byte_size,
                                      uint32_t permissions,
                                      Error &error)
{
    Mutex::Locker locker (m_mutex);

    addr_t addr = LLDB_INVALID_ADDRESS;
    std::pair<PermissionsToBlockMap::iterator, PermissionsToBlockMap::iterator> range = m_memory_map.equal_range (permissions);

    for (PermissionsToBlockMap::iterator pos = range.first; pos != range.second; ++pos)
    {
        addr = (*pos).second->ReserveBlock (byte_size);
        if (addr != LLDB_INVALID_ADDRESS)
            break;
    }

    if (addr == LLDB_INVALID_ADDRESS)
    {
        AllocatedBlockSP block_sp (AllocatePage (byte_size, permissions, 16, error));

        if (block_sp)
            addr = block_sp->ReserveBlock (byte_size);
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf ("AllocatedMemoryCache::AllocateMemory (byte_size = 0x%8.8zx, permissions = %s) => 0x%16.16" PRIx64, byte_size, GetPermissionsAsCString(permissions), (uint64_t)addr);
    return addr;
}

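//----------------------------------------------------------------------
// A minimal usage sketch (hypothetical caller code; how the caller
// reaches the cache is assumed, everything else is the API above):
//
//   Error error;
//   AllocatedMemoryCache &cache = ...; // e.g. owned by the Process
//   lldb::addr_t addr =
//       cache.AllocateMemory (64, lldb::ePermissionsReadable |
//                                 lldb::ePermissionsWritable, error);
//   if (addr != LLDB_INVALID_ADDRESS)
//   {
//       // ... use the 64 bytes in the inferior ...
//       cache.DeallocateMemory (addr); // returns the chunks to their block
//   }
//
// Note that DeallocateMemory only releases the reservation back to its
// AllocatedBlock; the underlying inferior pages are not freed until
// Clear() calls Process::DoDeallocateMemory.
//----------------------------------------------------------------------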
bool
AllocatedMemoryCache::DeallocateMemory (lldb::addr_t addr)
{
    Mutex::Locker locker (m_mutex);

    PermissionsToBlockMap::iterator pos, end = m_memory_map.end();
    bool success = false;
    for (pos = m_memory_map.begin(); pos != end; ++pos)
    {
        if (pos->second->Contains (addr))
        {
            success = pos->second->FreeBlock (addr);
            break;
        }
    }
    Log *log (GetLogIfAllCategoriesSet (LIBLLDB_LOG_PROCESS));
    if (log)
        log->Printf("AllocatedMemoryCache::DeallocateMemory (addr = 0x%16.16" PRIx64 ") => %i", (uint64_t)addr, success);
    return success;
}