/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <string.h>
#include <unistd.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>

#include <android-base/macros.h>
#include <backtrace.h>

#include "Allocator.h"
#include "HeapWalker.h"
#include "Leak.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"

#include "memunreachable/memunreachable.h"
#include "bionic.h"
#include "log.h"

const size_t Leak::contents_length;

using namespace std::chrono_literals;

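// MemUnreachable drives a single leak-detection pass: CollectAllocations()
// feeds live allocations and root regions (globals, thread stacks, and
// registers) into a HeapWalker, then GetUnreachableMemory() sweeps for
// allocations that are not reachable from any root.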
class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator)
      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
      const allocator::vector<Mapping>& mappings);
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
      size_t* num_leaks, size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
      allocator::vector<Mapping>& heap_mappings,
      allocator::vector<Mapping>& anon_mappings,
      allocator::vector<Mapping>& globals_mappings,
      allocator::vector<Mapping>& stack_mappings);

  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);

  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};

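// Adapts the C callback interface of malloc_iterate() to a std::function by
// smuggling the function object through the opaque void* argument.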
static void HeapIterate(const Mapping& heap_mapping,
    const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
      [](uintptr_t base, size_t size, void* arg) {
        auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
        (*f)(base, size);
      },
      const_cast<void*>(reinterpret_cast<const void*>(&func)));
}

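// Builds the heap walker's model of the target process: every known
// allocation (malloc chunks and anonymous mappings) plus the root set
// (globals, thread stacks, and thread registers) that reachability is traced
// from.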
bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
    const allocator::vector<Mapping>& mappings) {
  ALOGI("searching process %d for allocations", pid_);
  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings,
      globals_mappings, stack_mappings)) {
    return false;
  }

  // Each chunk reported by the allocator inside a heap mapping is an
  // individual allocation.
  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it, [&](uintptr_t base, size_t size) {
      heap_walker_.Allocation(base, base + size);
    });
  }

  // Treat each anonymous mapping as a single allocation.
  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }

  // Global data sections are roots.
  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }

  // The live portion of each thread's stack (from its captured stack pointer
  // to the end of the stack mapping) and its register contents are roots.
  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }

  ALOGI("searching done");

  return true;
}

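// Sweeps the heap walker for unreachable allocations, folds leaks that
// reference each other, deduplicates leaks with identical backtraces, and
// returns the largest leaks first, capped at |limit|.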
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
    size_t limit, size_t* num_leaks, size_t* leak_bytes) {
  ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();

  if (!heap_walker_.DetectLeaks()) {
    return false;
  }

  allocator::vector<Range> leaked1{allocator_};
  heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);

  ALOGI("sweeping done");

  ALOGI("folding related leaks");

  LeakFolding folding(allocator_, heap_walker_);
  if (!folding.FoldLeaks()) {
    return false;
  }

  allocator::vector<LeakFolding::Leak> leaked{allocator_};

  if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
    return false;
  }

  allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};

  // Prevent reallocations of backing memory so we can store pointers into it
  // in backtrace_map.
  leaks.reserve(leaked.size());

  for (auto& it : leaked) {
    leaks.emplace_back();
    Leak* leak = &leaks.back();

    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
        leak->backtrace.frames, leak->backtrace.max_frames);
    if (num_backtrace_frames > 0) {
      leak->backtrace.num_frames = num_backtrace_frames;

      auto inserted = backtrace_map.emplace(leak->backtrace, leak);
      if (!inserted.second) {
        // A leak with the same backtrace already exists; drop this one and
        // increment the similar counts on the existing one.
        leaks.pop_back();
        Leak* similar_leak = inserted.first->second;
        similar_leak->similar_count++;
        similar_leak->similar_size += it.range.size();
        similar_leak->similar_referenced_count += it.referenced_count;
        similar_leak->similar_referenced_size += it.referenced_size;
        similar_leak->total_size += it.range.size();
        similar_leak->total_size += it.referenced_size;
        continue;
      }
    }

    leak->begin = it.range.begin;
    leak->size = it.range.size();
    leak->referenced_count = it.referenced_count;
    leak->referenced_size = it.referenced_size;
    leak->total_size = leak->size + leak->referenced_size;
    memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
        std::min(leak->size, Leak::contents_length));
  }

  ALOGI("folding done");

  // Report the largest leaks first.
  std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
    return a.total_size > b.total_size;
  });

  if (leaks.size() > limit) {
    leaks.resize(limit);
  }

  return true;
}

static bool has_prefix(const allocator::string& s, const char* prefix) {
  return s.compare(0, strlen(prefix), prefix) == 0;
}

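// Splits the process's mappings into the categories CollectAllocations()
// needs, keyed off the mapping names from /proc/pid/maps. An executable
// mapping records which library subsequent .data/.rodata segments belong to.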
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
    allocator::vector<Mapping>& heap_mappings,
    allocator::vector<Mapping>& anon_mappings,
    allocator::vector<Mapping>& globals_mappings,
    allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();

  allocator::string current_lib{allocator_};

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }

    if (!it->read) {
      continue;
    }

    const allocator::string mapping_name{it->name, allocator_};
    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]") {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "/dev/ashmem/dalvik")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      // unnamed mapping, conservatively treat it as a global root
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }

  return true;
}

template <typename T>
static inline const char* plural(T val) {
  return (val == 1) ? "" : "s";
}

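// Detecting unreachable memory requires a consistent snapshot of the process.
// The sequence is: pause allocations (ScopedDisableMalloc), ptrace and freeze
// every thread from a helper thread, capture thread registers, stacks, and
// /proc/pid/maps, then fork a child that inherits a copy-on-write image of
// the heap. The child walks the snapshot and sends its results back over a
// pipe while the original process resumes.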
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      continue_parent_sem.Post();
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }

    // malloc must be enabled to call fork; at_fork handlers take the same
    // locks as ScopedDisableMalloc.  All threads are paused in ptrace, so
    // memory state is still consistent.  Unfreeze the original thread so it
    // can drop the malloc locks; it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.

      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      _exit(0);
    } else {
      // Nothing left to do in the collection thread, return immediately,
      // releasing all the captured threads.
      ALOGI("collection thread done");
      return 0;
    }
  }};

  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process.  Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  ALOGI("unreachable memory detection done");
  ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
      info.leak_bytes, info.num_leaks, plural(info.num_leaks),
      info.allocation_bytes, info.num_allocations, plural(info.num_allocations));
  return true;
}

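// Formats a single leak for logging: size and address, reference and
// similar-leak summaries, an optional hex/ASCII dump of the first
// contents_length bytes, and the allocation backtrace.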
std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;

  oss << "  " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  oss << std::endl;
  if (referenced_count > 0) {
    oss << std::dec;
    oss << "   referencing " << referenced_size << " unreachable bytes";
    oss << " in " << referenced_count << " allocation" << plural(referenced_count);
    oss << std::endl;
  }
  if (similar_count > 0) {
    oss << std::dec;
    oss << "   and " << similar_size << " similar unreachable bytes";
    oss << " in " << similar_count << " allocation" << plural(similar_count);
    oss << std::endl;
    if (similar_referenced_count > 0) {
      oss << "   referencing " << similar_referenced_size << " unreachable bytes";
      oss << " in " << similar_referenced_count << " allocation" << plural(similar_referenced_count);
      oss << std::endl;
    }
  }

  if (log_contents) {
    const int bytes_per_line = 16;
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << "   contents:" << std::endl;
    } else {
      oss << "   first " << bytes << " bytes of contents:" << std::endl;
    }

    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << "   " << std::hex << begin + i << ": ";
      size_t j;
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        // Cast through unsigned char so bytes >= 0x80 don't sign-extend when
        // printed as hex.
        oss << std::setw(2) << static_cast<int>(static_cast<unsigned char>(contents[j])) << " ";
      }
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        oss << "   ";
      }
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (backtrace.num_frames > 0) {
    oss << backtrace_string(backtrace.frames, backtrace.num_frames);
  }

  return oss.str();
}

// Figure out the ABI based on defined macros.
#if defined(__arm__)
#define ABI_STRING "arm"
#elif defined(__aarch64__)
#define ABI_STRING "arm64"
#elif defined(__mips__) && !defined(__LP64__)
#define ABI_STRING "mips"
#elif defined(__mips__) && defined(__LP64__)
#define ABI_STRING "mips64"
#elif defined(__i386__)
#define ABI_STRING "x86"
#elif defined(__x86_64__)
#define ABI_STRING "x86_64"
#else
#error "Unsupported ABI"
#endif

std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << "  " << leak_bytes << " bytes in ";
  oss << num_leaks << " unreachable allocation" << plural(num_leaks);
  oss << std::endl;
  oss << "  ABI: '" ABI_STRING "'" << std::endl;
  oss << std::endl;

  for (auto it = leaks.begin(); it != leaks.end(); it++) {
    oss << it->ToString(log_contents);
    oss << std::endl;
  }

  return oss.str();
}

std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return "Failed to get unreachable memory\n";
  }

  return info.ToString(log_contents);
}

bool LogUnreachableMemory(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return false;
  }

  for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
    ALOGE("%s", it->ToString(log_contents).c_str());
  }
  return true;
}

bool NoLeaks() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, 0)) {
    return false;
  }

  return info.num_leaks == 0;
}
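
// Example usage (an illustrative sketch, not part of this file; the function
// CheckForLeaks below is hypothetical): callers include
// memunreachable/memunreachable.h, which declares the entry points defined
// above, and link against libmemunreachable.
//
//   #include <memunreachable/memunreachable.h>
//
//   void CheckForLeaks() {
//     // Log up to 100 leaks, including a hex dump of each leak's contents.
//     LogUnreachableMemory(true, 100);
//
//     // Or collect the report programmatically.
//     UnreachableMemoryInfo info;
//     if (GetUnreachableMemory(info, 100)) {
//       std::string report = info.ToString(false);
//       // ... log or store the report ...
//     }
//   }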