/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <string.h>

#include <algorithm>
#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>

#include <android-base/macros.h>
#include <backtrace.h>

#include "Allocator.h"
#include "Binder.h"
#include "HeapWalker.h"
#include "Leak.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"

#include "bionic.h"
#include "log.h"
#include "memunreachable/memunreachable.h"

using namespace std::chrono_literals;

namespace android {

const size_t Leak::contents_length;

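// Drives leak detection for one process: CollectAllocations() feeds heap
// allocations, global data, thread stacks, and registers into a HeapWalker,
// and GetUnreachableMemory() reports the allocations the walker could not
// reach from any root.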
class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator)
      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                          const allocator::vector<Mapping>& mappings,
                          const allocator::vector<uintptr_t>& refs);
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
                            size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
                        allocator::vector<Mapping>& heap_mappings,
                        allocator::vector<Mapping>& anon_mappings,
                        allocator::vector<Mapping>& globals_mappings,
                        allocator::vector<Mapping>& stack_mappings);
  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};

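// Adapts malloc_iterate()'s C-style callback to a std::function by passing
// the function object through the void* cookie argument.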
static void HeapIterate(const Mapping& heap_mapping,
                        const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
                 [](uintptr_t base, size_t size, void* arg) {
                   auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
                   (*f)(base, size);
                 },
                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
}

bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                                        const allocator::vector<Mapping>& mappings,
                                        const allocator::vector<uintptr_t>& refs) {
  MEM_ALOGI("searching process %d for allocations", pid_);
  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings, globals_mappings, stack_mappings)) {
    return false;
  }

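  // Register every live malloc allocation inside the heap mappings as a
  // candidate allocation.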
  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    MEM_ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it,
                [&](uintptr_t base, size_t size) { heap_walker_.Allocation(base, base + size); });
  }

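  // Mappings classified as anonymous are treated as single allocations
  // spanning the whole mapping.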
  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    MEM_ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }

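  // Global data mappings (.data/.bss/.rodata) are roots: pointers stored in
  // them keep allocations reachable.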
  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    MEM_ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }

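  // The live portion of each thread's stack and its captured register values
  // are also roots.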
  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        MEM_ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }

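  // Finally, treat the binder references collected by the caller as roots.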
  heap_walker_.Root(refs);

  MEM_ALOGI("searching done");

  return true;
}

bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
                                          size_t* num_leaks, size_t* leak_bytes) {
  MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();

  if (!heap_walker_.DetectLeaks()) {
    return false;
  }

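  // First sweep: count all leaked allocations before related leaks are folded
  // together below.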
  allocator::vector<Range> leaked1{allocator_};
  heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);

  MEM_ALOGI("sweeping done");

  MEM_ALOGI("folding related leaks");

  LeakFolding folding(allocator_, heap_walker_);
  if (!folding.FoldLeaks()) {
    return false;
  }

  allocator::vector<LeakFolding::Leak> leaked{allocator_};

  if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
    return false;
  }

  allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};

  // Prevent reallocations of backing memory so we can store pointers into it
  // in backtrace_map.
  leaks.reserve(leaked.size());

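  // Record an allocation backtrace for each leak and coalesce leaks that share
  // an identical backtrace into a single entry with "similar" counts.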
  for (auto& it : leaked) {
    leaks.emplace_back();
    Leak* leak = &leaks.back();

    ssize_t num_backtrace_frames = malloc_backtrace(
        reinterpret_cast<void*>(it.range.begin), leak->backtrace.frames, leak->backtrace.max_frames);
    if (num_backtrace_frames > 0) {
      leak->backtrace.num_frames = num_backtrace_frames;

      auto inserted = backtrace_map.emplace(leak->backtrace, leak);
      if (!inserted.second) {
        // Leak with same backtrace already exists, drop this one and
        // increment similar counts on the existing one.
        leaks.pop_back();
        Leak* similar_leak = inserted.first->second;
        similar_leak->similar_count++;
        similar_leak->similar_size += it.range.size();
        similar_leak->similar_referenced_count += it.referenced_count;
        similar_leak->similar_referenced_size += it.referenced_size;
        similar_leak->total_size += it.range.size();
        similar_leak->total_size += it.referenced_size;
        continue;
      }
    }

    leak->begin = it.range.begin;
    leak->size = it.range.size();
    leak->referenced_count = it.referenced_count;
    leak->referenced_size = it.referenced_size;
    leak->total_size = leak->size + leak->referenced_size;
    memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
           std::min(leak->size, Leak::contents_length));
  }

  MEM_ALOGI("folding done");

  std::sort(leaks.begin(), leaks.end(),
            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });

  if (leaks.size() > limit) {
    leaks.resize(limit);
  }

  return true;
}

static bool has_prefix(const allocator::string& s, const char* prefix) {
  int ret = s.compare(0, strlen(prefix), prefix);
  return ret == 0;
}

bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
                                      allocator::vector<Mapping>& heap_mappings,
                                      allocator::vector<Mapping>& anon_mappings,
                                      allocator::vector<Mapping>& globals_mappings,
                                      allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();

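  // Name of the most recently seen executable mapping.  Non-executable
  // mappings with the same name are that library's .data/.rodata segments.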
  allocator::string current_lib{allocator_};

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }

    if (!it->read) {
      continue;
    }

    const allocator::string mapping_name{it->name, allocator_};
    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]") {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "/dev/ashmem/dalvik")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") &&
               mapping_name != "[anon:leak_detector_malloc]") {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }

  return true;
}

template <typename T>
static inline const char* plural(T val) {
  return (val == 1) ? "" : "s";
}

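// Orchestrates a scan of the calling process: with malloc disabled, a ptracer
// thread captures the threads, registers, and /proc/pid/maps, then forks a
// child that walks the copy-on-write snapshot of the heap and sends its
// findings back over a pipe.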
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    MEM_ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);
    allocator::vector<uintptr_t> refs(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      continue_parent_sem.Post();
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }

    if (!BinderReferences(refs)) {
      continue_parent_sem.Post();
      return 1;
    }

    // malloc must be enabled to call fork, at_fork handlers take the same
    // locks as ScopedDisableMalloc.  All threads are paused in ptrace, so
    // memory state is still consistent.  Unfreeze the original thread so it
    // can drop the malloc locks, it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.

      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings, refs)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      _exit(0);
    } else {
      // Nothing left to do in the collection thread, return immediately,
      // releasing all the captured threads.
      MEM_ALOGI("collection thread done");
      return 0;
    }
  }};

  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process.  Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  MEM_ALOGI("unreachable memory detection done");
  MEM_ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
            info.leak_bytes, info.num_leaks, plural(info.num_leaks), info.allocation_bytes,
            info.num_allocations, plural(info.num_allocations));
  return true;
}

std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;

  oss << "  " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  oss << std::endl;
  if (referenced_count > 0) {
    oss << std::dec;
    oss << "   referencing " << referenced_size << " unreachable bytes";
    oss << " in " << referenced_count << " allocation" << plural(referenced_count);
    oss << std::endl;
  }
  if (similar_count > 0) {
    oss << std::dec;
    oss << "   and " << similar_size << " similar unreachable bytes";
    oss << " in " << similar_count << " allocation" << plural(similar_count);
    oss << std::endl;
    if (similar_referenced_count > 0) {
      oss << "   referencing " << similar_referenced_size << " unreachable bytes";
      oss << " in " << similar_referenced_count << " allocation" << plural(similar_referenced_count);
      oss << std::endl;
    }
  }

  if (log_contents) {
    const int bytes_per_line = 16;
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << "   contents:" << std::endl;
    } else {
      oss << "   first " << bytes << " bytes of contents:" << std::endl;
    }

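    // Hex dump, 16 bytes per line, followed by an ASCII column in which
    // non-printable bytes are shown as '.'.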
    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << "   " << std::hex << begin + i << ": ";
      size_t j;
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        oss << std::setw(2) << static_cast<int>(contents[j]) << " ";
      }
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        oss << "   ";
      }
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (backtrace.num_frames > 0) {
    oss << backtrace_string(backtrace.frames, backtrace.num_frames);
  }

  return oss.str();
}

std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << "  " << leak_bytes << " bytes in ";
  oss << num_leaks << " unreachable allocation" << plural(num_leaks);
  oss << std::endl;
  oss << "  ABI: '" ABI_STRING "'" << std::endl;
  oss << std::endl;

  for (auto it = leaks.begin(); it != leaks.end(); it++) {
    oss << it->ToString(log_contents);
    oss << std::endl;
  }

  return oss.str();
}

UnreachableMemoryInfo::~UnreachableMemoryInfo() {
  // Clear the memory that holds the leaks, otherwise the next attempt to
  // detect leaks may find the old data (for example in the jemalloc tcache)
  // and consider all the leaks to be referenced.
  memset(leaks.data(), 0, leaks.capacity() * sizeof(Leak));

  std::vector<Leak> tmp;
  leaks.swap(tmp);

  // Disable and re-enable malloc to flush the jemalloc tcache to make sure
  // there are no copies of the leaked pointer addresses there.
  malloc_disable();
  malloc_enable();
}

std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return "Failed to get unreachable memory\n"
           "If you are trying to get unreachable memory from a system app\n"
           "(like com.android.systemui), disable selinux first using\n"
           "setenforce 0\n";
  }

  return info.ToString(log_contents);
}

}  // namespace android

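// The remaining entry points live outside the android namespace.  An
// illustrative use of the library (a minimal sketch; how the report is
// emitted is up to the caller):
//
//   std::string report =
//       android::GetUnreachableMemoryString(true /* log_contents */, 100 /* limit */);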
bool LogUnreachableMemory(bool log_contents, size_t limit) {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, limit)) {
    return false;
  }

  for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
    MEM_ALOGE("%s", it->ToString(log_contents).c_str());
  }
  return true;
}

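// A limit of 0 skips the per-leak details; only the counts matter here.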
bool NoLeaks() {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, 0)) {
    return false;
  }

  return info.num_leaks == 0;
}