/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "dex/dex_file.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <atomic>
#include <cstddef>
#include <deque>
#include <map>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are two ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, by monitoring the action_seqlock_.
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list.  If the seqlock values match and are even,
//     the copy is consistent.  Otherwise, the reader should try again.
//     * Note that using the data directly while it is being modified
//       might crash the tool.  Therefore, the only safe way is to make
//       a copy and use the copy only after the seqlock has been checked.
//     * Note that the process might even free and munmap the data while
//       it is being copied, therefore the reader should either handle
//       SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The seqlock can be used to determine the number of modifications of
//     the linked list, which can be used to intelligently cache the data.
//     Note the possible overflow of the seqlock.  It is intentionally
//     32-bit, since 64-bit atomics can be tricky on some architectures.
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later.  All timestamps must be unique.
//   * Memory barriers are used to make it possible to reason about
//     the data even when it is being modified (e.g. the process crashed
//     while that data was locked, and thus it will never be unlocked).
//     * In particular, it should be possible to:
//       1) read the seqlock and then the linked list head pointer.
//       2) copy the entry and check that the seqlock has not changed.
//       3) copy the symfile and check that the seqlock has not changed.
//       4) go back to step 2 using the next pointer (if non-null).
//       This safely creates a copy of all symfiles, although other data
//       might be inconsistent/unusable (e.g. prev_, action_timestamp_).
//       A sketch of this reader loop follows at the end of this comment.
//   * For full conformance with the C++ memory model, all seqlock-protected
//     accesses should be atomic. We currently do this in the more critical
//     cases. The rest will have to be fixed before attempting to run TSAN
//     on this code.
//
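//   * Illustrative reader-side sketch (added here for clarity; not part of the
//     interface): a tool following option 2 might copy the entries roughly as
//     below.  ReadRemote() and SeqlockChanged() are hypothetical helpers; a
//     real tool must fetch the bytes out of the target process (e.g. via
//     process_vm_readv) and retry or give up on any seqlock mismatch.  The
//     descriptor is the target's __jit_debug_descriptor, located via its symbol.
//
//       uint32_t seq;
//       ReadRemote(&descriptor->action_seqlock_, &seq, sizeof(seq));
//       if (seq % 2 != 0) { /* the list is locked; retry */ }
//       JITCodeEntry* entry;
//       ReadRemote(&descriptor->head_, &entry, sizeof(entry));
//       while (entry != nullptr) {
//         JITCodeEntry copy;
//         ReadRemote(entry, &copy, sizeof(copy));
//         if (SeqlockChanged(seq)) { /* retry */ }
//         std::vector<uint8_t> symfile(copy.symfile_size_);
//         ReadRemote(copy.symfile_addr_, symfile.data(), symfile.size());
//         if (SeqlockChanged(seq)) { /* retry */ }
//         entry = copy.next_.load(std::memory_order_relaxed);
//       }
//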

namespace art {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  struct JITCodeEntry {
    // Atomic to ensure the reader can always iterate over the linked list
    // (e.g. the process could crash in the middle of writing this field).
    std::atomic<JITCodeEntry*> next_;
    // Non-atomic. The reader should not use it. It is only used for deletion.
    JITCodeEntry* prev_;
    const uint8_t* symfile_addr_;
    uint64_t symfile_size_;  // Beware of the offset (12 on x86; but 16 on ARM32).
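    // (The difference comes from uint64_t alignment: the 32-bit x86 ABI places it
    //  right after the three 4-byte pointers above, while 32-bit ARM pads it to an
    //  8-byte boundary; on 64-bit targets the offset is 24 either way.)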

    // Android-specific fields:
    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
  };

  struct JITDescriptor {
    uint32_t version_ = 1;                      // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;       // One of the JITAction enum values.
    JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
    uint32_t sizeof_entry = sizeof(JITCodeEntry);
    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
    uint64_t action_timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void ActionSeqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void ActionSequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
}

static JITCodeEntry* CreateJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    ArrayRef<const uint8_t> symfile,
    bool copy_symfile) {
  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (copy_symfile) {
    uint8_t* copy = new uint8_t[symfile.size()];
    CHECK(copy != nullptr);
    memcpy(copy, symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer.  This ensures each entry has a unique timestamp.
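  // For example, if two entries are created within the same timer tick, they both
  // see the same NanoTime(), so the second entry gets action_timestamp_ + 1 instead.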
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
  JITCodeEntry* entry = new JITCodeEntry;
  CHECK(entry != nullptr);
  entry->symfile_addr_ = symfile.data();
  entry->symfile_size_ = symfile.size();
  entry->prev_ = nullptr;
  entry->next_.store(head, std::memory_order_relaxed);
  entry->register_timestamp_ = timestamp;

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  if (head != nullptr) {
    head->prev_ = entry;
  }
  descriptor.head_.store(entry, std::memory_order_relaxed);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();
  return entry;
}

static void DeleteJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    JITCodeEntry* entry,
    bool free_symfile) {
  CHECK(entry != nullptr);
  const uint8_t* symfile = entry->symfile_addr_;

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer.  This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
  if (entry->prev_ != nullptr) {
    entry->prev_->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  if (next != nullptr) {
    next->prev_ = entry->prev_;
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();

  // Ensure that the clear below cannot be reordered above the unlock above.
  std::atomic_thread_fence(std::memory_order_release);

  // Aggressively clear the entry as an extra check of the synchronisation.
  memset(entry, 0, sizeof(*entry));

  delete entry;
  if (free_symfile) {
    delete[] symfile;
  }
}

static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries GUARDED_BY(g_dex_debug_lock);

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // This is just a defensive check. The class linker should not register the dex file twice.
  if (g_dex_debug_entries.count(dexfile) == 0) {
    const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
    JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                     __dex_debug_register_code_ptr,
                                                     symfile,
                                                     /*copy_symfile=*/ false);
    g_dex_debug_entries.emplace(dexfile, entry);
  }
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  auto it = g_dex_debug_entries.find(dexfile);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  if (it != g_dex_debug_entries.end()) {
    DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                               __dex_debug_register_code_ptr,
                               /*entry=*/ it->second,
                               /*free_symfile=*/ false);
    g_dex_debug_entries.erase(it);
  }
}

// Mapping from handle to entry. Used to manage the lifetime of the entries.
static std::multimap<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);

// Number of entries added since the last packing.  Used to pack entries in bulk.
static size_t g_jit_num_unpacked_entries GUARDED_BY(g_jit_debug_lock) = 0;

// We postpone removal so that it is done in bulk.
static std::deque<const void*> g_jit_removed_entries GUARDED_BY(g_jit_debug_lock);

// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry for
// each group.  The start address of the method's code determines which group it belongs to.
// The end is irrelevant.  As a consequence, newly added mini debug infos will be merged and
// old ones (GCed) will be pruned.
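// For example, with the 64 KiB group size used below, code starting at 0x10000100 and at
// 0x1000ff00 falls into the same group (0x10000000) and ends up described by one packed
// JITCodeEntry (the addresses are just an illustration).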
static void MaybePackJitMiniDebugInfo(PackElfFileForJITFunction pack,
                                      InstructionSet isa,
                                      const InstructionSetFeatures* features)
    REQUIRES(g_jit_debug_lock) {
  // Size of memory range covered by each JITCodeEntry.
  // The number of methods per entry is variable (depending on how many fit in that range).
  constexpr uint32_t kGroupSize = 64 * KB;
  // Even if there are no removed entries, we want to pack new entries on a regular basis.
  constexpr uint32_t kPackFrequency = 64;

  std::deque<const void*>& removed_entries = g_jit_removed_entries;
  std::sort(removed_entries.begin(), removed_entries.end());
  if (removed_entries.empty() && g_jit_num_unpacked_entries < kPackFrequency) {
    return;  // Nothing to do.
  }

  std::vector<ArrayRef<const uint8_t>> added_elf_files;
  std::vector<const void*> removed_symbols;
  auto added_it = g_jit_debug_entries.begin();
  auto removed_it = removed_entries.begin();
  while (added_it != g_jit_debug_entries.end()) {
    // Collect all entries that have been added or removed within our memory range.
    const void* group_ptr = AlignDown(added_it->first, kGroupSize);
    added_elf_files.clear();
    auto added_begin = added_it;
    while (added_it != g_jit_debug_entries.end() &&
           AlignDown(added_it->first, kGroupSize) == group_ptr) {
      JITCodeEntry* entry = (added_it++)->second;
      added_elf_files.emplace_back(entry->symfile_addr_, entry->symfile_size_);
    }
    removed_symbols.clear();
    while (removed_it != removed_entries.end() &&
           AlignDown(*removed_it, kGroupSize) == group_ptr) {
      removed_symbols.push_back(*(removed_it++));
    }

    // Create a new single JITCodeEntry that covers this memory range.
    if (added_elf_files.size() == 1 && removed_symbols.size() == 0) {
      continue;  // Nothing changed in this memory range.
    }
    uint64_t start_time = MilliTime();
    size_t symbols;
    std::vector<uint8_t> packed = pack(isa, features, added_elf_files, removed_symbols, &symbols);
    VLOG(jit)
        << "JIT mini-debug-info packed"
        << " for " << group_ptr
        << " in " << MilliTime() - start_time << "ms"
        << " files=" << added_elf_files.size()
        << " removed=" << removed_symbols.size()
        << " symbols=" << symbols
        << " size=" << PrettySize(packed.size());

    // Replace the old entries with the new one (with their lifetime temporally overlapping).
    JITCodeEntry* packed_entry = CreateJITCodeEntryInternal(
        __jit_debug_descriptor,
        __jit_debug_register_code_ptr,
        ArrayRef<const uint8_t>(packed),
        /*copy_symfile=*/ true);
    for (auto it = added_begin; it != added_it; ++it) {
      DeleteJITCodeEntryInternal(__jit_debug_descriptor,
                                 __jit_debug_register_code_ptr,
                                 /*entry=*/ it->second,
                                 /*free_symfile=*/ true);
    }
    g_jit_debug_entries.erase(added_begin, added_it);
    g_jit_debug_entries.emplace(group_ptr, packed_entry);
  }
  CHECK(added_it == g_jit_debug_entries.end());
  CHECK(removed_it == removed_entries.end());
  removed_entries.clear();
  g_jit_num_unpacked_entries = 0;
}

void AddNativeDebugInfoForJit(Thread* self,
                              const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              PackElfFileForJITFunction pack,
                              InstructionSet isa,
                              const InstructionSetFeatures* features) {
  MutexLock mu(self, g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);

  MaybePackJitMiniDebugInfo(pack, isa, features);

  JITCodeEntry* entry = CreateJITCodeEntryInternal(
      __jit_debug_descriptor,
      __jit_debug_register_code_ptr,
      ArrayRef<const uint8_t>(symfile),
      /*copy_symfile=*/ true);

  VLOG(jit)
      << "JIT mini-debug-info added"
      << " for " << code_ptr
      << " size=" << PrettySize(symfile.size());

  // We don't provide code_ptr for type debug info, which means we cannot free it later.
  // (This only happens when the --generate-debug-info flag is enabled for the purpose
  // of being debugged with gdb; it does not happen for debuggable apps by default.)
  if (code_ptr != nullptr) {
    g_jit_debug_entries.emplace(code_ptr, entry);
    // Count how many entries we have added since the last mini-debug-info packing.
    // We avoid g_jit_debug_entries.size() here because it can shrink during packing.
    g_jit_num_unpacked_entries++;
  }
}

void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
  MutexLock mu(self, g_jit_debug_lock);
  // We generate JIT native debug info only if the right runtime flags are enabled,
  // but we try to remove it unconditionally whenever code is freed from the JIT cache.
  if (!g_jit_debug_entries.empty()) {
    g_jit_removed_entries.push_back(code_ptr);
  }
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (auto entry : g_jit_debug_entries) {
    size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
  }
  return size;
}

}  // namespace art