// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "crazy_linker_rdebug.h"

#include <elf.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "crazy_linker_debug.h"
#include "crazy_linker_proc_maps.h"
#include "crazy_linker_util.h"
#include "crazy_linker_system.h"
#include "elf_traits.h"

namespace crazy {

namespace {

// Find the full path of the current executable. On success, return true
// and set |exe_path|. On failure, return false and set errno.
bool FindExecutablePath(String* exe_path) {
  // /proc/self/exe is a symlink to the full path. Read it with
  // readlink().
  exe_path->Resize(512);
  ssize_t ret = TEMP_FAILURE_RETRY(
      readlink("/proc/self/exe", exe_path->ptr(), exe_path->size()));
  if (ret < 0) {
    LOG_ERRNO("%s: Could not get /proc/self/exe link", __FUNCTION__);
    return false;
  }

  exe_path->Resize(static_cast<size_t>(ret));
  LOG("%s: Current executable: %s\n", __FUNCTION__, exe_path->c_str());
  return true;
}

// Given an ELF binary at |path| that is _already_ mapped in the process,
// find the address of its dynamic section and its size.
// |path| is the full path of the binary (as it appears in /proc/self/maps).
// |self_maps| is an instance of ProcMaps that is used to inspect
// /proc/self/maps. The function rewinds it and iterates over its entries.
// On success, return true and set |*dynamic_address| and |*dynamic_size|.
bool FindElfDynamicSection(const char* path,
                           ProcMaps* self_maps,
                           size_t* dynamic_address,
                           size_t* dynamic_size) {
  // Read the ELF header first.
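  // Note: |header| is declared as a one-element array so that its name can
  // be passed directly as a pointer to fd.Read() below.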
  ELF::Ehdr header[1];

  crazy::FileDescriptor fd;
  if (!fd.OpenReadOnly(path) ||
      fd.Read(header, sizeof(header)) != static_cast<int>(sizeof(header))) {
    LOG_ERRNO("%s: Could not load ELF binary header", __FUNCTION__);
    return false;
  }

  // Sanity check: verify the ELF magic bytes and that the binary matches
  // the ELF class (32-bit or 64-bit) this linker was built for.
  if (header->e_ident[0] != 127 || header->e_ident[1] != 'E' ||
      header->e_ident[2] != 'L' || header->e_ident[3] != 'F' ||
      header->e_ident[4] != ELF::kElfClass) {
    LOG("%s: Not a %d-bit ELF binary: %s\n",
        __FUNCTION__,
        ELF::kElfBits,
        path);
    return false;
  }

  if (header->e_phoff == 0 || header->e_phentsize != sizeof(ELF::Phdr)) {
    LOG("%s: Invalid program header values: %s\n", __FUNCTION__, path);
    return false;
  }

  // Scan the program header table.
  if (fd.SeekTo(header->e_phoff) < 0) {
    LOG_ERRNO("%s: Could not find ELF program header table", __FUNCTION__);
    return false;
  }

  ELF::Phdr phdr_load0 = {0, };
  ELF::Phdr phdr_dyn = {0, };
  bool found_load0 = false;
  bool found_dyn = false;

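  // Walk every program header entry, remembering only the first PT_LOAD
  // segment (used below to compute the load bias) and the first PT_DYNAMIC
  // segment (which describes the dynamic section).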
  for (size_t n = 0; n < header->e_phnum; ++n) {
    ELF::Phdr phdr;
    if (fd.Read(&phdr, sizeof(phdr)) != sizeof(phdr)) {
      LOG_ERRNO("%s: Could not read program header entry", __FUNCTION__);
      return false;
    }

    if (phdr.p_type == PT_LOAD && !found_load0) {
      phdr_load0 = phdr;
      found_load0 = true;
    } else if (phdr.p_type == PT_DYNAMIC && !found_dyn) {
      phdr_dyn = phdr;
      found_dyn = true;
    }
  }

  if (!found_load0) {
    LOG("%s: Could not find loadable segment!?\n", __FUNCTION__);
    return false;
  }
  if (!found_dyn) {
    LOG("%s: Could not find dynamic segment!?\n", __FUNCTION__);
    return false;
  }

  LOG("%s: Found first loadable segment [offset=%p vaddr=%p]\n",
      __FUNCTION__,
      (void*)phdr_load0.p_offset,
      (void*)phdr_load0.p_vaddr);

  LOG("%s: Found dynamic segment [offset=%p vaddr=%p size=%p]\n",
      __FUNCTION__,
      (void*)phdr_dyn.p_offset,
      (void*)phdr_dyn.p_vaddr,
      (void*)phdr_dyn.p_memsz);

  // Parse /proc/self/maps to find the load address of the first
  // loadable segment.
  size_t path_len = strlen(path);
  self_maps->Rewind();
  ProcMaps::Entry entry;
  while (self_maps->GetNextEntry(&entry)) {
    if (!entry.path || entry.path_len != path_len ||
        memcmp(entry.path, path, path_len) != 0)
      continue;

    LOG("%s: Found executable segment mapped [%p-%p offset=%p]\n",
        __FUNCTION__,
        (void*)entry.vma_start,
        (void*)entry.vma_end,
        (void*)entry.load_offset);

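    // The load bias is the difference between the address where the first
    // PT_LOAD segment was actually mapped and the virtual address it was
    // linked at; adding it to a link-time vaddr yields a runtime address.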
    size_t load_bias = entry.vma_start - phdr_load0.p_vaddr;
    LOG("%s: Load bias is %p\n", __FUNCTION__, (void*)load_bias);

    *dynamic_address = load_bias + phdr_dyn.p_vaddr;
    *dynamic_size = phdr_dyn.p_memsz;
    LOG("%s: Dynamic section addr=%p size=%p\n",
        __FUNCTION__,
        (void*)*dynamic_address,
        (void*)*dynamic_size);
    return true;
  }

  LOG("%s: Executable is not mapped in current process.\n", __FUNCTION__);
  return false;
}

// Helper class to temporarily remap a page to readable+writable until
// scope exit.
class ScopedPageMapper {
 public:
  ScopedPageMapper() : page_address_(0), page_prot_(0) {}
  void MapReadWrite(void* address);
  ~ScopedPageMapper();

 private:
  static const uintptr_t kPageSize = 4096;
  uintptr_t page_address_;
  int page_prot_;
};

void ScopedPageMapper::MapReadWrite(void* address) {
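  // Round |address| down to the start of its page, since mprotect() only
  // operates on whole pages.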
  page_address_ = reinterpret_cast<uintptr_t>(address) & ~(kPageSize - 1);
  page_prot_ = 0;
  if (!FindProtectionFlagsForAddress(address, &page_prot_) ||
      (page_prot_ & (PROT_READ | PROT_WRITE)) == (PROT_READ | PROT_WRITE)) {
    // If the address is invalid, or if the page is already read+write,
    // no need to do anything here.
    page_address_ = 0;
    return;
  }
  int new_page_prot = page_prot_ | PROT_READ | PROT_WRITE;
  int ret = mprotect(
      reinterpret_cast<void*>(page_address_), kPageSize, new_page_prot);
  if (ret < 0) {
    LOG_ERRNO("Could not remap page to read/write");
    page_address_ = 0;
  }
}

ScopedPageMapper::~ScopedPageMapper() {
  if (page_address_) {
    int ret =
        mprotect(reinterpret_cast<void*>(page_address_), kPageSize, page_prot_);
    if (ret < 0)
      LOG_ERRNO("Could not remap page to old protection flags");
  }
}

}  // namespace

bool RDebug::Init() {
  // The address of '_r_debug' is in the DT_DEBUG entry of the current
  // executable.
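  // Mark initialization as attempted right away: AddEntryImpl() only calls
  // Init() when |init_| is false, so a failed attempt is not retried on
  // every subsequent entry addition.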
  init_ = true;

  size_t dynamic_addr = 0;
  size_t dynamic_size = 0;
  String path;

  // Find the current executable's full path, and its dynamic section
  // information.
  if (!FindExecutablePath(&path))
    return false;

  ProcMaps self_maps;
  if (!FindElfDynamicSection(
           path.c_str(), &self_maps, &dynamic_addr, &dynamic_size)) {
    return false;
  }

  // Parse the dynamic table and find the DT_DEBUG entry.
  const ELF::Dyn* dyn_section = reinterpret_cast<const ELF::Dyn*>(dynamic_addr);

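  // Walk the in-memory dynamic table one entry at a time, stopping at the
  // first DT_DEBUG entry that holds a non-NULL pointer, or when the table
  // is exhausted.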
  while (dynamic_size >= sizeof(*dyn_section)) {
    if (dyn_section->d_tag == DT_DEBUG) {
      // Found it!
      LOG("%s: Found DT_DEBUG entry inside %s at %p, pointing to %p\n",
          __FUNCTION__,
          path.c_str(),
          dyn_section,
          dyn_section->d_un.d_ptr);
      if (dyn_section->d_un.d_ptr) {
        r_debug_ = reinterpret_cast<r_debug*>(dyn_section->d_un.d_ptr);
        LOG("%s: r_debug [r_version=%d r_map=%p r_brk=%p r_ldbase=%p]\n",
            __FUNCTION__,
            r_debug_->r_version,
            r_debug_->r_map,
            r_debug_->r_brk,
            r_debug_->r_ldbase);
        // Only version 1 of the struct is supported. Bail out for any
        // other version, otherwise the code below would dereference a
        // NULL |r_debug_|.
        if (r_debug_->r_version != 1) {
          LOG("%s: r_debug.r_version is %d, 1 expected.\n",
              __FUNCTION__,
              r_debug_->r_version);
          r_debug_ = NULL;
          return false;
        }

        // The linker of recent Android releases maps its link map entries
        // in read-only pages. Determine if this is the case and record it
        // for later. The first entry in the list corresponds to the
        // executable.
        int prot = self_maps.GetProtectionFlagsForAddress(r_debug_->r_map);
        readonly_entries_ = (prot & PROT_WRITE) == 0;

        LOG("%s: r_debug.readonly_entries=%s\n",
            __FUNCTION__,
            readonly_entries_ ? "true" : "false");
        return true;
      }
    }
    dyn_section++;
    dynamic_size -= sizeof(*dyn_section);
  }

  LOG("%s: There is no non-0 DT_DEBUG entry in this process\n", __FUNCTION__);
  return false;
}

namespace {

// Helper runnable class. Handler is one of the two static functions
// AddEntryInternal() or DelEntryInternal(). Calling these invokes
// AddEntryImpl() or DelEntryImpl() respectively on |rdebug|.
class RDebugRunnable {
 public:
  RDebugRunnable(rdebug_callback_handler_t handler,
                 RDebug* rdebug,
                 link_map_t* entry)
      : handler_(handler), rdebug_(rdebug), entry_(entry) { }

  static void Run(void* opaque);

 private:
  rdebug_callback_handler_t handler_;
  RDebug* rdebug_;
  link_map_t* entry_;
};

// Callback entry point.
void RDebugRunnable::Run(void* opaque) {
  RDebugRunnable* runnable = static_cast<RDebugRunnable*>(opaque);

  LOG("%s: Callback received, runnable=%p\n", __FUNCTION__, runnable);
  (*runnable->handler_)(runnable->rdebug_, runnable->entry_);
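  // |runnable| was allocated with 'new' by RDebug::PostCallback() and is
  // released here once the handler has run.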
  delete runnable;
}

}  // namespace

// Helper function to schedule AddEntry() and DelEntry() calls onto another
// thread where possible. Running them there avoids races with the system
// linker, which expects to be able to set r_map pages readonly when it
// is not using them and which may run simultaneously on the main thread.
bool RDebug::PostCallback(rdebug_callback_handler_t handler,
                          link_map_t* entry) {
  if (!post_for_later_execution_) {
    LOG("%s: Deferred execution disabled\n", __FUNCTION__);
    return false;
  }

  RDebugRunnable* runnable = new RDebugRunnable(handler, this, entry);
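  // Ownership of |runnable| passes to the posted callback; if posting fails
  // it is deleted below instead.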
  void* context = post_for_later_execution_context_;

  if (!(*post_for_later_execution_)(context, &RDebugRunnable::Run, runnable)) {
    LOG("%s: Deferred execution enabled, but posting failed\n", __FUNCTION__);
    delete runnable;
    return false;
  }

  LOG("%s: Posted for later execution, runnable=%p\n", __FUNCTION__, runnable);
  return true;
}

void RDebug::AddEntryImpl(link_map_t* entry) {
  LOG("%s: Adding: %s\n", __FUNCTION__, entry->l_name);
  if (!init_)
    Init();

  if (!r_debug_) {
    LOG("%s: Nothing to do\n", __FUNCTION__);
    return;
  }

  // Tell GDB the list is going to be modified.
  r_debug_->r_state = RT_ADD;
  r_debug_->r_brk();

  // IMPORTANT: GDB expects the first entry in the list to correspond
  // to the executable. So add our new entry just after it. This is ok
  // because by default, the linker is always the second entry, as in:
  //
  //   [<executable>, /system/bin/linker, libc.so, libm.so, ...]
  //
  // By design, the first two entries should never be removed since they
  // can't be unloaded from the process (they are loaded by the kernel
  // when invoking the program).
  //
  // TODO(digit): Does GDB expect the linker to be the second entry?
  // It doesn't seem so, but have a look at the GDB sources to confirm
  // this. No problems appear experimentally.
  //
  // What happens for static binaries? They don't have an .interp section,
  // and don't have an r_debug variable on Android, so GDB should not be
  // able to debug shared libraries at all for them (assuming one
  // statically links a linker into the executable).
  if (!r_debug_->r_map || !r_debug_->r_map->l_next ||
      !r_debug_->r_map->l_next->l_next) {
    // Sanity check: the list must contain at least three entries, since
    // the new one is inserted between the second and the third.
    LOG("%s: Malformed r_debug.r_map list\n", __FUNCTION__);
    r_debug_ = NULL;
    return;
  }

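  // |before| is the second entry (normally the system linker) and |after|
  // the entry that follows it; the new entry is linked in between the two.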
  link_map_t* before = r_debug_->r_map->l_next;
  link_map_t* after = before->l_next;

  // Prepare the new entry.
  entry->l_prev = before;
  entry->l_next = after;

  // IMPORTANT: Before modifying the previous and next entries in the
  // list, ensure that they are writable. This avoids crashing when
  // updating the 'l_prev' or 'l_next' fields of a system linker entry,
  // which are mapped read-only.
  {
    ScopedPageMapper mapper;
    if (readonly_entries_)
      mapper.MapReadWrite(before);
    before->l_next = entry;
  }

  {
    ScopedPageMapper mapper;
    if (readonly_entries_)
      mapper.MapReadWrite(after);
    after->l_prev = entry;
  }

  // Tell GDB that the list modification has completed.
  r_debug_->r_state = RT_CONSISTENT;
  r_debug_->r_brk();
}

void RDebug::DelEntryImpl(link_map_t* entry) {
  LOG("%s: Deleting: %s\n", __FUNCTION__, entry->l_name);
  if (!r_debug_)
    return;

  // Tell GDB the list is going to be modified.
  r_debug_->r_state = RT_DELETE;
  r_debug_->r_brk();

  // IMPORTANT: Before modifying the previous and next entries in the
  // list, ensure that they are writable. See comment above for more
  // details.
  if (entry->l_prev) {
    ScopedPageMapper mapper;
    if (readonly_entries_)
      mapper.MapReadWrite(entry->l_prev);
    entry->l_prev->l_next = entry->l_next;
  }

  if (entry->l_next) {
    ScopedPageMapper mapper;
    if (readonly_entries_)
      mapper.MapReadWrite(entry->l_next);
    entry->l_next->l_prev = entry->l_prev;
  }

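  // If the entry being removed is the current list head, make the next
  // entry the new head.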
  if (r_debug_->r_map == entry)
    r_debug_->r_map = entry->l_next;

  entry->l_prev = NULL;
  entry->l_next = NULL;

  // Tell GDB the list modification has completed.
  r_debug_->r_state = RT_CONSISTENT;
  r_debug_->r_brk();
}

}  // namespace crazy