/* libunwind - a platform-independent unwind library
   Copyright (C) 2003, 2005 Hewlett-Packard Co
   Copyright (C) 2007 David Mosberger-Tang
        Contributed by David Mosberger-Tang <dmosberger@gmail.com>

This file is part of libunwind.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

#include <sys/mman.h>
#include <sys/stat.h>

#include "libunwind_i.h"
#include "map_info.h"

#if ELF_CLASS == ELFCLASS32
# define ELF_W(x)  ELF32_##x
# define Elf_W(x)  Elf32_##x
# define elf_w(x)  _Uelf32_##x
#else
# define ELF_W(x)  ELF64_##x
# define Elf_W(x)  Elf64_##x
# define elf_w(x)  _Uelf64_##x
#endif

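// Reads a single field of an ELF structure from a memory-backed image
// via elf_w(memory_read). `offset` is the offset of the structure within
// the image; a short read makes the enclosing function return false.
// When `check_cached` is true, a field that is already non-zero is
// treated as cached and the read is skipped.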
#define GET_FIELD(ei, offset, struct_name, elf_struct, field, check_cached) \
  { \
    if (!check_cached || (elf_struct)->field == 0) { \
      if (sizeof((elf_struct)->field) != elf_w (memory_read) ( \
          ei, ei->u.memory.start + offset + offsetof(struct_name, field), \
          (uint8_t*) &((elf_struct)->field), sizeof((elf_struct)->field), false)) { \
        return false; \
      } \
    } \
  }

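// Typed wrappers over GET_FIELD for the ELF header (always at offset 0),
// program and section headers, symbol entries, and dynamic entries.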
#define GET_EHDR_FIELD(ei, ehdr, field, check_cached) \
  GET_FIELD(ei, 0, Elf_W(Ehdr), ehdr, field, check_cached)

#define GET_PHDR_FIELD(ei, offset, phdr, field) \
  GET_FIELD(ei, offset, Elf_W(Phdr), phdr, field, false)

#define GET_SHDR_FIELD(ei, offset, shdr, field) \
  GET_FIELD(ei, offset, Elf_W(Shdr), shdr, field, false)

#define GET_SYM_FIELD(ei, offset, sym, field) \
  GET_FIELD(ei, offset, Elf_W(Sym), sym, field, false)

#define GET_DYN_FIELD(ei, offset, dyn, field) \
  GET_FIELD(ei, offset, Elf_W(Dyn), dyn, field, false)

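// Looks up the name of the procedure containing `ip` in process `pid`,
// copying at most `len` bytes into `buf` and storing the offset of `ip`
// from the start of the procedure in `*offp`.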
extern bool elf_w (get_proc_name) (
    unw_addr_space_t as, pid_t pid, unw_word_t ip, char* buf, size_t len,
    unw_word_t* offp, void* as_arg);

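// Same lookup as get_proc_name, but restricted to the given ELF image,
// which is mapped at `segbase` with file offset `mapoff`.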
extern bool elf_w (get_proc_name_in_image) (
    unw_addr_space_t as, struct elf_image* ei, unsigned long segbase,
    unsigned long mapoff, unw_word_t ip, char* buf, size_t buf_len, unw_word_t* offp);

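// Determines the load base of the image for the mapping at file offset
// `mapoff`, storing it in `*load_base`; returns false if it cannot be
// determined.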
extern bool elf_w (get_load_base) (struct elf_image* ei, unw_word_t mapoff, unw_word_t* load_base);

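// Reads `bytes` bytes at `addr` from the image and returns the number of
// bytes actually read; `string_read` indicates the data being read is a
// NUL-terminated string.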
extern size_t elf_w (memory_read) (
    struct elf_image* ei, unw_word_t addr, uint8_t* buffer, size_t bytes, bool string_read);

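// Decompresses the XZ-compressed buffer `src` (as used for the
// .gnu_debugdata section below); on success `*dst` and `*dst_size`
// describe a newly allocated buffer holding the decompressed data.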
extern bool elf_w (xz_decompress) (uint8_t* src, size_t src_size,
                                   uint8_t** dst, size_t* dst_size);

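// Finds the section `name` in a file-mapped image; on success returns a
// pointer to the section data, its size, and (if `vaddr` is non-NULL)
// its virtual address.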
extern bool elf_w (find_section_mapped) (struct elf_image *ei, const char* name,
                                         uint8_t** section, size_t* size, Elf_W(Addr)* vaddr);

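// Checks that a file-mapped image looks like a valid ELF object of the
// expected class: correct magic bytes, matching EI_CLASS, and a sane
// version in the ident bytes.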
static inline bool elf_w (valid_object_mapped) (struct elf_image* ei) {
  if (ei->u.mapped.size <= EI_VERSION) {
    return false;
  }

  uint8_t* e_ident = (uint8_t*) ei->u.mapped.image;
  return (memcmp (ei->u.mapped.image, ELFMAG, SELFMAG) == 0
          && e_ident[EI_CLASS] == ELF_CLASS && e_ident[EI_VERSION] != EV_NONE
          && e_ident[EI_VERSION] <= EV_CURRENT);
}

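// Performs the same ELF ident check for a memory-backed image, reading
// the ident bytes through elf_w(memory_read).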
static inline bool elf_w (valid_object_memory) (struct elf_image* ei) {
  uint8_t e_ident[EI_NIDENT];
  uintptr_t start = ei->u.memory.start;
  if (SELFMAG != elf_w (memory_read) (ei, start, e_ident, SELFMAG, false)) {
    return false;
  }
  if (memcmp (e_ident, ELFMAG, SELFMAG) != 0) {
    return false;
  }
  // Read the rest of the ident data.
  if (EI_NIDENT - SELFMAG != elf_w (memory_read) (
      ei, start + SELFMAG, e_ident + SELFMAG, EI_NIDENT - SELFMAG, false)) {
    return false;
  }
  return e_ident[EI_CLASS] == ELF_CLASS && e_ident[EI_VERSION] != EV_NONE
         && e_ident[EI_VERSION] <= EV_CURRENT;
}

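// Maps the file at `path` read-only and validates that it is an ELF
// object of the expected class. On failure nothing stays mapped and
// false is returned.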
static inline bool elf_map_image (struct elf_image* ei, const char* path) {
  struct stat stat;
  int fd;

  fd = open (path, O_RDONLY);
  if (fd < 0) {
    return false;
  }

  if (fstat (fd, &stat) == -1) {
    close (fd);
    return false;
  }

  ei->u.mapped.size = stat.st_size;
  ei->u.mapped.image = mmap (NULL, ei->u.mapped.size, PROT_READ, MAP_PRIVATE, fd, 0);
  close (fd);
  if (ei->u.mapped.image == MAP_FAILED) {
    return false;
  }

  ei->valid = elf_w (valid_object_mapped) (ei);
  if (!ei->valid) {
    munmap (ei->u.mapped.image, ei->u.mapped.size);
    return false;
  }

  ei->mapped = true;
  // Set to true for cases where this is called outside of elf_map_cached_image.
  ei->load_attempted = true;

  return true;
}

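// Caches the ELF image backing `map` if it has not been cached already:
// the file is mapped if possible, otherwise the image is read directly
// from the target's memory, and for remote unwinds any .gnu_debugdata
// minidebuginfo is decompressed and cached. Returns whether the cached
// image is valid.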
static inline bool elf_map_cached_image (
    unw_addr_space_t as, void* as_arg, struct map_info* map, unw_word_t ip,
    bool local_unwind) {
  intrmask_t saved_mask;

  // Don't even try to cache this unless the map is readable and executable.
  if ((map->flags & (PROT_READ | PROT_EXEC)) != (PROT_READ | PROT_EXEC)) {
    return false;
  }

  // Do not try to cache the map if it's a file from /dev/ that is not
  // /dev/ashmem/.
  if (map->path != NULL && strncmp ("/dev/", map->path, 5) == 0
      && strncmp ("ashmem/", map->path + 5, 7) != 0) {
    return false;
  }

  // Lock while loading the cached elf image.
  lock_acquire (&map->ei_lock, saved_mask);
  if (!map->ei.load_attempted) {
    map->ei.load_attempted = true;

    if (!elf_map_image (&map->ei, map->path)) {
      // If the image cannot be loaded, we'll read data directly from
      // the process using the access_mem function.
      if (map->flags & PROT_READ) {
        map->ei.u.memory.start = map->start;
        map->ei.u.memory.end = map->end;
        map->ei.u.memory.as = as;
        map->ei.u.memory.as_arg = as_arg;
        map->ei.valid = elf_w (valid_object_memory) (&map->ei);
      }
    } else if (!local_unwind) {
      // Do not process the compressed section for local unwinds.
      // Uncompressing this section can consume a large amount of memory
      // and slow the unwind down, which causes problems when an ANR
      // occurs in the system. Compressed sections are only used to hold
      // Java stack trace information. Since ART is one of the few
      // sources of local traces, and it already dumps the Java stack,
      // this information is redundant.

      // Try to cache the minidebuginfo data.
      uint8_t *compressed = NULL;
      size_t compressed_len;
      if (elf_w (find_section_mapped) (&map->ei, ".gnu_debugdata", &compressed,
          &compressed_len, NULL)) {
        if (elf_w (xz_decompress) (compressed, compressed_len,
            (uint8_t**) &map->ei.mini_debug_info_data, &map->ei.mini_debug_info_size)) {
          Debug (1, "Decompressed and cached .gnu_debugdata");
        } else {
          map->ei.mini_debug_info_data = NULL;
          map->ei.mini_debug_info_size = 0;
        }
      }
    }
    unw_word_t load_base;
    if (map->ei.valid && elf_w (get_load_base) (&map->ei, map->offset, &load_base)) {
      map->load_base = load_base;
    }
  } else if (map->ei.valid && !map->ei.mapped && map->ei.u.memory.as != as) {
    // If this map is only in memory, this might be a cached map that
    // crosses over multiple unwinds. In this case, we've detected that
    // the cached address space is stale, so set it to the current one.
    map->ei.u.memory.as = as;
  }
  lock_release (&map->ei_lock, saved_mask);
  return map->ei.valid;
}
    223