      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  * All rights reserved.
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  *  * Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  *  * Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in
     12  *    the documentation and/or other materials provided with the
     13  *    distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
     22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
     25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 
     29 #include "linker_phdr.h"
     30 
     31 #include <errno.h>
     32 #include <machine/exec.h>
     33 #include <sys/mman.h>
     34 #include <sys/types.h>
     35 #include <sys/stat.h>
     36 #include <unistd.h>
     37 
     38 #include "linker.h"
     39 #include "linker_debug.h"
     40 
     41 /**
     42   TECHNICAL NOTE ON ELF LOADING.
     43 
     44   An ELF file's program header table contains one or more PT_LOAD
      45   segments, which correspond to portions of the file that need to
     46   be mapped into the process' address space.
     47 
     48   Each loadable segment has the following important properties:
     49 
     50     p_offset  -> segment file offset
     51     p_filesz  -> segment file size
     52     p_memsz   -> segment memory size (always >= p_filesz)
     53     p_vaddr   -> segment's virtual address
     54     p_flags   -> segment flags (e.g. readable, writable, executable)
     55 
     56   We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
     57 
     58   The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
     59   ranges of virtual addresses. A few rules apply:
     60 
     61   - the virtual address ranges should not overlap.
     62 
     63   - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
     64     between them should always be initialized to 0.
     65 
     66   - ranges do not necessarily start or end at page boundaries. Two distinct
     67     segments can have their start and end on the same page. In this case, the
     68     page inherits the mapping flags of the latter segment.
     69 
      70   Finally, the real load address of each segment is not p_vaddr. Instead,
      71   the loader decides where to load the first segment, then loads all others
      72   relative to it so that the initial range layout is preserved.
     73 
     74   For example, consider the following list:
     75 
     76     [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
     77     [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
     78 
     79   This corresponds to two segments that cover these virtual address ranges:
     80 
     81        0x30000...0x34000
     82        0x40000...0x48000
     83 
     84   If the loader decides to load the first segment at address 0xa0000000
     85   then the segments' load address ranges will be:
     86 
     87        0xa0030000...0xa0034000
     88        0xa0040000...0xa0048000
     89 
     90   In other words, all segments must be loaded at an address that has the same
     91   constant offset from their p_vaddr value. This offset is computed as the
     92   difference between the first segment's load address, and its p_vaddr value.
     93 
     94   However, in practice, segments do _not_ start at page boundaries. Since we
     95   can only memory-map at page boundaries, this means that the bias is
     96   computed as:
     97 
     98        load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
     99 
     100   (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
     101           possible wrap-around past UINT32_MAX for large p_vaddr values.)
    102 
     103   Note that phdr0_load_address must start at a page boundary, with
    104   the segment's real content starting at:
    105 
    106        phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
    107 
    108   Note that ELF requires the following condition to make the mmap()-ing work:
    109 
    110       PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
    111 
    112   The load_bias must be added to any p_vaddr value read from the ELF file to
    113   determine the corresponding memory address.
    114 
    115  **/
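/* A worked example of the bias computation above (illustrative only, assuming
 * 4096-byte pages and made-up values):
 *
 *   phdr0->p_vaddr       = 0x30200      (not page-aligned)
 *   PAGE_START(p_vaddr)  = 0x30000
 *   PAGE_OFFSET(p_vaddr) = 0x200
 *
 * If the loader maps the first segment's page at phdr0_load_address ==
 * 0xa0030000, then:
 *
 *   load_bias              = 0xa0030000 - 0x30000 = 0xa0000000
 *   segment content starts = 0xa0030000 + 0x200   = 0xa0030200
 *
 * and every p_vaddr in the file is converted to a memory address by adding
 * load_bias (e.g. 0x30200 + 0xa0000000 == 0xa0030200).
 */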
    116 
    117 #define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
    118 #define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
    119                                       MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
    120                                       MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
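// For example (illustrative only): PFLAGS_TO_PROT(PF_R | PF_X) evaluates to
// PROT_READ | PROT_EXEC, and PFLAGS_TO_PROT(PF_R | PF_W) to PROT_READ | PROT_WRITE.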
    121 
    122 ElfReader::ElfReader(const char* name, int fd, off64_t file_offset)
    123     : name_(name), fd_(fd), file_offset_(file_offset),
    124       phdr_num_(0), phdr_mmap_(nullptr), phdr_table_(nullptr), phdr_size_(0),
    125       load_start_(nullptr), load_size_(0), load_bias_(0),
    126       loaded_phdr_(nullptr) {
    127 }
    128 
    129 ElfReader::~ElfReader() {
    130   if (phdr_mmap_ != nullptr) {
    131     munmap(phdr_mmap_, phdr_size_);
    132   }
    133 }
    134 
    135 bool ElfReader::Load(const android_dlextinfo* extinfo) {
    136   return ReadElfHeader() &&
    137          VerifyElfHeader() &&
    138          ReadProgramHeader() &&
    139          ReserveAddressSpace(extinfo) &&
    140          LoadSegments() &&
    141          FindPhdr();
    142 }
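/* A minimal sketch of how a caller might drive this class (illustrative only;
 * it assumes the load_start()/load_size()/load_bias() accessors declared in
 * linker_phdr.h and an 'fd' already opened on the library file):
 *
 *   ElfReader elf_reader(name, fd, 0);
 *   if (!elf_reader.Load(extinfo)) {
 *     return false;  // DL_ERR() has already recorded the failure reason.
 *   }
 *   void*      base = elf_reader.load_start();
 *   size_t     size = elf_reader.load_size();
 *   ElfW(Addr) bias = elf_reader.load_bias();
 */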
    143 
    144 bool ElfReader::ReadElfHeader() {
    145   ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd_, &header_, sizeof(header_), file_offset_));
    146   if (rc < 0) {
    147     DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    148     return false;
    149   }
    150 
    151   if (rc != sizeof(header_)) {
    152     DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
    153            static_cast<size_t>(rc));
    154     return false;
    155   }
    156   return true;
    157 }
    158 
    159 bool ElfReader::VerifyElfHeader() {
    160   if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) {
    161     DL_ERR("\"%s\" has bad ELF magic", name_);
    162     return false;
    163   }
    164 
    165   // Try to give a clear diagnostic for ELF class mismatches, since they're
    166   // an easy mistake to make during the 32-bit/64-bit transition period.
    167   int elf_class = header_.e_ident[EI_CLASS];
    168 #if defined(__LP64__)
    169   if (elf_class != ELFCLASS64) {
    170     if (elf_class == ELFCLASS32) {
    171       DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
    172     } else {
    173       DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    174     }
    175     return false;
    176   }
    177 #else
    178   if (elf_class != ELFCLASS32) {
    179     if (elf_class == ELFCLASS64) {
    180       DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
    181     } else {
    182       DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    183     }
    184     return false;
    185   }
    186 #endif
    187 
    188   if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    189     DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    190     return false;
    191   }
    192 
    193   if (header_.e_type != ET_DYN) {
    194     DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    195     return false;
    196   }
    197 
    198   if (header_.e_version != EV_CURRENT) {
    199     DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    200     return false;
    201   }
    202 
    203   if (header_.e_machine != ELF_TARG_MACH) {
    204     DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    205     return false;
    206   }
    207 
    208   return true;
    209 }
    210 
     211 // Loads the program header table from an ELF file into a read-only private
     212 // file-backed mmap-ed block.
    213 bool ElfReader::ReadProgramHeader() {
    214   phdr_num_ = header_.e_phnum;
    215 
    216   // Like the kernel, we only accept program header tables that
    217   // are smaller than 64KiB.
    218   if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    219     DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    220     return false;
    221   }
    222 
    223   ElfW(Addr) page_min = PAGE_START(header_.e_phoff);
    224   ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr))));
    225   ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff);
    226 
    227   phdr_size_ = page_max - page_min;
    228 
    229   void* mmap_result = mmap64(nullptr, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, file_offset_ + page_min);
    230   if (mmap_result == MAP_FAILED) {
    231     DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    232     return false;
    233   }
    234 
    235   phdr_mmap_ = mmap_result;
    236   phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset);
    237   return true;
    238 }
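/* Worked example (illustrative only, assuming 4096-byte pages and an ELF32
 * file, whose program headers are 32 bytes each): with e_phoff == 0x34 and
 * e_phnum == 7, the table occupies file bytes [0x34, 0x114), so:
 *
 *   page_min    = PAGE_START(0x34)  = 0
 *   page_max    = PAGE_END(0x114)   = 0x1000
 *   page_offset = PAGE_OFFSET(0x34) = 0x34
 *
 * One page of the file is mapped and phdr_table_ points 0x34 bytes into it.
 */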
    239 
    240 /* Returns the size of the extent of all the possibly non-contiguous
    241  * loadable segments in an ELF program header table. This corresponds
    242  * to the page-aligned size in bytes that needs to be reserved in the
    243  * process' address space. If there are no loadable segments, 0 is
    244  * returned.
    245  *
    246  * If out_min_vaddr or out_max_vaddr are not null, they will be
    247  * set to the minimum and maximum addresses of pages to be reserved,
    248  * or 0 if there is nothing to load.
    249  */
    250 size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
    251                                 ElfW(Addr)* out_min_vaddr,
    252                                 ElfW(Addr)* out_max_vaddr) {
    253   ElfW(Addr) min_vaddr = UINTPTR_MAX;
    254   ElfW(Addr) max_vaddr = 0;
    255 
    256   bool found_pt_load = false;
    257   for (size_t i = 0; i < phdr_count; ++i) {
    258     const ElfW(Phdr)* phdr = &phdr_table[i];
    259 
    260     if (phdr->p_type != PT_LOAD) {
    261       continue;
    262     }
    263     found_pt_load = true;
    264 
    265     if (phdr->p_vaddr < min_vaddr) {
    266       min_vaddr = phdr->p_vaddr;
    267     }
    268 
    269     if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
    270       max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    271     }
    272   }
    273   if (!found_pt_load) {
    274     min_vaddr = 0;
    275   }
    276 
    277   min_vaddr = PAGE_START(min_vaddr);
    278   max_vaddr = PAGE_END(max_vaddr);
    279 
    280   if (out_min_vaddr != nullptr) {
    281     *out_min_vaddr = min_vaddr;
    282   }
    283   if (out_max_vaddr != nullptr) {
    284     *out_max_vaddr = max_vaddr;
    285   }
    286   return max_vaddr - min_vaddr;
    287 }
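/* Worked example (illustrative only): for the two PT_LOAD segments used in the
 * technical note above, covering [0x30000, 0x34000) and [0x40000, 0x48000):
 *
 *   min_vaddr = PAGE_START(0x30000) = 0x30000
 *   max_vaddr = PAGE_END(0x48000)   = 0x48000
 *
 * so the returned size is 0x18000 bytes, even though the segments themselves
 * occupy only 0xc000 bytes of it.
 */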
    288 
    289 // Reserve a virtual address range big enough to hold all loadable
    290 // segments of a program header table. This is done by creating a
    291 // private anonymous mmap() with PROT_NONE.
    292 bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
    293   ElfW(Addr) min_vaddr;
    294   load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
    295   if (load_size_ == 0) {
    296     DL_ERR("\"%s\" has no loadable segments", name_);
    297     return false;
    298   }
    299 
    300   uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
    301   void* start;
    302   size_t reserved_size = 0;
    303   bool reserved_hint = true;
    304 
    305   if (extinfo != nullptr) {
    306     if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) {
    307       reserved_size = extinfo->reserved_size;
    308       reserved_hint = false;
    309     } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) {
    310       reserved_size = extinfo->reserved_size;
    311     }
    312   }
    313 
    314   if (load_size_ > reserved_size) {
    315     if (!reserved_hint) {
    316       DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"",
     317              reserved_size, load_size_, name_);
    318       return false;
    319     }
    320     int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    321     start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
    322     if (start == MAP_FAILED) {
    323       DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
    324       return false;
    325     }
    326   } else {
    327     start = extinfo->reserved_addr;
    328   }
    329 
    330   load_start_ = start;
    331   load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
    332   return true;
    333 }
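/* Sketch of the caller-reserved path (illustrative only; it assumes the
 * android_dlextinfo definitions from <android/dlext.h> and a hypothetical
 * "libfoo.so"):
 *
 *   size_t reservation = 32 * 1024 * 1024;
 *   void* region = mmap(nullptr, reservation, PROT_NONE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   android_dlextinfo extinfo = {};
 *   extinfo.flags         = ANDROID_DLEXT_RESERVED_ADDRESS;
 *   extinfo.reserved_addr = region;
 *   extinfo.reserved_size = reservation;
 *   void* handle = android_dlopen_ext("libfoo.so", RTLD_NOW, &extinfo);
 *   // The library is loaded entirely inside [region, region + reservation),
 *   // or the call fails.
 */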
    334 
    335 bool ElfReader::LoadSegments() {
    336   for (size_t i = 0; i < phdr_num_; ++i) {
    337     const ElfW(Phdr)* phdr = &phdr_table_[i];
    338 
    339     if (phdr->p_type != PT_LOAD) {
    340       continue;
    341     }
    342 
    343     // Segment addresses in memory.
    344     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    345     ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;
    346 
    347     ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    348     ElfW(Addr) seg_page_end   = PAGE_END(seg_end);
    349 
    350     ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;
    351 
    352     // File offsets.
    353     ElfW(Addr) file_start = phdr->p_offset;
    354     ElfW(Addr) file_end   = file_start + phdr->p_filesz;
    355 
    356     ElfW(Addr) file_page_start = PAGE_START(file_start);
    357     ElfW(Addr) file_length = file_end - file_page_start;
    358 
    359     if (file_length != 0) {
    360       void* seg_addr = mmap64(reinterpret_cast<void*>(seg_page_start),
    361                             file_length,
    362                             PFLAGS_TO_PROT(phdr->p_flags),
    363                             MAP_FIXED|MAP_PRIVATE,
    364                             fd_,
    365                             file_offset_ + file_page_start);
    366       if (seg_addr == MAP_FAILED) {
    367         DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
    368         return false;
    369       }
    370     }
    371 
     372     // If the segment is writable and does not end on a page boundary,
     373     // zero-fill the rest of its last file-backed page.
    374     if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
    375       memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    376     }
    377 
    378     seg_file_end = PAGE_END(seg_file_end);
    379 
    380     // seg_file_end is now the first page address after the file
    381     // content. If seg_end is larger, we need to zero anything
    382     // between them. This is done by using a private anonymous
    383     // map for all extra pages.
    384     if (seg_page_end > seg_file_end) {
    385       void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end),
    386                            seg_page_end - seg_file_end,
    387                            PFLAGS_TO_PROT(phdr->p_flags),
    388                            MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
    389                            -1,
    390                            0);
    391       if (zeromap == MAP_FAILED) {
    392         DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
    393         return false;
    394       }
    395     }
    396   }
    397   return true;
    398 }
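/* Worked example (illustrative only, assuming 4096-byte pages): for the second
 * segment from the technical note (offset:0x4000, filesz:0x2000, memsz:0x8000,
 * vaddr:0x40000) with load_bias == 0xa0000000:
 *
 *   seg_start    = 0xa0040000    seg_page_start = 0xa0040000
 *   seg_file_end = 0xa0042000    seg_page_end   = 0xa0048000
 *
 * The first mmap64() maps file bytes [0x4000, 0x6000) at 0xa0040000. Here
 * seg_file_end is already page-aligned, so no trailing bytes need zeroing, and
 * the anonymous mapping supplies the zero pages for [0xa0042000, 0xa0048000).
 */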
    399 
     400 /* Used internally by phdr_table_protect_segments and
     401  * phdr_table_unprotect_segments to set the protection bits of all loaded
     402  * segments, with optional extra flags (in practice, PROT_WRITE).
    403  */
    404 static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
    405                                      ElfW(Addr) load_bias, int extra_prot_flags) {
    406   const ElfW(Phdr)* phdr = phdr_table;
    407   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
    408 
    409   for (; phdr < phdr_limit; phdr++) {
    410     if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
    411       continue;
    412     }
    413 
    414     ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    415     ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    416 
    417     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
    418                        seg_page_end - seg_page_start,
    419                        PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
    420     if (ret < 0) {
    421       return -1;
    422     }
    423   }
    424   return 0;
    425 }
    426 
    427 /* Restore the original protection modes for all loadable segments.
    428  * You should only call this after phdr_table_unprotect_segments and
    429  * applying all relocations.
    430  *
    431  * Input:
    432  *   phdr_table  -> program header table
     433  *   phdr_count  -> number of entries in table
     434  *   load_bias   -> load bias
     435  * Return:
     436  *   0 on success, -1 on failure (error code in errno).
    437  */
    438 int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
    439   return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
    440 }
    441 
    442 /* Change the protection of all loaded segments in memory to writable.
    443  * This is useful before performing relocations. Once completed, you
    444  * will have to call phdr_table_protect_segments to restore the original
    445  * protection flags on all segments.
    446  *
    447  * Note that some writable segments can also have their content turned
     448  * to read-only by calling phdr_table_protect_gnu_relro. This is not
    449  * performed here.
    450  *
    451  * Input:
    452  *   phdr_table  -> program header table
     453  *   phdr_count  -> number of entries in table
     454  *   load_bias   -> load bias
     455  * Return:
     456  *   0 on success, -1 on failure (error code in errno).
    457  */
    458 int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
    459   return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
    460 }
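/* A typical call sequence around relocation (a sketch only; 'si' stands for a
 * hypothetical soinfo-style object carrying the values this file works with):
 *
 *   phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias);
 *   // ... apply all relocations while the segments are writable ...
 *   phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias);
 *   // phdr_table_protect_gnu_relro() (below) is then used to re-protect the
 *   // relro region once its contents are final.
 */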
    461 
    462 /* Used internally by phdr_table_protect_gnu_relro and
    463  * phdr_table_unprotect_gnu_relro.
    464  */
    465 static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
    466                                           ElfW(Addr) load_bias, int prot_flags) {
    467   const ElfW(Phdr)* phdr = phdr_table;
    468   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
    469 
    470   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    471     if (phdr->p_type != PT_GNU_RELRO) {
    472       continue;
    473     }
    474 
    475     // Tricky: what happens when the relro segment does not start
     476     // or end at page boundaries? We're going to be over-protective
     477     // here and mark every page touched by the segment as read-only.
    478 
    479     // This seems to match Ian Lance Taylor's description of the
    480     // feature at http://www.airs.com/blog/archives/189.
    481 
    482     //    Extract:
    483     //       Note that the current dynamic linker code will only work
    484     //       correctly if the PT_GNU_RELRO segment starts on a page
    485     //       boundary. This is because the dynamic linker rounds the
    486     //       p_vaddr field down to the previous page boundary. If
    487     //       there is anything on the page which should not be read-only,
    488     //       the program is likely to fail at runtime. So in effect the
    489     //       linker must only emit a PT_GNU_RELRO segment if it ensures
    490     //       that it starts on a page boundary.
    491     ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    492     ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    493 
    494     int ret = mprotect(reinterpret_cast<void*>(seg_page_start),
    495                        seg_page_end - seg_page_start,
    496                        prot_flags);
    497     if (ret < 0) {
    498       return -1;
    499     }
    500   }
    501   return 0;
    502 }
    503 
    504 /* Apply GNU relro protection if specified by the program header. This will
    505  * turn some of the pages of a writable PT_LOAD segment to read-only, as
     506  * specified by one or more PT_GNU_RELRO segments. This must always be
     507  * performed after relocations.
     508  *
     509  * The areas typically covered are .got and .data.rel.ro; these are
     510  * read-only from the program's point of view, but contain absolute
     511  * addresses that need to be relocated before use.
    512  *
    513  * Input:
    514  *   phdr_table  -> program header table
     515  *   phdr_count  -> number of entries in table
     516  *   load_bias   -> load bias
     517  * Return:
     518  *   0 on success, -1 on failure (error code in errno).
    519  */
    520 int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
    521   return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
    522 }
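/* Worked example (illustrative only, assuming 4096-byte pages): a PT_GNU_RELRO
 * segment with p_vaddr == 0x5000 and p_memsz == 0xf20 covers [0x5000, 0x5f20),
 * which PAGE_START/PAGE_END round out to [0x5000, 0x6000). The whole page is
 * remapped PROT_READ, including the 0xe0 bytes past the end of the segment.
 */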
    523 
    524 /* Serialize the GNU relro segments to the given file descriptor. This can be
    525  * performed after relocations to allow another process to later share the
    526  * relocated segment, if it was loaded at the same address.
    527  *
    528  * Input:
    529  *   phdr_table  -> program header table
     530  *   phdr_count  -> number of entries in table
     531  *   load_bias   -> load bias
     532  *   fd          -> writable file descriptor to use
     533  * Return:
     534  *   0 on success, -1 on failure (error code in errno).
    535  */
    536 int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
    537                                    int fd) {
    538   const ElfW(Phdr)* phdr = phdr_table;
    539   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
    540   ssize_t file_offset = 0;
    541 
    542   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    543     if (phdr->p_type != PT_GNU_RELRO) {
    544       continue;
    545     }
    546 
    547     ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    548     ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    549     ssize_t size = seg_page_end - seg_page_start;
    550 
    551     ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size));
    552     if (written != size) {
    553       return -1;
    554     }
    555     void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ,
    556                      MAP_PRIVATE|MAP_FIXED, fd, file_offset);
    557     if (map == MAP_FAILED) {
    558       return -1;
    559     }
    560     file_offset += size;
    561   }
    562   return 0;
    563 }
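/* Sketch of the intended pairing of the two relro-sharing helpers (illustrative
 * only; 'si' and 'relro_fd' are hypothetical, e.g. a file descriptor shared
 * between processes that loaded the same library at the same address):
 *
 *   // Process A, after relocating:
 *   phdr_table_serialize_gnu_relro(si->phdr, si->phnum, si->load_bias, relro_fd);
 *
 *   // Process B, after relocating to the identical address:
 *   phdr_table_map_gnu_relro(si->phdr, si->phnum, si->load_bias, relro_fd);
 *
 * Pages in B that match the serialized copy byte-for-byte are then backed by
 * the shared file instead of dirty anonymous memory.
 */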
    564 
    565 /* Where possible, replace the GNU relro segments with mappings of the given
    566  * file descriptor. This can be performed after relocations to allow a file
    567  * previously created by phdr_table_serialize_gnu_relro in another process to
    568  * replace the dirty relocated pages, saving memory, if it was loaded at the
    569  * same address. We have to compare the data before we map over it, since some
    570  * parts of the relro segment may not be identical due to other libraries in
    571  * the process being loaded at different addresses.
    572  *
    573  * Input:
    574  *   phdr_table  -> program header table
     575  *   phdr_count  -> number of entries in table
     576  *   load_bias   -> load bias
     577  *   fd          -> readable file descriptor to use
     578  * Return:
     579  *   0 on success, -1 on failure (error code in errno).
    580  */
    581 int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
    582                              int fd) {
    583   // Map the file at a temporary location so we can compare its contents.
    584   struct stat file_stat;
    585   if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
    586     return -1;
    587   }
    588   off_t file_size = file_stat.st_size;
    589   void* temp_mapping = nullptr;
    590   if (file_size > 0) {
    591     temp_mapping = mmap(nullptr, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
    592     if (temp_mapping == MAP_FAILED) {
    593       return -1;
    594     }
    595   }
    596   size_t file_offset = 0;
    597 
    598   // Iterate over the relro segments and compare/remap the pages.
    599   const ElfW(Phdr)* phdr = phdr_table;
    600   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
    601 
    602   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    603     if (phdr->p_type != PT_GNU_RELRO) {
    604       continue;
    605     }
    606 
    607     ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    608     ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
    609 
    610     char* file_base = static_cast<char*>(temp_mapping) + file_offset;
    611     char* mem_base = reinterpret_cast<char*>(seg_page_start);
    612     size_t match_offset = 0;
    613     size_t size = seg_page_end - seg_page_start;
    614 
    615     if (file_size - file_offset < size) {
    616       // File is too short to compare to this segment. The contents are likely
    617       // different as well (it's probably for a different library version) so
    618       // just don't bother checking.
    619       break;
    620     }
    621 
    622     while (match_offset < size) {
    623       // Skip over dissimilar pages.
    624       while (match_offset < size &&
    625              memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) {
    626         match_offset += PAGE_SIZE;
    627       }
    628 
    629       // Count similar pages.
    630       size_t mismatch_offset = match_offset;
    631       while (mismatch_offset < size &&
    632              memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) {
    633         mismatch_offset += PAGE_SIZE;
    634       }
    635 
    636       // Map over similar pages.
    637       if (mismatch_offset > match_offset) {
    638         void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset,
    639                          PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset);
    640         if (map == MAP_FAILED) {
    641           munmap(temp_mapping, file_size);
    642           return -1;
    643         }
    644       }
    645 
    646       match_offset = mismatch_offset;
    647     }
    648 
    649     // Add to the base file offset in case there are multiple relro segments.
    650     file_offset += size;
    651   }
    652   munmap(temp_mapping, file_size);
    653   return 0;
    654 }
    655 
    656 
    657 #if defined(__arm__)
    658 
    659 #  ifndef PT_ARM_EXIDX
    660 #    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
    661 #  endif
    662 
     663 /* Return the address and entry count of the .ARM.exidx section in memory,
     664  * if present.
    665  *
    666  * Input:
    667  *   phdr_table  -> program header table
     668  *   phdr_count  -> number of entries in table
    669  *   load_bias   -> load bias
    670  * Output:
    671  *   arm_exidx       -> address of table in memory (null on failure).
    672  *   arm_exidx_count -> number of items in table (0 on failure).
    673  * Return:
     674  *   0 on success, -1 on failure (_no_ error code in errno)
    675  */
    676 int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
    677                              ElfW(Addr) load_bias,
    678                              ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) {
    679   const ElfW(Phdr)* phdr = phdr_table;
    680   const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
    681 
    682   for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    683     if (phdr->p_type != PT_ARM_EXIDX) {
    684       continue;
    685     }
    686 
    687     *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
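    // Each .ARM.exidx entry is two 32-bit words (8 bytes), hence the
    // division by 8 below.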
    688     *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
    689     return 0;
    690   }
    691   *arm_exidx = nullptr;
    692   *arm_exidx_count = 0;
    693   return -1;
    694 }
    695 #endif
    696 
     697 /* Find the address of the ELF file's .dynamic section in memory, setting
     698  * *dynamic to null if it is missing.
    699  *
    700  * Input:
    701  *   phdr_table  -> program header table
     702  *   phdr_count  -> number of entries in table
    703  *   load_bias   -> load bias
    704  * Output:
    705  *   dynamic       -> address of table in memory (null on failure).
    706  *   dynamic_flags -> protection flags for section (unset on failure)
    707  * Return:
    708  *   void
    709  */
    710 void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
    711                                     ElfW(Addr) load_bias, ElfW(Dyn)** dynamic,
    712                                     ElfW(Word)* dynamic_flags) {
    713   *dynamic = nullptr;
    714   for (const ElfW(Phdr)* phdr = phdr_table, *phdr_limit = phdr + phdr_count; phdr < phdr_limit; phdr++) {
    715     if (phdr->p_type == PT_DYNAMIC) {
    716       *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
    717       if (dynamic_flags) {
    718         *dynamic_flags = phdr->p_flags;
    719       }
    720       return;
    721     }
    722   }
    723 }
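/* Minimal usage sketch (illustrative only; the arguments mirror the parameters
 * above):
 *
 *   ElfW(Dyn)* dynamic = nullptr;
 *   ElfW(Word) dynamic_flags = 0;
 *   phdr_table_get_dynamic_section(phdr_table, phdr_count, load_bias,
 *                                  &dynamic, &dynamic_flags);
 *   if (dynamic == nullptr) {
 *     // No PT_DYNAMIC segment: nothing to link against.
 *   }
 */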
    724 
     725 // Finds the address of the program header table as it appears in the loaded
     726 // segments in memory and records it in loaded_phdr_. This is in contrast with
     727 // 'phdr_table_', which is temporary and is released before the library is relocated.
    728 bool ElfReader::FindPhdr() {
    729   const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
    730 
    731   // If there is a PT_PHDR, use it directly.
    732   for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    733     if (phdr->p_type == PT_PHDR) {
    734       return CheckPhdr(load_bias_ + phdr->p_vaddr);
    735     }
    736   }
    737 
    738   // Otherwise, check the first loadable segment. If its file offset
    739   // is 0, it starts with the ELF header, and we can trivially find the
    740   // loaded program header from it.
    741   for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    742     if (phdr->p_type == PT_LOAD) {
    743       if (phdr->p_offset == 0) {
    744         ElfW(Addr)  elf_addr = load_bias_ + phdr->p_vaddr;
    745         const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
    746         ElfW(Addr)  offset = ehdr->e_phoff;
    747         return CheckPhdr((ElfW(Addr))ehdr + offset);
    748       }
    749       break;
    750     }
    751   }
    752 
    753   DL_ERR("can't find loaded phdr for \"%s\"", name_);
    754   return false;
    755 }
    756 
    757 // Ensures that our program header is actually within a loadable
    758 // segment. This should help catch badly-formed ELF files that
    759 // would cause the linker to crash later when trying to access it.
    760 bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
    761   const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
    762   ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
    763   for (ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    764     if (phdr->p_type != PT_LOAD) {
    765       continue;
    766     }
    767     ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    768     ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    769     if (seg_start <= loaded && loaded_end <= seg_end) {
    770       loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
    771       return true;
    772     }
    773   }
    774   DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
    775   return false;
    776 }
    777