// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This is the version of the Android-specific Chromium linker that uses
// the Android M and later system linker to load libraries.

// This source code *cannot* depend on anything from base/ or the C++
// STL, to keep the final library small, and avoid ugly dependency issues.

#include "modern_linker_jni.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <jni.h>
#include <limits.h>
#include <link.h>
#include <stddef.h>
#include <string.h>

#include "android_dlext.h"
#include "linker_jni.h"

#define PAGE_START(x) ((x) & PAGE_MASK)
#define PAGE_END(x) PAGE_START((x) + (PAGE_SIZE - 1))
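// Illustrative example (assuming 4096-byte pages): PAGE_START(0x12345)
// yields 0x12000, and PAGE_END(0x12345) rounds up to 0x13000.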

namespace chromium_android_linker {
namespace {

// Record of the Java VM passed to JNI_OnLoad().
static JavaVM* s_java_vm = nullptr;

// Get the CPU ABI string for which the linker is running.
//
// The returned string is used to construct the path to libchrome.so when
// loading directly from APK.
//
// |env| is the current JNI environment handle.
// |clazz| is the static class handle for org.chromium.base.Linker,
// and is ignored here.
// Returns the CPU ABI string for which the linker is running.
jstring GetCpuAbi(JNIEnv* env, jclass clazz) {
#if defined(__arm__) && defined(__ARM_ARCH_7A__)
  static const char* kCurrentAbi = "armeabi-v7a";
#elif defined(__arm__)
  static const char* kCurrentAbi = "armeabi";
#elif defined(__i386__)
  static const char* kCurrentAbi = "x86";
#elif defined(__mips__)
  static const char* kCurrentAbi = "mips";
#elif defined(__x86_64__)
  static const char* kCurrentAbi = "x86_64";
#elif defined(__aarch64__)
  static const char* kCurrentAbi = "arm64-v8a";
#else
#error "Unsupported target abi"
#endif
  return env->NewStringUTF(kCurrentAbi);
}

// Convenience wrapper around dlsym() on the main executable. Returns
// the address of the requested symbol, or nullptr if not found. Status
// is available from dlerror().
void* Dlsym(const char* symbol_name) {
  static void* handle = nullptr;

  if (!handle)
    handle = dlopen(nullptr, RTLD_NOW);

  void* result = dlsym(handle, symbol_name);
  return result;
}

// dl_iterate_phdr() wrapper, accessed via dlsym lookup. Done this way
// so that this code compiles for Android versions that are too early to
// offer it. Checks in LibraryLoader.java should ensure that we
// never reach here at runtime on Android versions that are too old to
// supply dl_iterate_phdr; that is, earlier than Android M. Returns
// false if no dl_iterate_phdr() is available, otherwise true with the
// return value from dl_iterate_phdr() in |status|.
bool DlIteratePhdr(int (*callback)(dl_phdr_info*, size_t, void*),
                   void* data,
                   int* status) {
  using DlIteratePhdrCallback = int (*)(dl_phdr_info*, size_t, void*);
  using DlIteratePhdrFunctionPtr = int (*)(DlIteratePhdrCallback, void*);
  static DlIteratePhdrFunctionPtr function_ptr = nullptr;

  if (!function_ptr) {
    function_ptr =
        reinterpret_cast<DlIteratePhdrFunctionPtr>(Dlsym("dl_iterate_phdr"));
    if (!function_ptr) {
      LOG_ERROR("dlsym: dl_iterate_phdr: %s", dlerror());
      return false;
    }
  }

  *status = (*function_ptr)(callback, data);
  return true;
}

// Convenience struct wrapper around android_dlextinfo.
struct AndroidDlextinfo {
  AndroidDlextinfo(int flags,
                   void* reserved_addr, size_t reserved_size, int relro_fd) {
    memset(&extinfo, 0, sizeof(extinfo));
    extinfo.flags = flags;
    extinfo.reserved_addr = reserved_addr;
    extinfo.reserved_size = reserved_size;
    extinfo.relro_fd = relro_fd;
  }

  android_dlextinfo extinfo;
};
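
// As a sketch of how this wrapper is used below: a load into a reserved
// address region that also consumes a shared RELRO might build
//   AndroidDlextinfo dlextinfo(
//       ANDROID_DLEXT_RESERVED_ADDRESS | ANDROID_DLEXT_USE_RELRO,
//       addr, size, relro_fd);
// LoadLibrary() adds ANDROID_DLEXT_USE_RELRO only when a usable relro_fd
// and matching RELRO start address are supplied.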

// android_dlopen_ext() wrapper, accessed via dlsym lookup. Returns false
// if no android_dlopen_ext() is available, otherwise true with the return
// value from android_dlopen_ext() in |status|.
bool AndroidDlopenExt(const char* filename,
                      int flag,
                      const AndroidDlextinfo* dlextinfo,
                      void** status) {
  using DlopenExtFunctionPtr = void* (*)(const char*,
                                         int, const android_dlextinfo*);
  static DlopenExtFunctionPtr function_ptr = nullptr;

  if (!function_ptr) {
    function_ptr =
        reinterpret_cast<DlopenExtFunctionPtr>(Dlsym("android_dlopen_ext"));
    if (!function_ptr) {
      LOG_ERROR("dlsym: android_dlopen_ext: %s", dlerror());
      return false;
    }
  }

  const android_dlextinfo* extinfo = &dlextinfo->extinfo;
  LOG_INFO("android_dlopen_ext:"
           " flags=0x%llx, reserved_addr=%p, reserved_size=%d, relro_fd=%d",
           static_cast<long long>(extinfo->flags),
           extinfo->reserved_addr,
           static_cast<int>(extinfo->reserved_size),
           extinfo->relro_fd);

  *status = (*function_ptr)(filename, flag, extinfo);
  return true;
}

// Callback data for FindLoadedLibrarySize().
struct CallbackData {
  explicit CallbackData(void* address)
      : load_address(address), load_size(0), min_vaddr(0) { }

  const void* load_address;
  size_t load_size;
  size_t min_vaddr;
};

// Callback for dl_iterate_phdr(). Read phdrs to identify whether or not
// this library's load address matches the |load_address| passed in
// |data|. If yes, pass back load size and min vaddr via |data|. A non-zero
// return value terminates iteration.
int FindLoadedLibrarySize(dl_phdr_info* info, size_t size UNUSED, void* data) {
  CallbackData* callback_data = reinterpret_cast<CallbackData*>(data);

  // Use max and min vaddr to compute the library's load size.
  ElfW(Addr) min_vaddr = ~0;
  ElfW(Addr) max_vaddr = 0;

  bool is_matching = false;
  for (size_t i = 0; i < info->dlpi_phnum; ++i) {
    const ElfW(Phdr)* phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_LOAD)
      continue;

    // See if this segment's load address matches what we passed to
    // android_dlopen_ext as extinfo.reserved_addr.
    void* load_addr = reinterpret_cast<void*>(info->dlpi_addr + phdr->p_vaddr);
    if (load_addr == callback_data->load_address)
      is_matching = true;

    if (phdr->p_vaddr < min_vaddr)
      min_vaddr = phdr->p_vaddr;
    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr)
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
  }

  // If this library matches what we seek, return its load size.
  if (is_matching) {
    callback_data->load_size = PAGE_END(max_vaddr) - PAGE_START(min_vaddr);
    callback_data->min_vaddr = min_vaddr;
    return true;
  }

  return false;
}
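
// For illustration of FindLoadedLibrarySize() (hypothetical values,
// 4096-byte pages): two PT_LOAD segments at p_vaddr 0x0 (p_memsz 0x5000)
// and p_vaddr 0x6000 (p_memsz 0x2340) give min_vaddr 0x0 and max_vaddr
// 0x8340, so load_size = PAGE_END(0x8340) - PAGE_START(0x0) = 0x9000.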

// Helper class for anonymous memory mapping.
class ScopedAnonymousMmap {
 public:
  ScopedAnonymousMmap(void* addr, size_t size);

  ~ScopedAnonymousMmap() { munmap(addr_, size_); }

  void* GetAddr() const { return effective_addr_; }
  void Release() { addr_ = nullptr; size_ = 0; effective_addr_ = nullptr; }

 private:
  void* addr_;
  size_t size_;

  // The effective_addr_ is the address seen by client code. It may or may
  // not be the same as addr_, the real start of the anonymous mapping.
  void* effective_addr_;
};
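
// Typical usage, as in LoadLibrary() below: construct with the requested
// address and kAddressSpaceReservationSize, check GetAddr() against
// MAP_FAILED, hand the address to android_dlopen_ext(), then call
// Release() so that the destructor does not unmap memory now managed by
// the system linker.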

// ScopedAnonymousMmap constructor. |addr| is a requested mapping address, or
// zero if any address will do, and |size| is the size of the mapping
// required.
ScopedAnonymousMmap::ScopedAnonymousMmap(void* addr, size_t size) {
#if RESERVE_BREAKPAD_GUARD_REGION
  // Increase size to extend the address reservation mapping so that it will
  // also include a guard region from load_bias_ to start_addr. If loading
  // at a fixed address, move our requested address back by the guard region
  // size.
  size += kBreakpadGuardRegionBytes;
  if (addr) {
    if (addr < reinterpret_cast<void*>(kBreakpadGuardRegionBytes)) {
      LOG_ERROR("Fixed address %p is too low to accommodate Breakpad guard",
                addr);
      addr_ = MAP_FAILED;
      size_ = 0;
      return;
    }
    addr = reinterpret_cast<void*>(
        reinterpret_cast<uintptr_t>(addr) - kBreakpadGuardRegionBytes);
  }
  LOG_INFO("Added %d to size, for Breakpad guard",
           static_cast<int>(kBreakpadGuardRegionBytes));
#endif
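
  // Worked example (illustrative numbers): with a guard region of
  // 0x1000000 bytes and a requested |addr| of 0x60000000, the mapping is
  // attempted at 0x5F000000 with |size| grown by 0x1000000, and
  // effective_addr_ (set below) points back at 0x60000000.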

  addr_ = mmap(addr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr_ != MAP_FAILED) {
    size_ = size;
  } else {
    LOG_INFO("mmap failed: %s", strerror(errno));
    size_ = 0;
  }
  effective_addr_ = addr_;

#if RESERVE_BREAKPAD_GUARD_REGION
  // If we increased size to accommodate a Breakpad guard region, move
  // the effective address, if valid, upwards by the size of the guard region.
  if (addr_ == MAP_FAILED)
    return;
  if (addr_ < reinterpret_cast<void*>(kBreakpadGuardRegionBytes)) {
    LOG_ERROR("Map address %p is too low to accommodate Breakpad guard",
              addr_);
    effective_addr_ = MAP_FAILED;
  } else {
    effective_addr_ = reinterpret_cast<void*>(
        reinterpret_cast<uintptr_t>(addr_) + kBreakpadGuardRegionBytes);
  }
#endif
}

// Helper for LoadLibrary(). Return the actual size of the library loaded
// at |addr| in |load_size|, and the min vaddr in |min_vaddr|. Returns false
// if the library appears not to be loaded.
bool GetLibraryLoadSize(void* addr, size_t* load_size, size_t* min_vaddr) {
  LOG_INFO("Called for %p", addr);

  // Find the real load size and min vaddr for the library loaded at |addr|.
  CallbackData callback_data(addr);
  int status = 0;
  if (!DlIteratePhdr(&FindLoadedLibrarySize, &callback_data, &status)) {
    LOG_ERROR("No dl_iterate_phdr function found");
    return false;
  }
  if (!status) {
    LOG_ERROR("Failed to find library at address %p", addr);
    return false;
  }

  *load_size = callback_data.load_size;
  *min_vaddr = callback_data.min_vaddr;
  return true;
}

// Helper for LoadLibrary(). We reserve an address space larger than
// needed. After library loading we want to trim that reservation to only
// what is needed. Failure to trim should not occur, but if it does then
// everything will still run, so we treat it as a warning rather than
// an error.
void ResizeReservedAddressSpace(void* addr,
                                size_t reserved_size,
                                size_t load_size,
                                size_t min_vaddr) {
  LOG_INFO("Called for %p, reserved %d, loaded %d, min_vaddr %d",
           addr, static_cast<int>(reserved_size),
           static_cast<int>(load_size), static_cast<int>(min_vaddr));
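
  // Sketch of the reservation at this point (with the Breakpad guard
  // region enabled; addresses grow to the right):
  //   [addr - kBreakpadGuardRegionBytes, addr)    guard region
  //   [addr, addr + load_size)                    loaded library
  //   [addr + load_size, addr + reserved_size)    unused tail
  // The code below unmaps the unused tail, then trims the guard region so
  // that only min_vaddr bytes of it remain in front of the library.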

  const uintptr_t uintptr_addr = reinterpret_cast<uintptr_t>(addr);

  if (reserved_size > load_size) {
    // Unmap the part of the reserved address space that is beyond the end of
    // the loaded library data.
    void* unmap = reinterpret_cast<void*>(uintptr_addr + load_size);
    const size_t length = reserved_size - load_size;
    if (munmap(unmap, length) == -1) {
      LOG_ERROR("WARNING: unmap of %d bytes at %p failed: %s",
                static_cast<int>(length), unmap, strerror(errno));
    }
  } else {
    LOG_ERROR("WARNING: library reservation was too small");
  }

#if RESERVE_BREAKPAD_GUARD_REGION
  if (kBreakpadGuardRegionBytes > min_vaddr) {
    // Unmap the part of the reserved address space that is ahead of where we
    // actually need the guard region to start. Resizes the guard region to
    // min_vaddr bytes.
    void* unmap =
        reinterpret_cast<void*>(uintptr_addr - kBreakpadGuardRegionBytes);
    const size_t length = kBreakpadGuardRegionBytes - min_vaddr;
    if (munmap(unmap, length) == -1) {
      LOG_ERROR("WARNING: unmap of %d bytes at %p failed: %s",
                static_cast<int>(length), unmap, strerror(errno));
    }
  } else {
    LOG_ERROR("WARNING: breakpad guard region reservation was too small");
  }
#endif
}

// Load a library with the chromium linker, using android_dlopen_ext().
//
// android_dlopen_ext() understands how to directly load from a zipfile,
// based on the format of |dlopen_ext_path|. If it contains a "!/" separator
// then the string is of the form <zip_path>!/<file_path>, and names the
// file_path element within the zip file at zip_path. A library in a
// zipfile must be uncompressed and page aligned. The library is expected
// to be lib/<abi_tag>/crazy.<basename>. The <abi_tag> used will be the
// same as the abi for this linker. The "crazy." prefix is included
// so that the Android Package Manager doesn't extract the library into
// /data/app-lib.
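//
// For example (illustrative path only), a zip-style |dlopen_ext_path|
// might look like:
//   <apk_path>!/lib/armeabi-v7a/crazy.libfoo.so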
//
// If |dlopen_ext_path| contains no "!/" separator then android_dlopen_ext()
// assumes that it is a normal path to a standalone library file.
//
// Loading the library will also call its JNI_OnLoad() method, which
// shall register its methods. Note that lazy native method resolution
// will _not_ work after this, because Dalvik uses the system's dlsym()
// which won't see the new library, so explicit registration is mandatory.
//
// |env| is the current JNI environment handle.
// |clazz| is the static class handle for org.chromium.base.Linker,
// and is ignored here.
// |dlopen_ext_path| is the library identifier (e.g. libfoo.so).
// |load_address| is an explicit load address.
// |lib_info_obj| is a LibInfo handle used to communicate information
// with the Java side.
// Return true on success.
jboolean LoadLibrary(JNIEnv* env,
                     jclass clazz,
                     jstring dlopen_ext_path,
                     jlong load_address,
                     jobject lib_info_obj) {
  String dlopen_library_path(env, dlopen_ext_path);
  LOG_INFO("Called for %s, at address 0x%llx",
           dlopen_library_path.c_str(), load_address);

  if (!IsValidAddress(load_address)) {
    LOG_ERROR("Invalid address 0x%llx", load_address);
    return false;
  }

  const size_t size = kAddressSpaceReservationSize;
  void* wanted_addr = reinterpret_cast<void*>(load_address);

  // Reserve the address space into which we load the library.
  ScopedAnonymousMmap mapping(wanted_addr, size);
  void* addr = mapping.GetAddr();
  if (addr == MAP_FAILED) {
    LOG_ERROR("Failed to reserve space for load");
    return false;
  }
  if (wanted_addr && addr != wanted_addr) {
    LOG_ERROR("Failed to obtain fixed address for load");
    return false;
  }

  // Build dlextinfo to load the library into the reserved space, using
  // the shared RELRO if supplied and if its start address matches addr.
  int relro_fd = -1;
  int flags = ANDROID_DLEXT_RESERVED_ADDRESS;
  if (wanted_addr && lib_info_obj) {
    void* relro_start;
    s_lib_info_fields.GetRelroInfo(env, lib_info_obj,
                                   reinterpret_cast<size_t*>(&relro_start),
                                   nullptr, &relro_fd);
    if (relro_fd != -1 && relro_start == addr) {
      flags |= ANDROID_DLEXT_USE_RELRO;
    }
  }
  AndroidDlextinfo dlextinfo(flags, addr, size, relro_fd);

  // Load the library into the reserved space.
  const char* path = dlopen_library_path.c_str();
  void* handle = nullptr;
  if (!AndroidDlopenExt(path, RTLD_NOW, &dlextinfo, &handle)) {
    LOG_ERROR("No android_dlopen_ext function found");
    return false;
  }
  if (handle == nullptr) {
    LOG_ERROR("android_dlopen_ext: %s", dlerror());
    return false;
  }

  // For https://crbug.com/568880.
  //
  // Release the scoped mapping. Now that the library has loaded we can no
  // longer assume we have control of all of this area. libdl knows addr and
  // has loaded the library into some portion of the reservation. It will
  // not expect that portion of memory to be arbitrarily unmapped.
  mapping.Release();

  // After loading we can find the actual size of the library. It should
  // be less than the space we reserved for it.
  size_t load_size = 0;
  size_t min_vaddr = 0;
  if (!GetLibraryLoadSize(addr, &load_size, &min_vaddr)) {
    LOG_ERROR("Unable to find size for load at %p", addr);
    return false;
  }

  // Trim the reservation mapping to match the library's actual size. Failure
  // to resize is not a fatal error. At worst we lose a portion of virtual
  // address space that we might otherwise have recovered. Note that trimming
  // the mapping here requires that we have already released the scoped
  // mapping.
  ResizeReservedAddressSpace(addr, size, load_size, min_vaddr);

  // Locate the loaded library's JNI_OnLoad() function and, if found, call it.
  using JNI_OnLoadFunctionPtr = int (*)(void* vm, void* reserved);
  auto jni_onload =
      reinterpret_cast<JNI_OnLoadFunctionPtr>(dlsym(handle, "JNI_OnLoad"));
  if (jni_onload != nullptr) {
    // Check that JNI_OnLoad returns a usable JNI version.
    int jni_version = (*jni_onload)(s_java_vm, nullptr);
    if (jni_version < JNI_VERSION_1_4) {
      LOG_ERROR("JNI version is invalid: %d", jni_version);
      return false;
    }
  }

  // Note the load address and load size in the supplied libinfo object.
  const size_t cast_addr = reinterpret_cast<size_t>(addr);
  s_lib_info_fields.SetLoadInfo(env, lib_info_obj, cast_addr, load_size);

  LOG_INFO("Success loading library %s", dlopen_library_path.c_str());
  return true;
}

// Create a shared RELRO file for a library, using android_dlopen_ext().
//
// Loads the library similarly to LoadLibrary() above, by reserving address
// space and then using android_dlopen_ext() to load into the reserved
// area. Adds flags to android_dlopen_ext() to save the library's RELRO
// memory into the given file path, then unloads the library and returns.
//
// Does not call JNI_OnLoad() or otherwise execute any code from the library.
//
// |env| is the current JNI environment handle.
// |clazz| is the static class handle for org.chromium.base.Linker,
// and is ignored here.
// |dlopen_ext_path| is the library identifier (e.g. libfoo.so).
// |load_address| is an explicit load address.
// |relro_path| is the path to the file into which RELRO data is written.
// |lib_info_obj| is a LibInfo handle used to communicate information
// with the Java side.
// Return true on success.
jboolean CreateSharedRelro(JNIEnv* env,
                           jclass clazz,
                           jstring dlopen_ext_path,
                           jlong load_address,
                           jstring relro_path,
                           jobject lib_info_obj) {
  String dlopen_library_path(env, dlopen_ext_path);
  LOG_INFO("Called for %s, at address 0x%llx",
           dlopen_library_path.c_str(), load_address);

  if (!IsValidAddress(load_address) || load_address == 0) {
    LOG_ERROR("Invalid address 0x%llx", load_address);
    return false;
  }

  const size_t size = kAddressSpaceReservationSize;
  void* wanted_addr = reinterpret_cast<void*>(load_address);

  // Reserve the address space into which we load the library.
  ScopedAnonymousMmap mapping(wanted_addr, size);
  void* addr = mapping.GetAddr();
  if (addr == MAP_FAILED) {
    LOG_ERROR("Failed to reserve space for load");
    return false;
  }
  if (addr != wanted_addr) {
    LOG_ERROR("Failed to obtain fixed address for load");
    return false;
  }

  // Open the shared RELRO file for write. Overwrites any prior content.
  String shared_relro_path(env, relro_path);
  const char* filepath = shared_relro_path.c_str();
  unlink(filepath);
  int relro_fd = open(filepath, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
  if (relro_fd == -1) {
    LOG_ERROR("open: %s: %s", filepath, strerror(errno));
    return false;
  }

  // Use android_dlopen_ext() to create the shared RELRO.
  const int flags = ANDROID_DLEXT_RESERVED_ADDRESS
                    | ANDROID_DLEXT_WRITE_RELRO;
  AndroidDlextinfo dlextinfo(flags, addr, size, relro_fd);

  const char* path = dlopen_library_path.c_str();
  void* handle = nullptr;
  if (!AndroidDlopenExt(path, RTLD_NOW, &dlextinfo, &handle)) {
    LOG_ERROR("No android_dlopen_ext function found");
    close(relro_fd);
    return false;
  }
  if (handle == nullptr) {
    LOG_ERROR("android_dlopen_ext: %s", dlerror());
    close(relro_fd);
    return false;
  }

  // For https://crbug.com/568880.
  //
  // Release the scoped mapping. See comment in LoadLibrary() above for more.
  mapping.Release();

  // For https://crbug.com/568880.
  //
  // Unload the library from this address. Calling dlclose() will unmap the
  // part of the reservation occupied by the library, but will leave the
  // remainder of the reservation mapped, and we have no effective way of
  // unmapping the leftover portions because we don't know where dlclose's
  // unmap ended.
  //
  // For now we live with this. It is a loss of some virtual address space
  // (but not actual memory), and because it occurs only once and only in
  // the browser process, and never in renderer processes, it is not a
  // significant issue.
  //
  // TODO(simonb): Between mapping.Release() and here, consider calling the
  // functions that trim the reservation down to the size of the loaded
  // library. This may help recover some or all of the virtual address space
  // that is otherwise lost.
  dlclose(handle);

  // Reopen the shared RELRO fd in read-only mode. This ensures that nothing
  // can write to it through the RELRO fd that we return in libinfo.
  close(relro_fd);
  relro_fd = open(filepath, O_RDONLY);
  if (relro_fd == -1) {
    LOG_ERROR("open: %s: %s", filepath, strerror(errno));
    return false;
  }

  // Delete the directory entry for the RELRO file. The fd we hold ensures
  // that its data remains intact.
  if (unlink(filepath) == -1) {
    LOG_ERROR("unlink: %s: %s", filepath, strerror(errno));
    return false;
  }

  // Note the shared RELRO fd in the supplied libinfo object. In this
  // implementation the RELRO start is set to the library's load address,
  // and the RELRO size is unused.
  const size_t cast_addr = reinterpret_cast<size_t>(addr);
  s_lib_info_fields.SetRelroInfo(env, lib_info_obj, cast_addr, 0, relro_fd);

  LOG_INFO("Success creating shared RELRO %s", shared_relro_path.c_str());
  return true;
}

const JNINativeMethod kNativeMethods[] = {
    {"nativeGetCpuAbi",
     "("
     ")"
     "Ljava/lang/String;",
     reinterpret_cast<void*>(&GetCpuAbi)},
    {"nativeLoadLibrary",
     "("
     "Ljava/lang/String;"
     "J"
     "Lorg/chromium/base/library_loader/Linker$LibInfo;"
     ")"
     "Z",
     reinterpret_cast<void*>(&LoadLibrary)},
    {"nativeCreateSharedRelro",
     "("
     "Ljava/lang/String;"
     "J"
     "Ljava/lang/String;"
     "Lorg/chromium/base/library_loader/Linker$LibInfo;"
     ")"
     "Z",
     reinterpret_cast<void*>(&CreateSharedRelro)},
};
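
// For reference, a signature string above such as
// "(Ljava/lang/String;JLorg/chromium/base/library_loader/Linker$LibInfo;)Z"
// decodes to parameters (String, long, LibInfo) with a boolean return,
// matching nativeLoadLibrary() on the Java side.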

const size_t kNumNativeMethods =
    sizeof(kNativeMethods) / sizeof(kNativeMethods[0]);

}  // namespace

bool ModernLinkerJNIInit(JavaVM* vm, JNIEnv* env) {
  LOG_INFO("Entering");

  // Register native methods.
  jclass linker_class;
  if (!InitClassReference(env,
                          "org/chromium/base/library_loader/ModernLinker",
                          &linker_class))
    return false;

  LOG_INFO("Registering native methods");
  if (env->RegisterNatives(linker_class, kNativeMethods, kNumNativeMethods) < 0)
    return false;

  // Record the Java VM handle.
  s_java_vm = vm;

  return true;
}

}  // namespace chromium_android_linker