/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_DEX2OAT_LINKER_IMAGE_WRITER_H_
#define ART_DEX2OAT_LINKER_IMAGE_WRITER_H_

#include <stdint.h>
#include "base/memory_tool.h"

#include <atomic>
#include <cstddef>
#include <memory>
#include <ostream>
#include <set>
#include <stack>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "art_method.h"
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
#include "base/hash_set.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/os.h"
#include "base/safe_map.h"
#include "base/utils.h"
#include "class_table.h"
#include "image.h"
#include "intern_table.h"
#include "lock_word.h"
#include "mirror/dex_cache.h"
#include "oat_file.h"
#include "obj_ptr.h"

namespace art {
namespace gc {
namespace accounting {
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
}  // namespace accounting
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

namespace mirror {
class ClassLoader;
}  // namespace mirror

class ClassLoaderVisitor;
class CompilerOptions;
template<class T> class Handle;
class ImTable;
class ImtConflictTable;
class TimingLogger;

static constexpr int kInvalidFd = -1;

namespace linker {

// Write a Space built during compilation for use during execution.
class ImageWriter final {
 public:
  ImageWriter(const CompilerOptions& compiler_options,
              uintptr_t image_begin,
              ImageHeader::StorageMode image_storage_mode,
              const std::vector<std::string>& oat_filenames,
              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
              jobject class_loader,
              const HashSet<std::string>* dirty_image_objects);

  /*
   * Modifies the heap and collects information about objects and code so that
   * they can be written to the boot or app image later.
   *
   * First, unneeded classes are removed from the managed heap.  Next, we
   * remove cached values and calculate necessary metadata for later in the
   * process. Optionally, some debugging information is collected and used to
   * verify the state of the heap at this point.  Next, metadata from earlier
   * is used to calculate offsets of references to strings to speed up string
   * interning when the image is loaded.  Lastly, we allocate enough memory to
   * fit all image data minus the bitmap and relocation sections.
   *
   * This function should only be called when all objects to be included in the
   * image have been initialized and all native methods have been generated.  In
   * addition, no other thread should be modifying the heap.
   */
  bool PrepareImageAddressSpace(TimingLogger* timings);
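
  // A typical driver sequence, sketched from the declarations in this header
  // (the real call sites live in dex2oat; variable names here are
  // placeholders, not part of the API):
  //
  //   ImageWriter writer(options, image_base, storage_mode, oat_filenames,
  //                      dex_file_oat_index_map, class_loader, dirty_objects);
  //   writer.PrepareImageAddressSpace(&timings);      // Lay out the heap.
  //   for (size_t i = 0; i != oat_filenames.size(); ++i) {
  //     writer.UpdateOatFileLayout(i, loaded_size, data_offset, data_size);
  //     writer.UpdateOatFileHeader(i, oat_header);    // Checksum, trampolines.
  //   }
  //   writer.Write(kInvalidFd, image_filenames, oat_filenames);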

  bool IsImageAddressSpaceReady() const {
    DCHECK(!image_infos_.empty());
    for (const ImageInfo& image_info : image_infos_) {
      if (image_info.image_roots_address_ == 0u) {
        return false;
      }
    }
    return true;
  }

  ObjPtr<mirror::ClassLoader> GetAppClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_);

  template <typename T>
  T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (object == nullptr || IsInBootImage(object)) {
      return object;
    } else {
      size_t oat_index = GetOatIndex(object);
      const ImageInfo& image_info = GetImageInfo(oat_index);
      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
    }
  }
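
  // Example (illustrative values): if an object was assigned image offset
  // 0x100 and its image's target base (image_begin_) is 0x70000000, then
  // GetImageAddress() returns 0x70000100, the address the object will have
  // once the image is mapped at runtime. Null and boot-image pointers pass
  // through unchanged. Contrast with GetLocalAddress() below, which returns
  // the object's position in the writer's own mapping.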

  ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  const void* GetIntrinsicReferenceAddress(uint32_t intrinsic_data)
      REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetOatFileOffset(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_offset_;
  }

  const uint8_t* GetOatFileBegin(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_file_begin_;
  }

  // If image_fd is not kInvalidFd, then we use that for the image file; otherwise we open
  // the names in image_filenames. The corresponding oat files are named by oat_filenames.
  bool Write(int image_fd,
             const std::vector<std::string>& image_filenames,
             const std::vector<std::string>& oat_filenames)
      REQUIRES(!Locks::mutator_lock_);
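
  // Example (sketch; the file names are placeholders): a caller that has
  // already opened the image file passes its descriptor, otherwise the writer
  // opens the named paths itself:
  //
  //   writer.Write(opened_image_fd, {"boot.art"}, {"boot.oat"});
  //   writer.Write(kInvalidFd, {"boot.art"}, {"boot.oat"});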

  uintptr_t GetOatDataBegin(size_t oat_index) {
    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
  }

  // Get the index of the oat file containing the dex file.
  //
  // This "oat_index" is used to retrieve information about the memory layout
  // of the oat file and its associated image file, needed for link-time patching
  // of references to the image or across oat files.
  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;

  // Get the index of the oat file containing the dex file served by the dex cache.
  size_t GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the oat layout for the given oat file.
  // This will make the oat_offset for the next oat file valid.
  void UpdateOatFileLayout(size_t oat_index,
                           size_t oat_loaded_size,
                           size_t oat_data_offset,
                           size_t oat_data_size);
  // Update information about the oat header, i.e. checksum and trampoline offsets.
  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);

 private:
  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;

  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until the ArtMethod bins).
  enum class Bin {
    kKnownDirty,                  // Known dirty objects from the --dirty-image-objects list.
    kMiscDirty,                   // Dex caches, object locks, etc...
    kClassVerified,               // Class verified, but initializers haven't been run.
    // Unknown mix of clean/dirty:
    kRegular,
    kClassInitialized,            // Class initializers have been run.
    // All classes get their own bins since their fields are often dirty:
    kClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics.
    // Likely-clean:
    kString,                      // [String] Almost always immutable (except for the object header).
    // Add more bins here if we add more segregation code.
    // Non-mirror bins must be below.
    // ArtFields should always be clean.
    kArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kArtMethodDirty,
    // IMTs (clean).
    kImTable,
    // Conflict tables (clean).
    kIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kRuntimeMethod,
    // Metadata bin for data that is temporary during the image's lifetime.
    kMetadata,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, and as such their dirtiness is not important for the clean/dirty separation,
    // we arbitrarily keep them at the end of the native data.
    kDexCacheArray,               // Arrays belonging to dex caches.
    kLast = kDexCacheArray,
    // Number of bins which are for mirror objects.
    kMirrorCount = kArtField,
  };
  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
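
  // Illustrative classifications (not an exhaustive specification): a string
  // lands in Bin::kString; an object named in --dirty-image-objects lands in
  // Bin::kKnownDirty; a class whose initializer has run and whose statics are
  // all final lands in Bin::kClassInitializedFinalStatics; objects with no
  // better classification default to Bin::kRegular.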

  enum class NativeObjectRelocationType {
    kArtField,
    kArtFieldArray,
    kArtMethodClean,
    kArtMethodArrayClean,
    kArtMethodDirty,
    kArtMethodArrayDirty,
    kGcRootPointer,
    kRuntimeMethod,
    kIMTable,
    kIMTConflictTable,
    kDexCacheArray,
  };
  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);

  enum class StubType {
    kJNIDlsymLookup,
    kQuickGenericJNITrampoline,
    kQuickIMTConflictTrampoline,
    kQuickResolutionTrampoline,
    kQuickToInterpreterBridge,
    kLast = kQuickToInterpreterBridge,
  };
  friend std::ostream& operator<<(std::ostream& stream, const StubType& stub_type);

  static constexpr size_t kBinBits =
      MinimumBitsToStore<uint32_t>(static_cast<size_t>(Bin::kMirrorCount) - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
  // 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
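
  // Worked example (assuming LockWord::kGCStateSize == 2, which matches the
  // current lock word layout): there are 7 mirror bins (kMirrorCount ==
  // kArtField), so kBinBits == MinimumBitsToStore(6) == 3 and
  // kBinShift == 32 - 3 - 2 == 27. The top two (GC state) bits stay zero, the
  // bin number occupies bits 29..27 (kBinMask == 0x38000000), and the low
  // bits hold the index within the bin.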

  // Number of bins, including non-mirror bins.
  static constexpr size_t kNumberOfBins = static_cast<size_t>(Bin::kLast) + 1u;

  // Number of stub types.
  static constexpr size_t kNumberOfStubTypes = static_cast<size_t>(StubType::kLast) + 1u;

  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32 bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    explicit BinSlot(uint32_t lockword);
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support.
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

   private:
    // Must be the same size as LockWord; any larger and we would truncate the data.
    uint32_t lockword_;
  };
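
  // Example (illustrative): BinSlot(Bin::kString, 0x40) packs the bin number
  // into the top kBinBits bits below the GC state bits and the byte offset
  // 0x40 into the low bits, so GetBin() == Bin::kString and GetIndex() == 0x40.
  // Uint32Value() is what gets parked in an object's lock word while
  // forwarding addresses are being calculated.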

  struct ImageInfo {
    ImageInfo();
    ImageInfo(ImageInfo&&) = default;

    /*
     * Creates ImageSection objects that describe most of the sections of a
     * boot or AppImage. The following sections are not included:
     *   - ImageHeader::kSectionImageBitmap
     *
     * In addition, the ImageHeader is not covered here.
     *
     * This function will return the total size of the covered sections as well
     * as a vector containing the individual ImageSection objects.
     */
    std::pair<size_t, std::vector<ImageSection>> CreateImageSections() const;
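
    // Usage sketch (hypothetical caller, kept pre-C++17 to match this file):
    //
    //   std::pair<size_t, std::vector<ImageSection>> info =
    //       image_info.CreateImageSections();
    //   size_t covered_size = info.first;  // Excludes the header and bitmap.
    //   const std::vector<ImageSection>& sections = info.second;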

    size_t GetStubOffset(StubType stub_type) const {
      DCHECK_LT(static_cast<size_t>(stub_type), kNumberOfStubTypes);
      return stub_offsets_[static_cast<size_t>(stub_type)];
    }

    void SetStubOffset(StubType stub_type, size_t offset) {
      DCHECK_LT(static_cast<size_t>(stub_type), kNumberOfStubTypes);
      stub_offsets_[static_cast<size_t>(stub_type)] = offset;
    }

    size_t GetBinSlotOffset(Bin bin) const {
      DCHECK_LT(static_cast<size_t>(bin), kNumberOfBins);
      return bin_slot_offsets_[static_cast<size_t>(bin)];
    }

    void IncrementBinSlotSize(Bin bin, size_t size_to_add) {
      DCHECK_LT(static_cast<size_t>(bin), kNumberOfBins);
      bin_slot_sizes_[static_cast<size_t>(bin)] += size_to_add;
    }

    size_t GetBinSlotSize(Bin bin) const {
      DCHECK_LT(static_cast<size_t>(bin), kNumberOfBins);
      return bin_slot_sizes_[static_cast<size_t>(bin)];
    }

    void IncrementBinSlotCount(Bin bin, size_t count_to_add) {
      DCHECK_LT(static_cast<size_t>(bin), kNumberOfBins);
      bin_slot_count_[static_cast<size_t>(bin)] += count_to_add;
    }

    // Calculate the sum total of the bin slot sizes in [0, up_to).
    size_t GetBinSizeSum(Bin up_to) const;
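
    // For example, GetBinSizeSum(Bin::kArtField) is the total size of all
    // mirror-object bins, since kMirrorCount == kArtField marks the first
    // non-mirror bin. Per the comments on the arrays below, each entry of
    // bin_slot_offsets_ is the prefix sum of bin_slot_sizes_ over the
    // preceding bins.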

    MemMap image_;  // Memory mapped for generating the image.

    // Target begin of this image. Note: it is not valid to write here; this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after layout (otherwise null).
    uint8_t* image_begin_ = nullptr;

    // Offset to the free space in image_, initially the size of the image header.
    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_ = 0;

    // Oat data.
    // Offset of the oat file for this image from the start of the oat files. This is
    // valid when the previous oat file has been written.
    size_t oat_offset_ = 0;
    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
    const uint8_t* oat_file_begin_ = nullptr;
    size_t oat_loaded_size_ = 0;
    const uint8_t* oat_data_begin_ = nullptr;
    size_t oat_size_ = 0;  // Size of the corresponding oat data.
    // The oat header checksum, valid after UpdateOatFileHeader().
    uint32_t oat_checksum_ = 0u;

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays.
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offsets from oat_data_begin_ to the stubs.
    uint32_t stub_offsets_[kNumberOfStubTypes] = {};

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kNumberOfBins] = {};  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kNumberOfBins] = {};  // Number of bytes in previous bins.
    size_t bin_slot_count_[kNumberOfBins] = {};  // Number of objects in a bin.

    // Cached size of the intern table for when we allocate memory.
    size_t intern_table_bytes_ = 0;

    // Number of image class table bytes.
    size_t class_table_bytes_ = 0;

    // Number of object fixup bytes.
    size_t object_fixup_bytes_ = 0;

    // Number of pointer fixup bytes.
    size_t pointer_fixup_bytes_ = 0;

    // Number of offsets to string references that will be written to the
    // StringFieldOffsets section.
    size_t num_string_references_ = 0;

    // Intern table associated with this image for serialization.
    std::unique_ptr<InternTable> intern_table_;

    // Class table associated with this image for serialization.
    std::unique_ptr<ClassTable> class_table_;

    // Padding objects to ensure region alignment (if required).
    std::vector<size_t> padding_object_offsets_;
  };

  // We use the lock word to store the offset of the object in the image.
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  void AddDexCacheArrayRelocation(void* array, size_t offset, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AddMethodPointerArray(ObjPtr<mirror::PointerArray> arr)
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetLocalAddress(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    uint8_t* dst = image_info.image_.Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }
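
  // Note the distinction between the two address spaces involved here:
  // GetLocalAddress() returns where the object's copy lives inside this
  // process's mapping of image_, suitable for writing, while GetImageAddress()
  // (above) returns where the object will live once the image is mapped at its
  // target base at runtime. For example (illustrative), the same offset 0x100
  // maps to image_.Begin() + 0x100 locally but to image_begin_ + 0x100 in the
  // finished image.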

  // Returns the address in the boot image if we are compiling the app image.
  const uint8_t* GetOatAddress(StubType type) const;

  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, as it is all linked into one ELF object.
    // Note that the offset is interpreted as a signed value.
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }

  // Returns true if the class was in the original requested image classes list.
  bool KeepClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Debug aid that lists the requested image classes.
  void DumpImageClasses();

  // Visit all class loaders.
  void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from the DexCache roots.
  void PruneDexCache(ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::classlinker_classes_lock_);

  // Preload deterministic DexCache contents.
  void PreloadDexCache(ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::classlinker_classes_lock_);

  // Find dex caches for pruning or preloading.
  std::vector<ObjPtr<mirror::DexCache>> FindDexCaches(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::classlinker_classes_lock_);

  // Verify that unwanted classes have been removed.
  void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
  void CalculateNewObjectOffsets()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessWorkStack(WorkStack* work_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CreateHeader(size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ObjPtr<mirror::ObjectArray<mirror::Object>> CollectDexCaches(Thread* self, size_t oat_index) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  ObjPtr<mirror::ObjectArray<mirror::Object>> CreateImageRoots(
      size_t oat_index,
      Handle<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CalculateObjectBinSlots(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UnbinObjectsIntoOffset(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Creates the contiguous image in memory and adjusts pointers.
  void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupImTable(ImTable* orig, ImTable* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Copies metadata from the heap into a buffer that will be compressed and
   * written to the image.
   *
   * This function copies the string offset metadata from a local vector to an
   * offset inside the image_ field of an ImageInfo struct.  The offset into the
   * memory pointed to by the image_ field is obtained from the ImageSection
   * object for the String Offsets section.
   *
   * All data for the image, besides the object bitmap and the relocation data,
   * will also be copied into the memory region pointed to by image_.
   */
  void CopyMetadata();

  void FixupClass(mirror::Class* orig, mirror::Class* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupObject(mirror::Object* orig, mirror::Object* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename T>
  void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
                               std::atomic<mirror::DexCachePair<T>>* new_array,
                               uint32_t array_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename T>
  void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
                               std::atomic<mirror::NativeDexCachePair<T>>* new_array,
                               uint32_t array_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
                               GcRoot<mirror::CallSite>* new_array,
                               uint32_t array_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename EntryType>
  void FixupDexCacheArray(mirror::DexCache* orig_dex_cache,
                          mirror::DexCache* copy_dex_cache,
                          MemberOffset array_offset,
                          uint32_t size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupDexCache(mirror::DexCache* orig_dex_cache,
                     mirror::DexCache* copy_dex_cache)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupPointerArray(mirror::Object* dst,
                         mirror::PointerArray* arr,
                         Bin array_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the quick code for a non-resolution/imt-conflict/abstract method.
  const uint8_t* GetQuickCode(ArtMethod* method,
                              const ImageInfo& image_info,
                              bool* quick_is_interpreted)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if a method is likely to be dirtied at runtime.
  bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Assign the offset for an ArtMethod.
  void AssignMethodOffset(ArtMethod* method,
                          NativeObjectRelocationType type,
                          size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the imt was newly inserted.
  bool TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);

  // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
  // relocation.
  void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if klass is loaded by the boot class loader but not in the boot image.
  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if klass depends on a boot class loader non-image class. We want to prune these
  // classes since we do not want any boot class loader classes in the image. This means that
  // we also cannot have any classes which refer to these boot class loader non-image classes.
  // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
  // options.
  bool PruneAppImageClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // early_exit is true if we had a cyclic dependency anywhere down the chain.
  bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
                                  bool* early_exit,
                                  std::unordered_set<mirror::Object*>* visited)
      REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * This type holds the information necessary for calculating
   * AppImageReferenceOffsetInfo values after the object relocations have been
   * computed.
   *
   * The first element will always be a pointer to a managed object.  If the
   * pointer has been tagged (testable with HasDexCacheNativeRefTag) it
   * indicates that the referenced object is a DexCache object that requires
   * special handling during loading and the second element has no meaningful
   * value.  If the pointer isn't tagged then the second element is an
   * object-relative offset to a field containing a string reference.
   *
   * Note that it is possible for an untagged DexCache pointer to occur in the
   * first position if it has a managed reference that needs to be updated.
   *
   * TODO (chriswailes): Add a note indicating the source line where we ensure
   * that no moving garbage collection will occur.
   *
   * TODO (chriswailes): Replace with std::variant once ART is building with
   * C++17.
   */
  typedef std::pair<uintptr_t, uint32_t> HeapReferencePointerInfo;
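
  // Illustrative sketch of the two encodings (obj, string_field_offset, and
  // tagged_dex_cache_ptr are placeholders):
  //
  //   // Untagged case: managed object plus the offset of a string field.
  //   HeapReferencePointerInfo plain(reinterpret_cast<uintptr_t>(obj),
  //                                  string_field_offset);
  //   // Tagged case: a DexCache needing special handling at load time; the
  //   // second element carries no meaningful value.
  //   HeapReferencePointerInfo tagged(tagged_dex_cache_ptr, 0u);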

  /*
   * Collects the info necessary for calculating image offsets to string field
   * references later.
   *
   * This function is used when constructing AppImages.  Because AppImages
   * contain strings that must be interned, we need to visit references to these
   * strings when the AppImage is loaded and either insert them into the
   * runtime intern table or replace the existing reference with a reference
   * to the interned strings.
   *
   * To speed up the interning of strings when the AppImage is loaded, we include
   * a list of offsets to string references in the AppImage.  These are then
   * iterated over at load time and fixed up.
   *
   * To record the offsets we first have to count the number of string
   * references that will be included in the AppImage.  This allows us to both
   * allocate enough memory for storing the offsets and correctly calculate the
   * offsets of various objects into the image.  Once the image offset
   * calculations are done for managed objects, the reference object/offset pairs
   * are translated to image offsets.  The CopyMetadata function then copies
   * these offsets into the image.
   */
  std::vector<HeapReferencePointerInfo> CollectStringReferenceInfo() const
      REQUIRES_SHARED(Locks::mutator_lock_);
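
  // The resulting pipeline, restating the comment above as a sketch:
  // CollectStringReferenceInfo() gathers object/offset pairs, the layout pass
  // translates them into the AppImageReferenceOffsetInfo values stored in
  // string_reference_offsets_, and CopyMetadata() writes them into the
  // image's StringFieldOffsets section.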

  /*
   * Ensures that assumptions about native GC roots and AppImages hold.
   *
   * This function verifies the following condition(s):
   *   - Native references to managed strings are only reachable through DexCache
   *     objects.
   */
  void VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsMultiImage() const {
    return image_infos_.size() > 1;
  }

  static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);

  struct NativeObjectRelocation {
    size_t oat_index;
    uintptr_t offset;
    NativeObjectRelocationType type;

    bool IsArtMethodRelocation() const {
      return type == NativeObjectRelocationType::kArtMethodClean ||
          type == NativeObjectRelocationType::kArtMethodDirty ||
          type == NativeObjectRelocationType::kRuntimeMethod;
    }
  };

  NativeObjectRelocation GetNativeRelocation(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the object will be when the image is loaded at runtime.
  template <typename T>
  T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the temporary copy of the object currently is.
  template <typename T>
  T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
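
  // These two mirror the GetImageAddress()/GetLocalAddress() split, but for
  // native objects such as ArtFields and ArtMethods: NativeLocationInImage()
  // answers "where will this live at runtime?" (the value to write into image
  // pointers), while NativeCopyLocation() answers "where is the writable copy
  // right now?".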

  // Return true if `obj` belongs to the image we're writing.
  // For a boot image, this is true for all objects.
  // For an app image, boot image objects and boot class path dex caches are excluded.
  bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if `obj` is inside of the boot image space. This may only return true if we are
  // compiling an app image.
  bool IsInBootImage(const void* obj) const;

  // Return true if `ptr` is within the boot oat file.
  bool IsInBootOatFile(const void* ptr) const;

  // Get the index of the oat file associated with the object.
  size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  // The oat index for shared data in multi-image compilation and for all data in single-image
  // compilation.
  size_t GetDefaultOatIndex() const {
    return 0u;
  }

  ImageInfo& GetImageInfo(size_t oat_index) {
    return image_infos_[oat_index];
  }

  const ImageInfo& GetImageInfo(size_t oat_index) const {
    return image_infos_[oat_index];
  }

  // Find an already strongly interned string in the other images or in the boot image. Used to
  // remove duplicates in the multi-image and app image cases.
  mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if there already exists a native allocation for an object.
  bool NativeRelocationAssigned(void* ptr) const;

  // Copy a reference and record the image relocation.
  template <typename DestType>
  void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Copy a native pointer and record the image relocation.
  void CopyAndFixupPointer(void** target, void* value, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupPointer(void** target, void* value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupPointer(
      void* object, MemberOffset offset, void* value, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupPointer(void* object, MemberOffset offset, void* value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Tests an object to see if it will be contained in an AppImage.
   *
   * An object reference is considered to be an AppImage string reference iff:
   *   - It isn't null
   *   - The referred-to object isn't in the boot image
   *   - The referred-to object is a Java String
   */
  ALWAYS_INLINE
  bool IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  const CompilerOptions& compiler_options_;

  // Beginning target image address for the first image.
  uint8_t* global_image_begin_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // Pointer arrays that need to be updated. Since these are only some int and long arrays, we
  // need to keep track of them. These include vtable arrays, iftable arrays, and dex caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // Saved hash codes. We use these to restore lock words that were temporarily repurposed to
  // hold forwarding addresses, as well as to copy the hash codes over to the image copies.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
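
  // Lifecycle sketch of a saved hash code (per the comments above and on
  // BinSlot): when a BinSlot value is parked in an object's lock word, a hash
  // code previously stored there is remembered in saved_hashcode_map_; when
  // the object is copied into the image, the saved value is written into the
  // copy's lock word so the runtime observes the original hash code.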

  // Oat index map for objects.
  std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;

  // Size of pointers on the target architecture.
  PointerSize target_ptr_size_;

  // Image data, indexed by the oat file index.
  dchecked_vector<ImageInfo> image_infos_;

  // ArtField and ArtMethod relocation map. These are allocated as arrays of structs, but we want
  // to have one entry per ArtField for convenience. ArtFields are placed right after the end of
  // the image objects (aka the sum of bin_slot_sizes_). ArtMethods are placed right after the
  // ArtFields.
  std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;

  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];

  // Counters for measurements, used for logging only.
  uint64_t dirty_methods_;
  uint64_t clean_methods_;

  // Prune-class memoization table, used to speed up ContainsBootClassLoaderNonImageClass.
  std::unordered_map<mirror::Class*, bool> prune_class_memo_;

  // The application class loader. Null for the boot image.
  jobject app_class_loader_;

  // Boot image live objects, null for the app image.
  mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;

  // Offsets into the image that indicate where string references are recorded.
  std::vector<AppImageReferenceOffsetInfo> string_reference_offsets_;

  // Which mode the image is stored as, see image.h.
  const ImageHeader::StorageMode image_storage_mode_;

  // The file names of the oat files.
  const std::vector<std::string>& oat_filenames_;

  // Map of dex files to the indexes of the oat files that they were compiled into.
  const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;

  // Set of objects known to be dirty in the image. Can be nullptr if there are none.
  const HashSet<std::string>* dirty_image_objects_;

  // Objects are guaranteed to not cross the region size boundary.
  size_t region_size_ = 0u;

  // Region alignment bytes wasted.
  size_t region_alignment_wasted_ = 0u;

  class ImageFileGuard;
  class FixupClassVisitor;
  class FixupRootVisitor;
  class FixupVisitor;
  class GetRootsVisitor;
  class NativeLocationVisitor;
  class PruneClassesVisitor;
  class PruneClassLoaderClassesVisitor;
  class PruneObjectReferenceVisitor;
  class RegisterBootClassPathClassesVisitor;
  class VisitReferencesVisitor;

  /*
   * A visitor class for extracting object/offset pairs.
   *
   * This visitor walks the fields of an object and extracts object/offset pairs
   * that are later translated to image offsets.  This visitor is only
   * responsible for extracting info for Java references.  Native references to
   * Java strings are handled in the wrapper function
   * CollectStringReferenceInfo().
   */
  class CollectStringReferenceVisitor;

  // A visitor used by the VerifyNativeGCRootInvariants() function.
  class NativeGCRootInvariantVisitor;

  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};

}  // namespace linker
}  // namespace art

#endif  // ART_DEX2OAT_LINKER_IMAGE_WRITER_H_