/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "image_writer.h"

#include <sys/stat.h>

#include <algorithm>
#include <limits>
#include <memory>
#include <numeric>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "elf_file.h"
#include "elf_patcher.h"
#include "elf_utils.h"
#include "elf_writer.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "globals.h"
#include "handle_scope-inl.h"
#include "image.h"
#include "intern_table.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

using ::art::mirror::ArtField;
using ::art::mirror::ArtMethod;
using ::art::mirror::Class;
using ::art::mirror::DexCache;
using ::art::mirror::EntryPointFromInterpreter;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;
using ::art::mirror::String;

namespace art {

// Separate objects into multiple bins to optimize dirty memory use.
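// When this is false, AssignImageBinSlot() below places every object in kBinRegular; binning is
// purely a packing optimization, so either setting produces a correct image.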
static constexpr bool kBinObjects = true;

bool ImageWriter::Write(const std::string& image_filename,
                        uintptr_t image_begin,
                        const std::string& oat_filename,
                        const std::string& oat_location,
                        bool compile_pic) {
  CHECK(!image_filename.empty());

  CHECK_NE(image_begin, 0U);
  image_begin_ = reinterpret_cast<byte*>(image_begin);
  compile_pic_ = compile_pic;

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
  std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
  if (oat_file.get() == nullptr) {
    LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
    return false;
  }
  std::string error_msg;
  oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, &error_msg);
  if (oat_file_ == nullptr) {
    LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
               << ": " << error_msg;
    return false;
  }
  CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_);

  interpreter_to_interpreter_bridge_offset_ =
      oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
  interpreter_to_compiled_code_bridge_offset_ =
      oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset();

  jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();

  portable_imt_conflict_trampoline_offset_ =
      oat_file_->GetOatHeader().GetPortableImtConflictTrampolineOffset();
  portable_resolution_trampoline_offset_ =
      oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
  portable_to_interpreter_bridge_offset_ =
      oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();

  quick_generic_jni_trampoline_offset_ =
      oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset();
  quick_imt_conflict_trampoline_offset_ =
      oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset();
  quick_resolution_trampoline_offset_ =
      oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
  quick_to_interpreter_bridge_offset_ =
      oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset();
  {
    Thread::Current()->TransitionFromSuspendedToRunnable();
    PruneNonImageClasses();  // Remove junk.
    ComputeLazyFieldsForImageClasses();  // Add useful information.
    ProcessStrings();
    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);  // Remove garbage.

  if (!AllocMemory()) {
    return false;
  }

  if (kIsDebugBuild) {
    ScopedObjectAccess soa(Thread::Current());
    CheckNonImageClassesRemoved();
  }

  Thread::Current()->TransitionFromSuspendedToRunnable();
  size_t oat_loaded_size = 0;
  size_t oat_data_offset = 0;
  ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset);
  CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset);
  CopyAndFixupObjects();

  PatchOatCodeAndMethods(oat_file.get());

  // Before flushing, which might fail, release the mutator lock.
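  // Transitioning to a suspended state (kNative) means the flush below, which can block on I/O,
  // does not hold up GC or other threads waiting for us to suspend.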
  Thread::Current()->TransitionFromRunnableToSuspended(kNative);

  if (oat_file->FlushCloseOrErase() != 0) {
    LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
    return false;
  }

  std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
  if (image_file.get() == nullptr) {
    LOG(ERROR) << "Failed to open image file " << image_filename;
    return false;
  }
  if (fchmod(image_file->Fd(), 0644) != 0) {
    PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
    image_file->Erase();
    return false;
  }

  // Write out the image.
  CHECK_EQ(image_end_, image_header->GetImageSize());
  if (!image_file->WriteFully(image_->Begin(), image_end_)) {
    PLOG(ERROR) << "Failed to write image file " << image_filename;
    image_file->Erase();
    return false;
  }

  // Write out the image bitmap at the page aligned start of the image end.
  CHECK_ALIGNED(image_header->GetImageBitmapOffset(), kPageSize);
  if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
                         image_header->GetImageBitmapSize(),
                         image_header->GetImageBitmapOffset())) {
    PLOG(ERROR) << "Failed to write image file " << image_filename;
    image_file->Erase();
    return false;
  }

  if (image_file->FlushCloseOrErase() != 0) {
    PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
    return false;
  }
  return true;
}

void ImageWriter::SetImageOffset(mirror::Object* object,
                                 ImageWriter::BinSlot bin_slot,
                                 size_t offset) {
  DCHECK(object != nullptr);
  DCHECK_NE(offset, 0U);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
  DCHECK_ALIGNED(obj, kObjectAlignment);

  image_bitmap_->Set(obj);  // Mark the obj as mutated, since we will end up changing it.
  {
    // Remember the object-inside-of-the-image's hash code so we can restore it after the copy.
    auto hash_it = saved_hashes_map_.find(bin_slot);
    if (hash_it != saved_hashes_map_.end()) {
      std::pair<BinSlot, uint32_t> slot_hash = *hash_it;
      saved_hashes_.push_back(std::make_pair(obj, slot_hash.second));
      saved_hashes_map_.erase(hash_it);
    }
  }
  // The object is already deflated from when we set the bin slot. Just overwrite the lock word.
  object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
  DCHECK(IsImageOffsetAssigned(object));
}

void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) {
  DCHECK(object != nullptr);
  DCHECK_NE(image_objects_offset_begin_, 0u);

  size_t previous_bin_sizes = GetBinSizeSum(bin_slot.GetBin());  // Sum of the sizes in [0..bin#).
  size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
  DCHECK_ALIGNED(new_offset, kObjectAlignment);

  SetImageOffset(object, bin_slot, new_offset);
  DCHECK_LT(new_offset, image_end_);
}

bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
  // Will also return true if the bin slot was assigned since we are reusing the lock word.
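  // Note: offsets and bin slots both live in the kForwardingAddress lock word state, so this
  // check alone cannot distinguish the two; callers rely on the current phase of the writer.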
  DCHECK(object != nullptr);
  return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
}

size_t ImageWriter::GetImageOffset(mirror::Object* object) const {
  DCHECK(object != nullptr);
  DCHECK(IsImageOffsetAssigned(object));
  LockWord lock_word = object->GetLockWord(false);
  size_t offset = lock_word.ForwardingAddress();
  DCHECK_LT(offset, image_end_);
  return offset;
}

void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
  DCHECK(object != nullptr);
  DCHECK(!IsImageOffsetAssigned(object));
  DCHECK(!IsImageBinSlotAssigned(object));

  // Before we stomp over the lock word, save the hash code for later.
  Monitor::Deflate(Thread::Current(), object);
  LockWord lw(object->GetLockWord(false));
  switch (lw.GetState()) {
    case LockWord::kFatLocked: {
      LOG(FATAL) << "Fat locked object " << object << " found during object copy";
      break;
    }
    case LockWord::kThinLocked: {
      LOG(FATAL) << "Thin locked object " << object << " found during object copy";
      break;
    }
    case LockWord::kUnlocked:
      // No hash, don't need to save it.
      break;
    case LockWord::kHashCode:
      saved_hashes_map_[bin_slot] = lw.GetHashCode();
      break;
    default:
      LOG(FATAL) << "Unreachable.";
      break;
  }
  object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)),
                      false);
  DCHECK(IsImageBinSlotAssigned(object));
}

void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
  DCHECK(object != nullptr);
  size_t object_size;
  if (object->IsArtMethod()) {
    // Methods are sized based on the target pointer size.
    object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
  } else {
    object_size = object->SizeOf();
  }

  // The magic happens here. We segregate objects into different bins based
  // on how likely they are to get dirty at runtime.
  //
  // Likely-to-dirty objects get packed together into the same bin so that
  // at runtime their page dirtiness ratio (how many dirty objects a page has) is
  // maximized.
  //
  // This means more pages will stay either clean or shared dirty (with the zygote) and
  // the app will use less of its own (private) memory.
  Bin bin = kBinRegular;

  if (kBinObjects) {
    //
    // Changing the bin of an object is purely a memory-use tuning.
    // It has no effect on runtime correctness.
    //
    // Memory analysis has determined that the following types of objects get dirtied
    // the most:
    //
    // * Classes that are verified [their clinit runs only at runtime]
    //   - classes in general [because their static fields get overwritten]
    //   - initialized classes with all-final statics are unlikely to ever be dirty,
    //     so bin them separately
    // * ArtMethods that are:
    //   - native [their native entry point is not looked up until runtime]
    //   - have declaring classes that aren't initialized
    //     [their interpreter/quick entry points are trampolines until the class
    //     becomes initialized]
    //
    // We also assume the following objects get dirtied either never or extremely rarely:
    // * Strings (they are immutable)
    // * ArtMethods that aren't native and have initialized declaring classes
    //
    // We assume that "regular" bin objects are highly unlikely to become dirtied,
    // so packing them together will not result in a noticeably tighter dirty-to-clean ratio.
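    //
    // For example: a single dirty object dirties its whole (e.g. 4 KiB) page, so scattering
    // likely-dirty objects across pages can dirty many pages, while clustering them confines the
    // damage to a few.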
    //
    if (object->IsClass()) {
      bin = kBinClassVerified;
      mirror::Class* klass = object->AsClass();

      if (klass->GetStatus() == Class::kStatusInitialized) {
        bin = kBinClassInitialized;

        // If the class's static fields are all final, put it into a separate bin
        // since it's very likely to stay clean.
        uint32_t num_static_fields = klass->NumStaticFields();
        if (num_static_fields == 0) {
          bin = kBinClassInitializedFinalStatics;
        } else {
          // Maybe all the statics are final?
          bool all_final = true;
          for (uint32_t i = 0; i < num_static_fields; ++i) {
            ArtField* field = klass->GetStaticField(i);
            if (!field->IsFinal()) {
              all_final = false;
              break;
            }
          }

          if (all_final) {
            bin = kBinClassInitializedFinalStatics;
          }
        }
      }
    } else if (object->IsArtMethod<kVerifyNone>()) {
      mirror::ArtMethod* art_method = down_cast<ArtMethod*>(object);
      if (art_method->IsNative()) {
        bin = kBinArtMethodNative;
      } else {
        mirror::Class* declaring_class = art_method->GetDeclaringClass();
        if (declaring_class->GetStatus() != Class::kStatusInitialized) {
          bin = kBinArtMethodNotInitialized;
        } else {
          // This is highly unlikely to dirty since there are no entry points to mutate.
          bin = kBinArtMethodsManagedInitialized;
        }
      }
    } else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
      bin = kBinString;  // Strings are almost always immutable (except for the object header).
    }  // else bin = kBinRegular
  }

  size_t current_offset = bin_slot_sizes_[bin];  // How many bytes the current bin is at (aligned).
  // Move the current bin size up to accommodate the object we just assigned a bin slot.
  size_t offset_delta = RoundUp(object_size, kObjectAlignment);  // 64-bit alignment
  bin_slot_sizes_[bin] += offset_delta;

  BinSlot new_bin_slot(bin, current_offset);
  SetImageBinSlot(object, new_bin_slot);

  ++bin_slot_count_[bin];

  DCHECK_LT(GetBinSizeSum(), image_->Size());

  // Grow the image closer to the end by the object we just assigned.
  image_end_ += offset_delta;
  DCHECK_LT(image_end_, image_->Size());
}

bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
  DCHECK(object != nullptr);

  // We always stash the bin slot into a lockword, in the 'forwarding address' state.
  // If it's in some other state, then we haven't yet assigned an image bin slot.
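  // (Any other state, e.g. unlocked or a hash code, means no slot has been assigned yet;
  // SetImageBinSlot() above aborts if it ever sees a locked object.)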
  if (object->GetLockWord(false).GetState() != LockWord::kForwardingAddress) {
    return false;
  } else if (kIsDebugBuild) {
    LockWord lock_word = object->GetLockWord(false);
    size_t offset = lock_word.ForwardingAddress();
    BinSlot bin_slot(offset);
    DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()])
        << "bin slot offset should not exceed the size of that bin";
  }
  return true;
}

ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const {
  DCHECK(object != nullptr);
  DCHECK(IsImageBinSlotAssigned(object));

  LockWord lock_word = object->GetLockWord(false);
  size_t offset = lock_word.ForwardingAddress();  // TODO: ForwardingAddress should be uint32_t.
  DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());

  BinSlot bin_slot(static_cast<uint32_t>(offset));
  DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()]);

  return bin_slot;
}

bool ImageWriter::AllocMemory() {
  size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
  std::string error_msg;
  image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length,
                                    PROT_READ | PROT_WRITE, true, &error_msg));
  if (UNLIKELY(image_.get() == nullptr)) {
    LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
    return false;
  }

  // Create the image bitmap.
  image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap",
                                                                    image_->Begin(), length));
  if (image_bitmap_.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate memory for image bitmap";
    return false;
  }
  return true;
}

void ImageWriter::ComputeLazyFieldsForImageClasses() {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
}

bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  mirror::Class::ComputeName(hs.NewHandle(c));
  return true;
}

// Count the number of strings in the heap and put the result in arg as a size_t pointer.
static void CountStringsCallback(Object* obj, void* arg)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (obj->GetClass()->IsStringClass()) {
    ++*reinterpret_cast<size_t*>(arg);
  }
}

// Collect all the java.lang.String objects in the heap and put them in the output strings_ array.
class StringCollector {
 public:
  StringCollector(Handle<mirror::ObjectArray<mirror::String>> strings, size_t index)
      : strings_(strings), index_(index) {
  }
  static void Callback(Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    auto* collector = reinterpret_cast<StringCollector*>(arg);
    if (obj->GetClass()->IsStringClass()) {
      collector->strings_->SetWithoutChecks<false>(collector->index_++, obj->AsString());
    }
  }
  size_t GetIndex() const {
    return index_;
  }

 private:
  Handle<mirror::ObjectArray<mirror::String>> strings_;
  size_t index_;
};

// Compare strings based on length, used for sorting strings by length / reverse length.
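// ProcessStrings() below sorts through rbegin()/rend() with this less-than comparator, which
// yields a decreasing-length order.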
class StringLengthComparator {
 public:
  explicit StringLengthComparator(Handle<mirror::ObjectArray<mirror::String>> strings)
      : strings_(strings) {
  }
  bool operator()(size_t a, size_t b) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return strings_->GetWithoutChecks(a)->GetLength() < strings_->GetWithoutChecks(b)->GetLength();
  }

 private:
  Handle<mirror::ObjectArray<mirror::String>> strings_;
};

// Normal string < comparison through the chars_ array.
class SubstringComparator {
 public:
  explicit SubstringComparator(const std::vector<uint16_t>* const chars) : chars_(chars) {
  }
  bool operator()(const std::pair<size_t, size_t>& a, const std::pair<size_t, size_t>& b) {
    return std::lexicographical_compare(chars_->begin() + a.first,
                                        chars_->begin() + a.first + a.second,
                                        chars_->begin() + b.first,
                                        chars_->begin() + b.first + b.second);
  }

 private:
  const std::vector<uint16_t>* const chars_;
};

void ImageWriter::ProcessStrings() {
  size_t total_strings = 0;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  ClassLinker* cl = Runtime::Current()->GetClassLinker();
  {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CountStringsCallback, &total_strings);  // Count the strings.
  }
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  auto strings = hs.NewHandle(cl->AllocStringArray(self, total_strings));
  StringCollector string_collector(strings, 0U);
  {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    // Read strings into the array.
    heap->VisitObjects(StringCollector::Callback, &string_collector);
  }
  // Some strings could have been freed if AllocStringArray caused a GC.
  CHECK_LE(string_collector.GetIndex(), total_strings);
  total_strings = string_collector.GetIndex();
  size_t total_length = 0;
  std::vector<size_t> reverse_sorted_strings;
  for (size_t i = 0; i < total_strings; ++i) {
    mirror::String* s = strings->GetWithoutChecks(i);
    // Look up the string in the array.
    total_length += s->GetLength();
    reverse_sorted_strings.push_back(i);
  }
  // Sort by reverse length.
  StringLengthComparator comparator(strings);
  std::sort(reverse_sorted_strings.rbegin(), reverse_sorted_strings.rend(), comparator);
  // Deduplicate prefixes and add strings to the char array.
  std::vector<uint16_t> combined_chars(total_length, 0U);
  size_t num_chars = 0;
  // Characters of strings which are a non-equal prefix of another string (not the same string).
  // We don't count the savings from equal strings since those would get interned later anyway.
  size_t prefix_saved_chars = 0;
  std::set<std::pair<size_t, size_t>, SubstringComparator> existing_strings((
      SubstringComparator(&combined_chars)));
  for (size_t i = 0; i < total_strings; ++i) {
    mirror::String* s = strings->GetWithoutChecks(reverse_sorted_strings[i]);
    // Add the string to the end of the char array.
    size_t length = s->GetLength();
    for (size_t j = 0; j < length; ++j) {
      combined_chars[num_chars++] = s->CharAt(j);
    }
    // Try to see if the string exists as a prefix of an existing string.
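    // Since strings are processed in decreasing length order, everything already in the set is
    // at least as long as the current string, and a prefix sorts immediately before its
    // extensions; lower_bound() therefore lands on the duplicate or the best extension
    // candidate, if any.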
    size_t new_offset = 0;
    std::pair<size_t, size_t> new_string(num_chars - length, length);
    auto it = existing_strings.lower_bound(new_string);
    bool is_prefix = true;
    if (it == existing_strings.end()) {
      is_prefix = false;
    } else {
      CHECK_LE(length, it->second);
      for (size_t j = 0; j < length; ++j) {
        if (combined_chars[it->first + j] != s->CharAt(j)) {
          is_prefix = false;
          break;
        }
      }
    }
    if (is_prefix) {
      // Shares a prefix, set the offset to where the new offset will be.
      new_offset = it->first;
      // Remove the added chars.
      num_chars -= length;
      if (it->second != length) {
        prefix_saved_chars += length;
      }
    } else {
      new_offset = new_string.first;
      existing_strings.insert(new_string);
    }
    s->SetOffset(new_offset);
  }
  // Allocate and update the char arrays.
  auto* array = mirror::CharArray::Alloc(self, num_chars);
  for (size_t i = 0; i < num_chars; ++i) {
    array->SetWithoutChecks<false>(i, combined_chars[i]);
  }
  for (size_t i = 0; i < total_strings; ++i) {
    strings->GetWithoutChecks(i)->SetArray(array);
  }
  if (kIsDebugBuild || VLOG_IS_ON(compiler)) {
    LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
              << total_length << " prefix saved chars=" << prefix_saved_chars;
  }
  ComputeEagerResolvedStrings();
}

void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
  if (!obj->GetClass()->IsStringClass()) {
    return;
  }
  mirror::String* string = obj->AsString();
  const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
  size_t dex_cache_count = class_linker->GetDexCacheCount();
  for (size_t i = 0; i < dex_cache_count; ++i) {
    DexCache* dex_cache = class_linker->GetDexCache(i);
    const DexFile& dex_file = *dex_cache->GetDexFile();
    const DexFile::StringId* string_id;
    if (UNLIKELY(string->GetLength() == 0)) {
      string_id = dex_file.FindStringId("");
    } else {
      string_id = dex_file.FindStringId(utf16_string);
    }
    if (string_id != nullptr) {
      // This string occurs in this dex file, assign the dex cache entry.
      uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
      if (dex_cache->GetResolvedString(string_idx) == nullptr) {
        dex_cache->SetResolvedString(string_idx, string);
      }
    }
  }
}

void ImageWriter::ComputeEagerResolvedStrings() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}

bool ImageWriter::IsImageClass(Class* klass) {
  std::string temp;
  return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
}

struct NonImageClasses {
  ImageWriter* image_writer;
  std::set<std::string>* non_image_classes;
};

void ImageWriter::PruneNonImageClasses() {
  if (compiler_driver_.GetImageClasses() == nullptr) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();

  // Make a list of classes we would like to prune.
  std::set<std::string> non_image_classes;
  NonImageClasses context;
  context.image_writer = this;
  context.non_image_classes = &non_image_classes;
  class_linker->VisitClasses(NonImageClassesVisitor, &context);

  // Remove the undesired classes from the class roots.
  for (const std::string& it : non_image_classes) {
    bool result = class_linker->RemoveClass(it.c_str(), nullptr);
    DCHECK(result);
  }

  // Clear references to removed classes from the DexCaches.
  ArtMethod* resolution_method = runtime->GetResolutionMethod();
  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
  size_t dex_cache_count = class_linker->GetDexCacheCount();
  for (size_t idx = 0; idx < dex_cache_count; ++idx) {
    DexCache* dex_cache = class_linker->GetDexCache(idx);
    for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
      Class* klass = dex_cache->GetResolvedType(i);
      if (klass != nullptr && !IsImageClass(klass)) {
        dex_cache->SetResolvedType(i, nullptr);
      }
    }
    for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
      ArtMethod* method = dex_cache->GetResolvedMethod(i);
      if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
        dex_cache->SetResolvedMethod(i, resolution_method);
      }
    }
    for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
      ArtField* field = dex_cache->GetResolvedField(i);
      if (field != nullptr && !IsImageClass(field->GetDeclaringClass())) {
        dex_cache->SetResolvedField(i, nullptr);
      }
    }
  }
}

bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
  NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
  if (!context->image_writer->IsImageClass(klass)) {
    std::string temp;
    context->non_image_classes->insert(klass->GetDescriptor(&temp));
  }
  return true;
}

void ImageWriter::CheckNonImageClassesRemoved() {
  if (compiler_driver_.GetImageClasses() != nullptr) {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
  }
}

void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
  if (obj->IsClass()) {
    Class* klass = obj->AsClass();
    if (!image_writer->IsImageClass(klass)) {
      image_writer->DumpImageClasses();
      std::string temp;
      CHECK(image_writer->IsImageClass(klass)) << klass->GetDescriptor(&temp)
                                               << " " << PrettyDescriptor(klass);
    }
  }
}

void ImageWriter::DumpImageClasses() {
  const std::set<std::string>* image_classes = compiler_driver_.GetImageClasses();
  CHECK(image_classes != nullptr);
  for (const std::string& image_class : *image_classes) {
    LOG(INFO) << " " << image_class;
  }
}

void ImageWriter::CalculateObjectBinSlots(Object* obj) {
  DCHECK(obj != nullptr);
  // If it is a string, we want to intern it if it's not already interned.
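  // Interning collapses equal strings to a single image object: the duplicate's bin slot is set
  // to the interned copy's slot below, so anything referring to it ends up pointing there.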
  if (obj->GetClass()->IsStringClass()) {
    // We must be an interned string that was forward referenced and already assigned.
    if (IsImageBinSlotAssigned(obj)) {
      DCHECK_EQ(obj, obj->AsString()->Intern());
      return;
    }
    mirror::String* const interned = obj->AsString()->Intern();
    if (obj != interned) {
      if (!IsImageBinSlotAssigned(interned)) {
        // The interned obj is after us, allocate its location early.
        AssignImageBinSlot(interned);
      }
      // Point those looking for this object to the interned version.
      SetImageBinSlot(obj, GetImageBinSlot(interned));
      return;
    }
    // else (obj == interned), nothing to do but fall through to the normal case.
  }

  AssignImageBinSlot(obj);
}

ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();
  Thread* self = Thread::Current();
  StackHandleScope<3> hs(self);
  Handle<Class> object_array_class(hs.NewHandle(
      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));

  // Build an Object[] of all the DexCaches used in the source_space_.
  // Since we can't hold the dex lock when allocating the dex_caches
  // ObjectArray, we lock the dex lock twice, first to get the number
  // of dex caches and then again to copy the dex caches. We check that
  // the number of dex caches does not change in between.
  size_t dex_cache_count;
  {
    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
    dex_cache_count = class_linker->GetDexCacheCount();
  }
  Handle<ObjectArray<Object>> dex_caches(
      hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
                                              dex_cache_count)));
  CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
  {
    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
    CHECK_EQ(dex_cache_count, class_linker->GetDexCacheCount())
        << "The number of dex caches changed.";
    for (size_t i = 0; i < dex_cache_count; ++i) {
      dex_caches->Set<false>(i, class_linker->GetDexCache(i));
    }
  }

  // Build an Object[] of the roots needed to restore the runtime.
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(
      ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
  image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
  image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
  image_roots->Set<false>(ImageHeader::kImtUnimplementedMethod,
                          runtime->GetImtUnimplementedMethod());
  image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
  image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
  image_roots->Set<false>(ImageHeader::kRefsOnlySaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
  image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
  image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
  image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
  for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
    CHECK(image_roots->Get(i) != nullptr);
  }
  return image_roots.Get();
}

// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
  // Visit fields of parent classes first.
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::Class> h_class(hs.NewHandle(klass));
  mirror::Class* super = h_class->GetSuperClass();
  if (super != nullptr) {
    WalkInstanceFields(obj, super);
  }
  // Then visit the reference instance fields declared by this class.
  size_t num_reference_fields = h_class->NumReferenceInstanceFields();
  MemberOffset field_offset = h_class->GetFirstReferenceInstanceFieldOffset();
  for (size_t i = 0; i < num_reference_fields; ++i) {
    mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
    if (value != nullptr) {
      WalkFieldsInOrder(value);
    }
    field_offset = MemberOffset(field_offset.Uint32Value() +
                                sizeof(mirror::HeapReference<mirror::Object>));
  }
}

// For an unvisited object, visit it then all its children found via fields.
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
  // Use our own visitor routine (instead of the GC visitor) to get better locality between
  // an object and its fields.
  if (!IsImageBinSlotAssigned(obj)) {
    // Walk instance fields of all objects.
    StackHandleScope<2> hs(Thread::Current());
    Handle<mirror::Object> h_obj(hs.NewHandle(obj));
    Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
    // Visit the object itself.
    CalculateObjectBinSlots(h_obj.Get());
    WalkInstanceFields(h_obj.Get(), klass.Get());
    // Walk static fields of a Class.
    if (h_obj->IsClass()) {
      size_t num_static_fields = klass->NumReferenceStaticFields();
      MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset();
      for (size_t i = 0; i < num_static_fields; ++i) {
        mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
        if (value != nullptr) {
          WalkFieldsInOrder(value);
        }
        field_offset = MemberOffset(field_offset.Uint32Value() +
                                    sizeof(mirror::HeapReference<mirror::Object>));
      }
    } else if (h_obj->IsObjectArray()) {
      // Walk elements of an object array.
      int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
      for (int32_t i = 0; i < length; i++) {
        mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
        mirror::Object* value = obj_array->Get(i);
        if (value != nullptr) {
          WalkFieldsInOrder(value);
        }
      }
    }
  }
}

void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
  DCHECK(writer != nullptr);
  writer->WalkFieldsInOrder(obj);
}

void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
  DCHECK(writer != nullptr);
  writer->UnbinObjectsIntoOffset(obj);
}

void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
  CHECK(obj != nullptr);

  // We know the bin slot, and the total bin sizes for all objects by now,
  // so calculate the object's final image offset.
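  // (Final offset = image_objects_offset_begin_ + the sizes of all earlier bins + the object's
  // index within its own bin; see AssignImageOffset() above.)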
  DCHECK(IsImageBinSlotAssigned(obj));
  BinSlot bin_slot = GetImageBinSlot(obj);
  // Change the lockword from a bin slot into an offset.
  AssignImageOffset(obj, bin_slot);
}

void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
  CHECK_NE(0U, oat_loaded_size);
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));

  gc::Heap* heap = Runtime::Current()->GetHeap();
  DCHECK_EQ(0U, image_end_);

  // Leave space for the header, but do not write it yet; we need to
  // know where image_roots is going to end up.
  image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment);  // 64-bit alignment.

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // TODO: Image spaces only?
    const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
    DCHECK_LT(image_end_, image_->Size());
    image_objects_offset_begin_ = image_end_;
    // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
    heap->VisitObjects(WalkFieldsCallback, this);
    // Transform each object's bin slot into an offset which will be used to do the final copy.
    heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
    // All bin slot hashes should have been put into the vector by now.
    DCHECK(saved_hashes_map_.empty());
    self->EndAssertNoThreadSuspension(old);
  }

  DCHECK_GT(image_end_, GetBinSizeSum());

  if (kIsDebugBuild) {
    LOG(INFO) << "Bin summary (total size: " << GetBinSizeSum() << "): ";
    for (size_t bin = 0; bin < kBinSize; ++bin) {
      LOG(INFO) << "  bin# " << bin << ", number objects: " << bin_slot_count_[bin]
                << ", total byte size: " << bin_slot_sizes_[bin];
    }
  }

  const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
  const byte* oat_file_end = oat_file_begin + oat_loaded_size;
  oat_data_begin_ = oat_file_begin + oat_data_offset;
  const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();

  // Return to write the header at the start of the image with the future location of image_roots.
  // At this point, image_end_ is the size of the image (excluding bitmaps).
  const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
  const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
      heap_bytes_per_bitmap_byte;
  ImageHeader image_header(PointerToLowMemUInt32(image_begin_),
                           static_cast<uint32_t>(image_end_),
                           RoundUp(image_end_, kPageSize),
                           RoundUp(bitmap_bytes, kPageSize),
                           PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
                           oat_file_->GetOatHeader().GetChecksum(),
                           PointerToLowMemUInt32(oat_file_begin),
                           PointerToLowMemUInt32(oat_data_begin_),
                           PointerToLowMemUInt32(oat_data_end),
                           PointerToLowMemUInt32(oat_file_end),
                           compile_pic_);
  memcpy(image_->Begin(), &image_header, sizeof(image_header));

  // Note that image_end_ is left at the end of the used space.
}

void ImageWriter::CopyAndFixupObjects() {
  Thread* self = Thread::Current();
  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // TODO: heap validation can't handle this fix up pass.
  heap->DisableObjectValidation();
  // TODO: Image spaces only?
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  heap->VisitObjects(CopyAndFixupObjectsCallback, this);
  // Fix up the objects that previously had hash codes.
  for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
    hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second), false);
  }
  saved_hashes_.clear();
  self->EndAssertNoThreadSuspension(old_cause);
}

void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
  DCHECK(obj != nullptr);
  DCHECK(arg != nullptr);
  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
  // See GetLocalAddress for a similar computation.
  size_t offset = image_writer->GetImageOffset(obj);
  byte* dst = image_writer->image_->Begin() + offset;
  const byte* src = reinterpret_cast<const byte*>(obj);
  size_t n;
  if (obj->IsArtMethod()) {
    // Size without pointer fields since we don't want to overrun the buffer if the target art
    // method is 32 bits but the source is 64 bits.
    n = mirror::ArtMethod::SizeWithoutPointerFields(sizeof(void*));
  } else {
    n = obj->SizeOf();
  }
  DCHECK_LT(offset + n, image_writer->image_->Size());
  memcpy(dst, src, n);
  Object* copy = reinterpret_cast<Object*>(dst);
  // Reset the lock word in the copy. The hash codes of objects which had inflated monitors or a
  // hash code in their monitor word are written back in CopyAndFixupObjects() above.
  copy->SetLockWord(LockWord(), false);
  image_writer->FixupObject(obj, copy);
}

// Rewrite all the references in the copied object to point to their image address equivalent.
class FixupVisitor {
 public:
  FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
    // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
    // image.
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        offset, image_writer_->GetImageAddress(ref));
  }

  // java.lang.ref.Reference visitor.
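  // The referent is delivered through this overload rather than the plain field visitor above,
  // so it is remapped to its image address here explicitly.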
  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
  }

 protected:
  ImageWriter* const image_writer_;
  mirror::Object* const copy_;
};

class FixupClassVisitor FINAL : public FixupVisitor {
 public:
  FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    DCHECK(obj->IsClass());
    FixupVisitor::operator()(obj, offset, /*is_static*/ false);

    // TODO: Remove dead code.
    if (offset.Uint32Value() < mirror::Class::EmbeddedVTableOffset().Uint32Value()) {
      return;
    }
  }

  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    LOG(FATAL) << "Reference not expected here.";
  }
};

void ImageWriter::FixupObject(Object* orig, Object* copy) {
  DCHECK(orig != nullptr);
  DCHECK(copy != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    orig->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      // Note the address 'copy' isn't the same as the image address of 'orig'.
      copy->SetReadBarrierPointer(GetImageAddress(orig));
      DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
    }
  }
  if (orig->IsClass() && orig->AsClass()->ShouldHaveEmbeddedImtAndVTable()) {
    FixupClassVisitor visitor(this, copy);
    orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  } else {
    FixupVisitor visitor(this, copy);
    orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  }
  if (orig->IsArtMethod<kVerifyNone>()) {
    FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
  } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
    // Set the right size for the target.
    size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
    down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
  }
}

const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
  DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
         !method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method);

  // Use original code if it exists. Otherwise, set the code pointer to the resolution
  // trampoline.

  // Quick entrypoint:
  const byte* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
  *quick_is_interpreted = false;
  if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
      method->GetDeclaringClass()->IsInitialized())) {
    // We have code for a non-static or initialized method, just use the code.
  } else if (quick_code == nullptr && method->IsNative() &&
             (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
    // Non-static or initialized native method missing compiled code, use generic JNI version.
    quick_code = GetOatAddress(quick_generic_jni_trampoline_offset_);
  } else if (quick_code == nullptr && !method->IsNative()) {
    // We don't have code at all for a non-native method, use the interpreter.
    quick_code = GetOatAddress(quick_to_interpreter_bridge_offset_);
    *quick_is_interpreted = true;
  } else {
    CHECK(!method->GetDeclaringClass()->IsInitialized());
    // We have code for a static method, but need to go through the resolution stub for class
    // initialization.
    quick_code = GetOatAddress(quick_resolution_trampoline_offset_);
  }
  return quick_code;
}

const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
  // Calculate the quick entry point following the same logic as FixupMethod() below.
  // The resolution method has a special trampoline to call.
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(method == runtime->GetResolutionMethod())) {
    return GetOatAddress(quick_resolution_trampoline_offset_);
  } else if (UNLIKELY(method == runtime->GetImtConflictMethod() ||
                      method == runtime->GetImtUnimplementedMethod())) {
    return GetOatAddress(quick_imt_conflict_trampoline_offset_);
  } else {
    // We assume all methods have code. If they don't currently, then we set them to use the
    // resolution trampoline. Abstract methods never have code, so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(method->IsAbstract())) {
      return GetOatAddress(quick_to_interpreter_bridge_offset_);
    } else {
      bool quick_is_interpreted;
      return GetQuickCode(method, &quick_is_interpreted);
    }
  }
}

void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
  // oat_begin_.
  // For 64-bit targets we need to repack the current runtime pointer sized fields to the right
  // locations.
  // Copy all of the fields from the runtime methods to the target methods first since we did a
  // bytewise copy earlier.
#if defined(ART_USE_PORTABLE_COMPILER)
  copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
      orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
#endif
  copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
                                                         target_ptr_size_);
  copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
  copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
      orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);

  // The resolution method has a special trampoline to call.
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
#if defined(ART_USE_PORTABLE_COMPILER)
    copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
#endif
    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
  } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
                      orig == runtime->GetImtUnimplementedMethod())) {
#if defined(ART_USE_PORTABLE_COMPILER)
    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(
        GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
#endif
    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
  } else {
    // We assume all methods have code. If they don't currently, then we set them to use the
    // resolution trampoline. Abstract methods never have code, so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(orig->IsAbstract())) {
#if defined(ART_USE_PORTABLE_COMPILER)
      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(
          GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
#endif
      copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
          GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
      copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
          reinterpret_cast<EntryPointFromInterpreter*>(const_cast<byte*>(
              GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
    } else {
      bool quick_is_interpreted;
      const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted);
      copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);

      // Portable entrypoint:
      bool portable_is_interpreted = false;
#if defined(ART_USE_PORTABLE_COMPILER)
      const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
      if (portable_code != nullptr && (!orig->IsStatic() || orig->IsConstructor() ||
          orig->GetDeclaringClass()->IsInitialized())) {
        // We have code for a non-static or initialized method, just use the code.
      } else if (portable_code == nullptr && orig->IsNative() &&
                 (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
        // Non-static or initialized native method missing compiled code, use generic JNI version.
        // TODO: generic JNI support for LLVM.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      } else if (portable_code == nullptr && !orig->IsNative()) {
        // We don't have code at all for a non-native method, use the interpreter.
        portable_code = GetOatAddress(portable_to_interpreter_bridge_offset_);
        portable_is_interpreted = true;
      } else {
        CHECK(!orig->GetDeclaringClass()->IsInitialized());
        // We have code for a static method, but need to go through the resolution stub for class
        // initialization.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      }
      copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
          portable_code, target_ptr_size_);
#endif
      // JNI entrypoint:
      if (orig->IsNative()) {
        // The native method's pointer is set to a stub to look up the implementation via dlsym.
        // Note this is not the code_ pointer; that is handled above.
        copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
                                                       target_ptr_size_);
      }

      // Interpreter entrypoint:
      // Set the interpreter entrypoint depending on whether there is compiled code or not.
      uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
          ? interpreter_to_interpreter_bridge_offset_
          : interpreter_to_compiled_code_bridge_offset_;
      EntryPointFromInterpreter* interpreter_entrypoint =
          reinterpret_cast<EntryPointFromInterpreter*>(
              const_cast<byte*>(GetOatAddress(interpreter_code)));
      copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
          interpreter_entrypoint, target_ptr_size_);
    }
  }
}

static OatHeader* GetOatHeaderFromElf(ElfFile* elf) {
  Elf32_Shdr* data_sec = elf->FindSectionByName(".rodata");
  if (data_sec == nullptr) {
    return nullptr;
  }
  return reinterpret_cast<OatHeader*>(elf->Begin() + data_sec->sh_offset);
}

void ImageWriter::PatchOatCodeAndMethods(File* elf_file) {
  std::string error_msg;
  std::unique_ptr<ElfFile> elf(ElfFile::Open(elf_file, PROT_READ | PROT_WRITE,
                                             MAP_SHARED, &error_msg));
  if (elf.get() == nullptr) {
    LOG(FATAL) << "Unable to open oat file for patching: " << error_msg;
    return;
  }
  if (!ElfPatcher::Patch(&compiler_driver_, elf.get(), oat_file_,
                         reinterpret_cast<uintptr_t>(oat_data_begin_),
                         GetImageAddressCallback, reinterpret_cast<void*>(this),
                         &error_msg)) {
    LOG(FATAL) << "Unable to patch oat file: " << error_msg;
    return;
  }
  OatHeader* oat_header = GetOatHeaderFromElf(elf.get());
  CHECK(oat_header != nullptr);
  CHECK(oat_header->IsValid());

  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
  image_header->SetOatChecksum(oat_header->GetChecksum());
}

size_t ImageWriter::GetBinSizeSum(ImageWriter::Bin up_to) const {
  DCHECK_LE(up_to, kBinSize);
  return std::accumulate(&bin_slot_sizes_[0], &bin_slot_sizes_[up_to], /*init*/ 0);
}

ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) {
  // These values may need to be updated if more bins are added to the enum Bin.
  static_assert(kBinBits == 3, "wrong number of bin bits");
  static_assert(kBinShift == 29, "wrong number of shift bits");
  static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes");

  DCHECK_LT(GetBin(), kBinSize);
  DCHECK_ALIGNED(GetIndex(), kObjectAlignment);
}

ImageWriter::BinSlot::BinSlot(Bin bin, uint32_t index)
    : BinSlot(index | (static_cast<uint32_t>(bin) << kBinShift)) {
  DCHECK_EQ(index, GetIndex());
}

ImageWriter::Bin ImageWriter::BinSlot::GetBin() const {
  return static_cast<Bin>((lockword_ & kBinMask) >> kBinShift);
}

uint32_t ImageWriter::BinSlot::GetIndex() const {
  return lockword_ & ~kBinMask;
}

}  // namespace art