1 /* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include <stdio.h> 18 #include <stdlib.h> 19 20 #include <fstream> 21 #include <functional> 22 #include <iostream> 23 #include <map> 24 #include <optional> 25 #include <set> 26 #include <string> 27 #include <unordered_set> 28 #include <vector> 29 30 #include <android-base/parseint.h> 31 #include "android-base/stringprintf.h" 32 33 #include "art_field-inl.h" 34 #include "art_method-inl.h" 35 #include "base/array_ref.h" 36 #include "base/os.h" 37 #include "base/string_view_cpp20.h" 38 #include "base/unix_file/fd_file.h" 39 #include "class_linker.h" 40 #include "gc/heap.h" 41 #include "gc/space/image_space.h" 42 #include "image-inl.h" 43 #include "mirror/class-inl.h" 44 #include "mirror/object-inl.h" 45 #include "oat.h" 46 #include "oat_file.h" 47 #include "oat_file_manager.h" 48 #include "scoped_thread_state_change-inl.h" 49 50 #include "backtrace/BacktraceMap.h" 51 #include "cmdline.h" 52 53 #include <signal.h> 54 #include <sys/stat.h> 55 #include <sys/types.h> 56 57 namespace art { 58 59 using android::base::StringPrintf; 60 61 namespace { 62 63 constexpr size_t kMaxAddressPrint = 5; 64 65 enum class ProcessType { 66 kZygote, 67 kRemote 68 }; 69 70 enum class RemoteProcesses { 71 kImageOnly, 72 kZygoteOnly, 73 kImageAndZygote 74 }; 75 76 struct MappingData { 77 // The count of pages that are considered dirty by the OS. 
78 size_t dirty_pages = 0; 79 // The count of pages that differ by at least one byte. 80 size_t different_pages = 0; 81 // The count of differing bytes. 82 size_t different_bytes = 0; 83 // The count of differing four-byte units. 84 size_t different_int32s = 0; 85 // The count of pages that have mapping count == 1. 86 size_t private_pages = 0; 87 // The count of private pages that are also dirty. 88 size_t private_dirty_pages = 0; 89 // The count of pages that are marked dirty but do not differ. 90 size_t false_dirty_pages = 0; 91 // Set of the local virtual page indices that are dirty. 92 std::set<size_t> dirty_page_set; 93 }; 94 95 static std::string GetClassDescriptor(mirror::Class* klass) 96 REQUIRES_SHARED(Locks::mutator_lock_) { 97 CHECK(klass != nullptr); 98 99 std::string descriptor; 100 const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/); 101 102 return std::string(descriptor_str); 103 } 104 105 static std::string PrettyFieldValue(ArtField* field, mirror::Object* object) 106 REQUIRES_SHARED(Locks::mutator_lock_) { 107 std::ostringstream oss; 108 switch (field->GetTypeAsPrimitiveType()) { 109 case Primitive::kPrimNot: { 110 oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>( 111 field->GetOffset()); 112 break; 113 } 114 case Primitive::kPrimBoolean: { 115 oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset())); 116 break; 117 } 118 case Primitive::kPrimByte: { 119 oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset())); 120 break; 121 } 122 case Primitive::kPrimChar: { 123 oss << object->GetFieldChar<kVerifyNone>(field->GetOffset()); 124 break; 125 } 126 case Primitive::kPrimShort: { 127 oss << object->GetFieldShort<kVerifyNone>(field->GetOffset()); 128 break; 129 } 130 case Primitive::kPrimInt: { 131 oss << object->GetField32<kVerifyNone>(field->GetOffset()); 132 break; 133 } 134 case Primitive::kPrimLong: { 135 oss << 
object->GetField64<kVerifyNone>(field->GetOffset()); 136 break; 137 } 138 case Primitive::kPrimFloat: { 139 oss << object->GetField32<kVerifyNone>(field->GetOffset()); 140 break; 141 } 142 case Primitive::kPrimDouble: { 143 oss << object->GetField64<kVerifyNone>(field->GetOffset()); 144 break; 145 } 146 case Primitive::kPrimVoid: { 147 oss << "void"; 148 break; 149 } 150 } 151 return oss.str(); 152 } 153 154 template <typename K, typename V, typename D> 155 static std::vector<std::pair<V, K>> SortByValueDesc( 156 const std::map<K, D> map, 157 std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) { 158 // Store value->key so that we can use the default sort from pair which 159 // sorts by value first and then key 160 std::vector<std::pair<V, K>> value_key_vector; 161 162 for (const auto& kv_pair : map) { 163 value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first)); 164 } 165 166 // Sort in reverse (descending order) 167 std::sort(value_key_vector.rbegin(), value_key_vector.rend()); 168 return value_key_vector; 169 } 170 171 // Fixup a remote pointer that we read from a foreign boot.art to point to our own memory. 172 // Returned pointer will point to inside of remote_contents. 173 template <typename T> 174 static ObjPtr<T> FixUpRemotePointer(ObjPtr<T> remote_ptr, 175 ArrayRef<uint8_t> remote_contents, 176 const backtrace_map_t& boot_map) 177 REQUIRES_SHARED(Locks::mutator_lock_) { 178 if (remote_ptr == nullptr) { 179 return nullptr; 180 } 181 182 uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr.Ptr()); 183 184 // In the case the remote pointer is out of range, it probably belongs to another image. 185 // Just return null for this case. 
186 if (remote < boot_map.start || remote >= boot_map.end) { 187 return nullptr; 188 } 189 190 off_t boot_offset = remote - boot_map.start; 191 192 return reinterpret_cast<T*>(&remote_contents[boot_offset]); 193 } 194 195 template <typename T> 196 static ObjPtr<T> RemoteContentsPointerToLocal(ObjPtr<T> remote_ptr, 197 ArrayRef<uint8_t> remote_contents, 198 const ImageHeader& image_header) 199 REQUIRES_SHARED(Locks::mutator_lock_) { 200 if (remote_ptr == nullptr) { 201 return nullptr; 202 } 203 204 uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr.Ptr()); 205 ptrdiff_t boot_offset = remote - &remote_contents[0]; 206 207 const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset; 208 209 return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr)); 210 } 211 212 template <typename T> size_t EntrySize(T* entry); 213 template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) { 214 return object->SizeOf(); 215 } 216 template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) { 217 return sizeof(*art_method); 218 } 219 220 // entry1 and entry2 might be relocated, this means we must use the runtime image's entry 221 // (image_entry) to avoid crashes. 222 template <typename T> 223 static bool EntriesDiffer(T* image_entry, 224 T* entry1, 225 T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) { 226 // Use the image entry since entry1 and entry2 might both be remote and relocated. 
227 return memcmp(entry1, entry2, EntrySize(image_entry)) != 0; 228 } 229 230 template <typename T> 231 struct RegionCommon { 232 public: 233 RegionCommon(std::ostream* os, 234 ArrayRef<uint8_t> remote_contents, 235 ArrayRef<uint8_t> zygote_contents, 236 const backtrace_map_t& boot_map, 237 const ImageHeader& image_header) : 238 os_(*os), 239 remote_contents_(remote_contents), 240 zygote_contents_(zygote_contents), 241 boot_map_(boot_map), 242 image_header_(image_header), 243 different_entries_(0), 244 dirty_entry_bytes_(0), 245 false_dirty_entry_bytes_(0) { 246 CHECK(!remote_contents.empty()); 247 } 248 249 void DumpSamplesAndOffsetCount() { 250 os_ << " sample object addresses: "; 251 for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) { 252 T* entry = dirty_entries_[i]; 253 os_ << reinterpret_cast<void*>(entry) << ", "; 254 } 255 os_ << "\n"; 256 os_ << " dirty byte +offset:count list = "; 257 std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted = 258 SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_); 259 for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) { 260 off_t offset = pair.second; 261 size_t count = pair.first; 262 os_ << "+" << offset << ":" << count << ", "; 263 } 264 os_ << "\n"; 265 } 266 267 size_t GetDifferentEntryCount() const { return different_entries_; } 268 size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; } 269 size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); } 270 size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; } 271 size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); } 272 273 protected: 274 bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const 275 REQUIRES_SHARED(Locks::mutator_lock_) { 276 size_t size = EntrySize(entry); 277 size_t page_off = 0; 278 size_t current_page_idx; 279 uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry); 280 // Iterate every page 
this entry belongs to 281 do { 282 current_page_idx = entry_address / kPageSize + page_off; 283 if (dirty_pages.find(current_page_idx) != dirty_pages.end()) { 284 // This entry is on a dirty page 285 return true; 286 } 287 page_off++; 288 } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment)); 289 return false; 290 } 291 292 void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) { 293 zygote_dirty_entries_.insert(entry); 294 } 295 296 void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) { 297 image_dirty_entries_.insert(entry); 298 } 299 300 void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) { 301 false_dirty_entries_.push_back(entry); 302 false_dirty_entry_bytes_ += EntrySize(entry); 303 } 304 305 // The output stream to write to. 306 std::ostream& os_; 307 // The byte contents of the remote (image) process' image. 308 ArrayRef<uint8_t> remote_contents_; 309 // The byte contents of the zygote process' image. 310 ArrayRef<uint8_t> zygote_contents_; 311 const backtrace_map_t& boot_map_; 312 const ImageHeader& image_header_; 313 314 // Count of entries that are different. 315 size_t different_entries_; 316 317 // Local entries that are dirty (differ in at least one byte). 318 size_t dirty_entry_bytes_; 319 std::vector<T*> dirty_entries_; 320 321 // Local entries that are clean, but located on dirty pages. 322 size_t false_dirty_entry_bytes_; 323 std::vector<T*> false_dirty_entries_; 324 325 // Image dirty entries 326 // If zygote_pid_only_ == true, these are shared dirty entries in the zygote. 327 // If zygote_pid_only_ == false, these are private dirty entries in the application. 328 std::set<T*> image_dirty_entries_; 329 330 // Zygote dirty entries (probably private dirty). 331 // We only add entries here if they differed in both the image and the zygote, so 332 // they are probably private dirty. 
333 std::set<T*> zygote_dirty_entries_; 334 335 std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_; 336 337 private: 338 DISALLOW_COPY_AND_ASSIGN(RegionCommon); 339 }; 340 341 template <typename T> 342 class RegionSpecializedBase : public RegionCommon<T> { 343 }; 344 345 // Region analysis for mirror::Objects 346 class ImgObjectVisitor : public ObjectVisitor { 347 public: 348 using ComputeDirtyFunc = std::function<void(mirror::Object* object, 349 const uint8_t* begin_image_ptr, 350 const std::set<size_t>& dirty_pages)>; 351 ImgObjectVisitor(ComputeDirtyFunc dirty_func, 352 const uint8_t* begin_image_ptr, 353 const std::set<size_t>& dirty_pages) : 354 dirty_func_(std::move(dirty_func)), 355 begin_image_ptr_(begin_image_ptr), 356 dirty_pages_(dirty_pages) { } 357 358 ~ImgObjectVisitor() override { } 359 360 void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) { 361 // Sanity check that we are reading a real mirror::Object 362 CHECK(object->GetClass() != nullptr) << "Image object at address " 363 << object 364 << " has null class"; 365 if (kUseBakerReadBarrier) { 366 object->AssertReadBarrierState(); 367 } 368 dirty_func_(object, begin_image_ptr_, dirty_pages_); 369 } 370 371 private: 372 const ComputeDirtyFunc dirty_func_; 373 const uint8_t* begin_image_ptr_; 374 const std::set<size_t>& dirty_pages_; 375 }; 376 377 template<> 378 class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> { 379 public: 380 RegionSpecializedBase(std::ostream* os, 381 ArrayRef<uint8_t> remote_contents, 382 ArrayRef<uint8_t> zygote_contents, 383 const backtrace_map_t& boot_map, 384 const ImageHeader& image_header, 385 bool dump_dirty_objects) 386 : RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header), 387 os_(*os), 388 dump_dirty_objects_(dump_dirty_objects) { } 389 390 // Define a common public type name for use by RegionData. 
391 using VisitorClass = ImgObjectVisitor; 392 393 void VisitEntries(VisitorClass* visitor, 394 uint8_t* base, 395 PointerSize pointer_size) 396 REQUIRES_SHARED(Locks::mutator_lock_) { 397 RegionCommon<mirror::Object>::image_header_.VisitObjects(visitor, base, pointer_size); 398 } 399 400 void VisitEntry(mirror::Object* entry) 401 REQUIRES_SHARED(Locks::mutator_lock_) { 402 // Unconditionally store the class descriptor in case we need it later 403 mirror::Class* klass = entry->GetClass(); 404 class_data_[klass].descriptor = GetClassDescriptor(klass); 405 } 406 407 void AddCleanEntry(mirror::Object* entry) 408 REQUIRES_SHARED(Locks::mutator_lock_) { 409 class_data_[entry->GetClass()].AddCleanObject(); 410 } 411 412 void AddFalseDirtyEntry(mirror::Object* entry) 413 REQUIRES_SHARED(Locks::mutator_lock_) { 414 RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry); 415 class_data_[entry->GetClass()].AddFalseDirtyObject(entry); 416 } 417 418 void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote) 419 REQUIRES_SHARED(Locks::mutator_lock_) { 420 size_t entry_size = EntrySize(entry); 421 ++different_entries_; 422 dirty_entry_bytes_ += entry_size; 423 // Log dirty count and objects for class objects only. 
424 mirror::Class* klass = entry->GetClass(); 425 if (klass->IsClassClass()) { 426 // Increment counts for the fields that are dirty 427 const uint8_t* current = reinterpret_cast<const uint8_t*>(entry); 428 const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote); 429 for (size_t i = 0; i < entry_size; ++i) { 430 if (current[i] != current_remote[i]) { 431 field_dirty_count_[i]++; 432 } 433 } 434 dirty_entries_.push_back(entry); 435 } 436 class_data_[klass].AddDirtyObject(entry, entry_remote); 437 } 438 439 void DiffEntryContents(mirror::Object* entry, 440 uint8_t* remote_bytes, 441 const uint8_t* base_ptr, 442 bool log_dirty_objects) 443 REQUIRES_SHARED(Locks::mutator_lock_) { 444 const char* tabs = " "; 445 // Attempt to find fields for all dirty bytes. 446 mirror::Class* klass = entry->GetClass(); 447 if (entry->IsClass()) { 448 os_ << tabs 449 << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n"; 450 } else { 451 os_ << tabs 452 << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n"; 453 } 454 455 std::unordered_set<ArtField*> dirty_instance_fields; 456 std::unordered_set<ArtField*> dirty_static_fields; 457 // Examine the bytes comprising the Object, computing which fields are dirty 458 // and recording them for later display. If the Object is an array object, 459 // compute the dirty entries. 
460 mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes); 461 for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) { 462 if (base_ptr[i] != remote_bytes[i]) { 463 ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i); 464 if (field != nullptr) { 465 dirty_instance_fields.insert(field); 466 } else if (entry->IsClass()) { 467 field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i); 468 if (field != nullptr) { 469 dirty_static_fields.insert(field); 470 } 471 } 472 if (field == nullptr) { 473 if (klass->IsArrayClass()) { 474 ObjPtr<mirror::Class> component_type = klass->GetComponentType(); 475 Primitive::Type primitive_type = component_type->GetPrimitiveType(); 476 size_t component_size = Primitive::ComponentSize(primitive_type); 477 size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value(); 478 DCHECK_ALIGNED_PARAM(data_offset, component_size); 479 if (i >= data_offset) { 480 os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n"; 481 // Skip the remaining bytes of this element to prevent spam. 482 DCHECK(IsPowerOfTwo(component_size)); 483 i |= component_size - 1; 484 continue; 485 } 486 } 487 os_ << tabs << "No field for byte offset " << i << "\n"; 488 } 489 } 490 } 491 // Dump different fields. 
492 if (!dirty_instance_fields.empty()) { 493 os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n"; 494 for (ArtField* field : dirty_instance_fields) { 495 os_ << tabs << ArtField::PrettyField(field) 496 << " original=" << PrettyFieldValue(field, entry) 497 << " remote=" << PrettyFieldValue(field, remote_entry) << "\n"; 498 } 499 } 500 if (!dirty_static_fields.empty()) { 501 if (dump_dirty_objects_ && log_dirty_objects) { 502 dirty_objects_.insert(entry); 503 } 504 os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n"; 505 for (ArtField* field : dirty_static_fields) { 506 os_ << tabs << ArtField::PrettyField(field) 507 << " original=" << PrettyFieldValue(field, entry) 508 << " remote=" << PrettyFieldValue(field, remote_entry) << "\n"; 509 } 510 } 511 os_ << "\n"; 512 } 513 514 void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) { 515 for (mirror::Object* obj : dirty_objects_) { 516 if (obj->IsClass()) { 517 os_ << "Private dirty object: " << obj->AsClass()->PrettyDescriptor() << "\n"; 518 } 519 } 520 } 521 522 void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) { 523 // vector of pairs (size_t count, Class*) 524 auto dirty_object_class_values = 525 SortByValueDesc<mirror::Class*, size_t, ClassData>( 526 class_data_, 527 [](const ClassData& d) { return d.dirty_object_count; }); 528 os_ << "\n" << " Dirty object count by class:\n"; 529 for (const auto& vk_pair : dirty_object_class_values) { 530 size_t dirty_object_count = vk_pair.first; 531 mirror::Class* klass = vk_pair.second; 532 ClassData& class_data = class_data_[klass]; 533 size_t object_sizes = class_data.dirty_object_size_in_bytes; 534 float avg_dirty_bytes_per_class = 535 class_data.dirty_object_byte_count * 1.0f / object_sizes; 536 float avg_object_size = object_sizes * 1.0f / dirty_object_count; 537 const std::string& descriptor = class_data.descriptor; 538 os_ << " " << mirror::Class::PrettyClass(klass) << " (" 539 << "objects: " << 
dirty_object_count << ", " 540 << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", " 541 << "avg object size: " << avg_object_size << ", " 542 << "class descriptor: '" << descriptor << "'" 543 << ")\n"; 544 if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) { 545 DumpSamplesAndOffsetCount(); 546 os_ << " field contents:\n"; 547 for (mirror::Object* object : class_data.dirty_objects) { 548 // remote class object 549 ObjPtr<mirror::Class> remote_klass = 550 ObjPtr<mirror::Class>::DownCast<mirror::Object>(object); 551 // local class object 552 ObjPtr<mirror::Class> local_klass = 553 RemoteContentsPointerToLocal(remote_klass, 554 RegionCommon<mirror::Object>::remote_contents_, 555 RegionCommon<mirror::Object>::image_header_); 556 os_ << " " << reinterpret_cast<const void*>(object) << " "; 557 os_ << " class_status (remote): " << remote_klass->GetStatus() << ", "; 558 os_ << " class_status (local): " << local_klass->GetStatus(); 559 os_ << "\n"; 560 } 561 } 562 } 563 } 564 565 void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) { 566 // vector of pairs (size_t count, Class*) 567 auto false_dirty_object_class_values = 568 SortByValueDesc<mirror::Class*, size_t, ClassData>( 569 class_data_, 570 [](const ClassData& d) { return d.false_dirty_object_count; }); 571 os_ << "\n" << " False-dirty object count by class:\n"; 572 for (const auto& vk_pair : false_dirty_object_class_values) { 573 size_t object_count = vk_pair.first; 574 mirror::Class* klass = vk_pair.second; 575 ClassData& class_data = class_data_[klass]; 576 size_t object_sizes = class_data.false_dirty_byte_count; 577 float avg_object_size = object_sizes * 1.0f / object_count; 578 const std::string& descriptor = class_data.descriptor; 579 os_ << " " << mirror::Class::PrettyClass(klass) << " (" 580 << "objects: " << object_count << ", " 581 << "avg object size: " << avg_object_size << ", " 582 << "total bytes: " << object_sizes << ", " 583 << "class descriptor: '" << descriptor << "'" 584 
<< ")\n"; 585 } 586 } 587 588 void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) { 589 // vector of pairs (size_t count, Class*) 590 auto clean_object_class_values = 591 SortByValueDesc<mirror::Class*, size_t, ClassData>( 592 class_data_, 593 [](const ClassData& d) { return d.clean_object_count; }); 594 os_ << "\n" << " Clean object count by class:\n"; 595 for (const auto& vk_pair : clean_object_class_values) { 596 os_ << " " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n"; 597 } 598 } 599 600 private: 601 // Aggregate and detail class data from an image diff. 602 struct ClassData { 603 size_t dirty_object_count = 0; 604 // Track only the byte-per-byte dirtiness (in bytes) 605 size_t dirty_object_byte_count = 0; 606 // Track the object-by-object dirtiness (in bytes) 607 size_t dirty_object_size_in_bytes = 0; 608 size_t clean_object_count = 0; 609 std::string descriptor; 610 size_t false_dirty_byte_count = 0; 611 size_t false_dirty_object_count = 0; 612 std::vector<mirror::Object*> false_dirty_objects; 613 // Remote pointers to dirty objects 614 std::vector<mirror::Object*> dirty_objects; 615 616 void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) { 617 ++clean_object_count; 618 } 619 620 void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote) 621 REQUIRES_SHARED(Locks::mutator_lock_) { 622 ++dirty_object_count; 623 dirty_object_byte_count += CountDirtyBytes(object, object_remote); 624 dirty_object_size_in_bytes += EntrySize(object); 625 dirty_objects.push_back(object_remote); 626 } 627 628 void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) { 629 ++false_dirty_object_count; 630 false_dirty_objects.push_back(object); 631 false_dirty_byte_count += EntrySize(object); 632 } 633 634 private: 635 // Go byte-by-byte and figure out what exactly got dirtied 636 static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2) 637 
REQUIRES_SHARED(Locks::mutator_lock_) { 638 const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1); 639 const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2); 640 size_t dirty_bytes = 0; 641 size_t object_size = EntrySize(object1); 642 for (size_t i = 0; i < object_size; ++i) { 643 if (cur1[i] != cur2[i]) { 644 dirty_bytes++; 645 } 646 } 647 return dirty_bytes; 648 } 649 }; 650 651 std::ostream& os_; 652 bool dump_dirty_objects_; 653 std::unordered_set<mirror::Object*> dirty_objects_; 654 std::map<mirror::Class*, ClassData> class_data_; 655 656 DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase); 657 }; 658 659 // Region analysis for ArtMethods. 660 class ImgArtMethodVisitor { 661 public: 662 using ComputeDirtyFunc = std::function<void(ArtMethod*, 663 const uint8_t*, 664 const std::set<size_t>&)>; 665 ImgArtMethodVisitor(ComputeDirtyFunc dirty_func, 666 const uint8_t* begin_image_ptr, 667 const std::set<size_t>& dirty_pages) : 668 dirty_func_(std::move(dirty_func)), 669 begin_image_ptr_(begin_image_ptr), 670 dirty_pages_(dirty_pages) { } 671 void operator()(ArtMethod& method) const { 672 dirty_func_(&method, begin_image_ptr_, dirty_pages_); 673 } 674 675 private: 676 const ComputeDirtyFunc dirty_func_; 677 const uint8_t* begin_image_ptr_; 678 const std::set<size_t>& dirty_pages_; 679 }; 680 681 // Struct and functor for computing offsets of members of ArtMethods. 682 // template <typename RegionType> 683 struct MemberInfo { 684 template <typename T> 685 void operator() (const ArtMethod* method, const T* member_address, const std::string& name) { 686 // Check that member_address is a pointer inside *method. 
687 DCHECK(reinterpret_cast<uintptr_t>(method) <= reinterpret_cast<uintptr_t>(member_address)); 688 DCHECK(reinterpret_cast<uintptr_t>(member_address) + sizeof(T) <= 689 reinterpret_cast<uintptr_t>(method) + sizeof(ArtMethod)); 690 size_t offset = 691 reinterpret_cast<uintptr_t>(member_address) - reinterpret_cast<uintptr_t>(method); 692 offset_to_name_size_.insert({offset, NameAndSize(sizeof(T), name)}); 693 } 694 695 struct NameAndSize { 696 size_t size_; 697 std::string name_; 698 NameAndSize(size_t size, const std::string& name) : size_(size), name_(name) { } 699 NameAndSize() : size_(0), name_("INVALID") { } 700 }; 701 702 std::map<size_t, NameAndSize> offset_to_name_size_; 703 }; 704 705 template<> 706 class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> { 707 public: 708 RegionSpecializedBase(std::ostream* os, 709 ArrayRef<uint8_t> remote_contents, 710 ArrayRef<uint8_t> zygote_contents, 711 const backtrace_map_t& boot_map, 712 const ImageHeader& image_header, 713 bool dump_dirty_objects ATTRIBUTE_UNUSED) 714 : RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header), 715 os_(*os) { 716 // Prepare the table for offset to member lookups. 717 ArtMethod* art_method = reinterpret_cast<ArtMethod*>(&remote_contents[0]); 718 art_method->VisitMembers(member_info_); 719 // Prepare the table for address to symbolic entry point names. 720 BuildEntryPointNames(); 721 class_linker_ = Runtime::Current()->GetClassLinker(); 722 } 723 724 // Define a common public type name for use by RegionData. 
725 using VisitorClass = ImgArtMethodVisitor; 726 727 void VisitEntries(VisitorClass* visitor, 728 uint8_t* base, 729 PointerSize pointer_size) 730 REQUIRES_SHARED(Locks::mutator_lock_) { 731 RegionCommon<ArtMethod>::image_header_.VisitPackedArtMethods(*visitor, base, pointer_size); 732 } 733 734 void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED) 735 REQUIRES_SHARED(Locks::mutator_lock_) { 736 } 737 738 void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) { 739 } 740 741 void AddFalseDirtyEntry(ArtMethod* method) 742 REQUIRES_SHARED(Locks::mutator_lock_) { 743 RegionCommon<ArtMethod>::AddFalseDirtyEntry(method); 744 } 745 746 void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote) 747 REQUIRES_SHARED(Locks::mutator_lock_) { 748 size_t entry_size = EntrySize(method); 749 ++different_entries_; 750 dirty_entry_bytes_ += entry_size; 751 // Increment counts for the fields that are dirty 752 const uint8_t* current = reinterpret_cast<const uint8_t*>(method); 753 const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote); 754 // ArtMethods always log their dirty count and entries. 755 for (size_t i = 0; i < entry_size; ++i) { 756 if (current[i] != current_remote[i]) { 757 field_dirty_count_[i]++; 758 } 759 } 760 dirty_entries_.push_back(method); 761 } 762 763 void DiffEntryContents(ArtMethod* method, 764 uint8_t* remote_bytes, 765 const uint8_t* base_ptr, 766 bool log_dirty_objects ATTRIBUTE_UNUSED) 767 REQUIRES_SHARED(Locks::mutator_lock_) { 768 const char* tabs = " "; 769 os_ << tabs << "ArtMethod " << ArtMethod::PrettyMethod(method) << "\n"; 770 771 std::unordered_set<size_t> dirty_members; 772 // Examine the members comprising the ArtMethod, computing which members are dirty. 
773 for (const std::pair<const size_t, 774 MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) { 775 const size_t offset = p.first; 776 if (memcmp(base_ptr + offset, remote_bytes + offset, p.second.size_) != 0) { 777 dirty_members.insert(p.first); 778 } 779 } 780 // Dump different fields. 781 if (!dirty_members.empty()) { 782 os_ << tabs << "Dirty members " << dirty_members.size() << "\n"; 783 for (size_t offset : dirty_members) { 784 const MemberInfo::NameAndSize& member_info = member_info_.offset_to_name_size_[offset]; 785 os_ << tabs << member_info.name_ 786 << " original=" << StringFromBytes(base_ptr + offset, member_info.size_) 787 << " remote=" << StringFromBytes(remote_bytes + offset, member_info.size_) 788 << "\n"; 789 } 790 } 791 os_ << "\n"; 792 } 793 794 void DumpDirtyObjects() REQUIRES_SHARED(Locks::mutator_lock_) { 795 } 796 797 void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) { 798 DumpSamplesAndOffsetCount(); 799 os_ << " offset to field map:\n"; 800 for (const std::pair<const size_t, 801 MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) { 802 const size_t offset = p.first; 803 const size_t size = p.second.size_; 804 os_ << StringPrintf(" %zu-%zu: ", offset, offset + size - 1) 805 << p.second.name_ 806 << std::endl; 807 } 808 809 os_ << " field contents:\n"; 810 for (ArtMethod* method : dirty_entries_) { 811 // remote method 812 auto art_method = reinterpret_cast<ArtMethod*>(method); 813 // remote class 814 ObjPtr<mirror::Class> remote_declaring_class = 815 FixUpRemotePointer(art_method->GetDeclaringClass(), 816 RegionCommon<ArtMethod>::remote_contents_, 817 RegionCommon<ArtMethod>::boot_map_); 818 // local class 819 ObjPtr<mirror::Class> declaring_class = 820 RemoteContentsPointerToLocal(remote_declaring_class, 821 RegionCommon<ArtMethod>::remote_contents_, 822 RegionCommon<ArtMethod>::image_header_); 823 DumpOneArtMethod(art_method, declaring_class, remote_declaring_class); 824 } 825 } 826 827 void 
DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) { 828 os_ << "\n" << " False-dirty ArtMethods\n"; 829 os_ << " field contents:\n"; 830 for (ArtMethod* method : false_dirty_entries_) { 831 // local class 832 ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass(); 833 DumpOneArtMethod(method, declaring_class, nullptr); 834 } 835 } 836 837 void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) { 838 } 839 840 private: 841 std::ostream& os_; 842 MemberInfo member_info_; 843 std::map<const void*, std::string> entry_point_names_; 844 ClassLinker* class_linker_; 845 846 // Compute a map of addresses to names in the boot OAT file(s). 847 void BuildEntryPointNames() { 848 OatFileManager& oat_file_manager = Runtime::Current()->GetOatFileManager(); 849 std::vector<const OatFile*> boot_oat_files = oat_file_manager.GetBootOatFiles(); 850 for (const OatFile* oat_file : boot_oat_files) { 851 const OatHeader& oat_header = oat_file->GetOatHeader(); 852 const void* jdl = oat_header.GetJniDlsymLookup(); 853 if (jdl != nullptr) { 854 entry_point_names_[jdl] = "JniDlsymLookup (from boot oat file)"; 855 } 856 const void* qgjt = oat_header.GetQuickGenericJniTrampoline(); 857 if (qgjt != nullptr) { 858 entry_point_names_[qgjt] = "QuickGenericJniTrampoline (from boot oat file)"; 859 } 860 const void* qrt = oat_header.GetQuickResolutionTrampoline(); 861 if (qrt != nullptr) { 862 entry_point_names_[qrt] = "QuickResolutionTrampoline (from boot oat file)"; 863 } 864 const void* qict = oat_header.GetQuickImtConflictTrampoline(); 865 if (qict != nullptr) { 866 entry_point_names_[qict] = "QuickImtConflictTrampoline (from boot oat file)"; 867 } 868 const void* q2ib = oat_header.GetQuickToInterpreterBridge(); 869 if (q2ib != nullptr) { 870 entry_point_names_[q2ib] = "QuickToInterpreterBridge (from boot oat file)"; 871 } 872 } 873 } 874 875 std::string StringFromBytes(const uint8_t* bytes, size_t size) { 876 switch (size) { 877 case 1: 878 return StringPrintf("%" 
PRIx8, *bytes); 879 case 2: 880 return StringPrintf("%" PRIx16, *reinterpret_cast<const uint16_t*>(bytes)); 881 case 4: 882 case 8: { 883 // Compute an address if the bytes might contain one. 884 uint64_t intval; 885 if (size == 4) { 886 intval = *reinterpret_cast<const uint32_t*>(bytes); 887 } else { 888 intval = *reinterpret_cast<const uint64_t*>(bytes); 889 } 890 const void* addr = reinterpret_cast<const void*>(intval); 891 // Match the address against those that have Is* methods in the ClassLinker. 892 if (class_linker_->IsQuickToInterpreterBridge(addr)) { 893 return "QuickToInterpreterBridge"; 894 } else if (class_linker_->IsQuickGenericJniStub(addr)) { 895 return "QuickGenericJniStub"; 896 } else if (class_linker_->IsQuickResolutionStub(addr)) { 897 return "QuickResolutionStub"; 898 } else if (class_linker_->IsJniDlsymLookupStub(addr)) { 899 return "JniDlsymLookupStub"; 900 } 901 // Match the address against those that we saved from the boot OAT files. 902 if (entry_point_names_.find(addr) != entry_point_names_.end()) { 903 return entry_point_names_[addr]; 904 } 905 return StringPrintf("%" PRIx64, intval); 906 } 907 default: 908 LOG(WARNING) << "Don't know how to convert " << size << " bytes to integer"; 909 return "<UNKNOWN>"; 910 } 911 } 912 913 void DumpOneArtMethod(ArtMethod* art_method, 914 ObjPtr<mirror::Class> declaring_class, 915 ObjPtr<mirror::Class> remote_declaring_class) 916 REQUIRES_SHARED(Locks::mutator_lock_) { 917 PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet()); 918 os_ << " " << reinterpret_cast<const void*>(art_method) << " "; 919 os_ << " entryPointFromJni: " 920 << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", "; 921 os_ << " entryPointFromQuickCompiledCode: " 922 << reinterpret_cast<const void*>( 923 art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)) 924 << ", "; 925 os_ << " isNative? " << (art_method->IsNative() ? 
"yes" : "no") << ", ";
    // Null for runtime methods.
    if (declaring_class != nullptr) {
      os_ << " class_status (local): " << declaring_class->GetStatus();
    }
    if (remote_declaring_class != nullptr) {
      os_ << ", class_status (remote): " << remote_declaring_class->GetStatus();
    }
    os_ << "\n";
  }

  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
};

// Driver that walks all entries of type T in an image region and classifies
// them as clean / dirty / false-dirty, delegating type-specific work to
// RegionSpecializedBase<T>.
template <typename T>
class RegionData : public RegionSpecializedBase<T> {
 public:
  RegionData(std::ostream* os,
             ArrayRef<uint8_t> remote_contents,
             ArrayRef<uint8_t> zygote_contents,
             const backtrace_map_t& boot_map,
             const ImageHeader& image_header,
             bool dump_dirty_objects)
      : RegionSpecializedBase<T>(os,
                                 remote_contents,
                                 zygote_contents,
                                 boot_map,
                                 image_header,
                                 dump_dirty_objects),
        os_(*os) {
    // A remote process to diff against is required; zygote contents may be empty.
    CHECK(!remote_contents.empty());
  }

  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
  // collecting and reporting data regarding dirty, difference, etc.
  void ProcessRegion(const MappingData& mapping_data,
                     RemoteProcesses remotes,
                     const uint8_t* begin_image_ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Visit every entry and classify it against the remote (and zygote) copies.
    typename RegionSpecializedBase<T>::VisitorClass visitor(
        [this](T* entry,
               const uint8_t* begin_image_ptr,
               const std::set<size_t>& dirty_page_set) REQUIRES_SHARED(Locks::mutator_lock_) {
          this->ComputeEntryDirty(entry, begin_image_ptr, dirty_page_set);
        },
        begin_image_ptr,
        mapping_data.dirty_page_set);
    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
    RegionSpecializedBase<T>::VisitEntries(&visitor,
                                           const_cast<uint8_t*>(begin_image_ptr),
                                           pointer_size);

    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
    // TODO: fix this now that there are multiple regions in a mapping.
    float true_dirtied_percent =
        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);

    // Entry specific statistics.
    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n "
        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n "
        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n "
        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n "
        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n "
        << "\n";

    const uint8_t* base_ptr = begin_image_ptr;
    switch (remotes) {
      case RemoteProcesses::kZygoteOnly:
        os_ << " Zygote shared dirty entries: ";
        break;
      case RemoteProcesses::kImageAndZygote:
        os_ << " Application dirty entries (private dirty): ";
        // If we are dumping private dirty, diff against the zygote map to make it clearer what
        // fields caused the page to be private dirty.
        base_ptr = RegionCommon<T>::zygote_contents_.data();
        break;
      case RemoteProcesses::kImageOnly:
        os_ << " Application dirty entries (unknown whether private or shared dirty): ";
        break;
    }
    DiffDirtyEntries(ProcessType::kRemote,
                     begin_image_ptr,
                     RegionCommon<T>::remote_contents_,
                     base_ptr,
                     /*log_dirty_objects=*/true);
    // Print shared dirty after since it's less important.
    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
      // We only reach this point if both pids were specified. Furthermore,
      // entries are only displayed here if they differed in both the image
      // and the zygote, so they are probably private dirty.
      CHECK(remotes == RemoteProcesses::kImageAndZygote);
      os_ << "\n" << " Zygote dirty entries (probably shared dirty): ";
      DiffDirtyEntries(ProcessType::kZygote,
                       begin_image_ptr,
                       RegionCommon<T>::zygote_contents_,
                       begin_image_ptr,
                       /*log_dirty_objects=*/false);
    }
    RegionSpecializedBase<T>::DumpDirtyObjects();
    RegionSpecializedBase<T>::DumpDirtyEntries();
    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
    RegionSpecializedBase<T>::DumpCleanEntries();
  }

 private:
  std::ostream& os_;

  // Diff and print each previously-recorded dirty entry of `process_type`
  // against `contents`, using `base_ptr` as the comparison baseline.
  void DiffDirtyEntries(ProcessType process_type,
                        const uint8_t* begin_image_ptr,
                        ArrayRef<uint8_t> contents,
                        const uint8_t* base_ptr,
                        bool log_dirty_objects)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
    const std::set<T*>& entries =
        (process_type == ProcessType::kZygote) ?
            RegionCommon<T>::zygote_dirty_entries_:
            RegionCommon<T>::image_dirty_entries_;
    for (T* entry : entries) {
      uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
      // The entry's offset from the image start locates it inside `contents`.
      ptrdiff_t offset = entry_bytes - begin_image_ptr;
      uint8_t* remote_bytes = &contents[offset];
      RegionSpecializedBase<T>::DiffEntryContents(entry,
                                                  remote_bytes,
                                                  &base_ptr[offset],
                                                  log_dirty_objects);
    }
  }

  // Classify one entry as image-dirty, zygote-dirty, clean, or false-dirty by
  // comparing the local bytes with the remote (and, when present, zygote) bytes.
  void ComputeEntryDirty(T* entry,
                         const uint8_t* begin_image_ptr,
                         const std::set<size_t>& dirty_pages)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Set up pointers in the remote and the zygote for comparison.
    uint8_t* current = reinterpret_cast<uint8_t*>(entry);
    ptrdiff_t offset = current - begin_image_ptr;
    T* entry_remote =
        reinterpret_cast<T*>(const_cast<uint8_t*>(&RegionCommon<T>::remote_contents_[offset]));
    const bool have_zygote = !RegionCommon<T>::zygote_contents_.empty();
    const uint8_t* current_zygote =
        have_zygote ?
&RegionCommon<T>::zygote_contents_[offset] : nullptr;
    T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
    // Visit and classify entries at the current location.
    RegionSpecializedBase<T>::VisitEntry(entry);

    // Test private dirty first.
    bool is_dirty = false;
    if (have_zygote) {
      bool private_dirty = EntriesDiffer(entry, entry_zygote, entry_remote);
      if (private_dirty) {
        // Private dirty, app vs zygote.
        is_dirty = true;
        RegionCommon<T>::AddImageDirtyEntry(entry);
      }
      if (EntriesDiffer(entry, entry_zygote, entry)) {
        // Shared dirty, zygote vs image.
        is_dirty = true;
        RegionCommon<T>::AddZygoteDirtyEntry(entry);
      }
    } else if (EntriesDiffer(entry, entry_remote, entry)) {
      // Shared or private dirty, app vs image.
      is_dirty = true;
      RegionCommon<T>::AddImageDirtyEntry(entry);
    }
    if (is_dirty) {
      // TODO: Add support dirty entries in zygote and image.
      RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
    } else {
      RegionSpecializedBase<T>::AddCleanEntry(entry);
      if (RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
        // This entry was either never mutated or got mutated back to the same value.
        // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
        RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
      }
    }
  }

  DISALLOW_COPY_AND_ASSIGN(RegionData);
};

}  // namespace


// Top-level driver: diffs the boot image of this (local) runtime against the
// same image mapped in a remote process, and optionally against the zygote.
class ImgDiagDumper {
 public:
  explicit ImgDiagDumper(std::ostream* os,
                         pid_t image_diff_pid,
                         pid_t zygote_diff_pid,
                         bool dump_dirty_objects)
      : os_(os),
        image_diff_pid_(image_diff_pid),
        zygote_diff_pid_(zygote_diff_pid),
        dump_dirty_objects_(dump_dirty_objects),
        zygote_pid_only_(false) {}

  // Validate the pids, open every /proc file needed for the diff, and log
  // dirty pages found in the (supposedly clean) local boot image.
  // Returns false (after writing a message to os_) on any failure.
  bool Init() {
    std::ostream& os = *os_;

    if (image_diff_pid_ < 0 && zygote_diff_pid_ < 0) {
      os << "Either --image-diff-pid or --zygote-diff-pid (or both) must be specified.\n";
      return false;
    }

    // To avoid the combinations of command-line argument use cases:
    // If the user invoked with only --zygote-diff-pid, shuffle that to
    // image_diff_pid_, invalidate zygote_diff_pid_, and remember that
    // image_diff_pid_ is now special.
    if (image_diff_pid_ < 0) {
      image_diff_pid_ = zygote_diff_pid_;
      zygote_diff_pid_ = -1;
      zygote_pid_only_ = true;
    }

    {
      // Check that the process we are about to diff against actually exists.
      struct stat sts;
      std::string proc_pid_str =
          StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
      if (stat(proc_pid_str.c_str(), &sts) == -1) {
        os << "Process does not exist";
        return false;
      }
    }

    auto open_proc_maps = [&os](pid_t pid, /*out*/ std::unique_ptr<BacktraceMap>* proc_maps) {
      // Open /proc/<pid>/maps to view memory maps.
proc_maps->reset(BacktraceMap::Create(pid));
      if (*proc_maps == nullptr) {
        os << "Could not read backtrace maps for " << pid;
        return false;
      }
      return true;
    };
    auto open_file = [&os] (const char* file_name, /*out*/ std::unique_ptr<File>* file) {
      file->reset(OS::OpenFileForReading(file_name));
      if (*file == nullptr) {
        os << "Failed to open " << file_name << " for reading";
        return false;
      }
      return true;
    };
    auto open_mem_file = [&open_file](pid_t pid, /*out*/ std::unique_ptr<File>* mem_file) {
      // Open /proc/<pid>/mem and for reading remote contents.
      std::string mem_file_name =
          StringPrintf("/proc/%ld/mem", static_cast<long>(pid));  // NOLINT [runtime/int]
      return open_file(mem_file_name.c_str(), mem_file);
    };
    auto open_pagemap_file = [&open_file](pid_t pid, /*out*/ std::unique_ptr<File>* pagemap_file) {
      // Open /proc/<pid>/pagemap.
      std::string pagemap_file_name = StringPrintf(
          "/proc/%ld/pagemap", static_cast<long>(pid));  // NOLINT [runtime/int]
      return open_file(pagemap_file_name.c_str(), pagemap_file);
    };

    // Open files for inspecting image memory.
    std::unique_ptr<BacktraceMap> image_proc_maps;
    std::unique_ptr<File> image_mem_file;
    std::unique_ptr<File> image_pagemap_file;
    if (!open_proc_maps(image_diff_pid_, &image_proc_maps) ||
        !open_mem_file(image_diff_pid_, &image_mem_file) ||
        !open_pagemap_file(image_diff_pid_, &image_pagemap_file)) {
      return false;
    }

    // If zygote_diff_pid_ != -1, open files for inspecting zygote memory.
    std::unique_ptr<BacktraceMap> zygote_proc_maps;
    std::unique_ptr<File> zygote_mem_file;
    std::unique_ptr<File> zygote_pagemap_file;
    if (zygote_diff_pid_ != -1) {
      if (!open_proc_maps(zygote_diff_pid_, &zygote_proc_maps) ||
          !open_mem_file(zygote_diff_pid_, &zygote_mem_file) ||
          !open_pagemap_file(zygote_diff_pid_, &zygote_pagemap_file)) {
        return false;
      }
    }

    // Files used to inspect the local (this process) and kernel page state.
    std::unique_ptr<File> clean_pagemap_file;
    std::unique_ptr<File> kpageflags_file;
    std::unique_ptr<File> kpagecount_file;
    if (!open_file("/proc/self/pagemap", &clean_pagemap_file) ||
        !open_file("/proc/kpageflags", &kpageflags_file) ||
        !open_file("/proc/kpagecount", &kpagecount_file)) {
      return false;
    }

    // Note: the boot image is not really clean but close enough.
    // For now, log pages found to be dirty.
    // TODO: Rewrite imgdiag to load boot image without creating a runtime.
    // FIXME: The following does not reliably detect dirty pages.
    Runtime* runtime = Runtime::Current();
    CHECK(!runtime->ShouldRelocate());
    size_t total_dirty_pages = 0u;
    for (gc::space::ImageSpace* space : runtime->GetHeap()->GetBootImageSpaces()) {
      const ImageHeader& image_header = space->GetImageHeader();
      const uint8_t* image_begin = image_header.GetImageBegin();
      const uint8_t* image_end = AlignUp(image_begin + image_header.GetImageSize(), kPageSize);
      size_t virtual_page_idx_begin = reinterpret_cast<uintptr_t>(image_begin) / kPageSize;
      size_t virtual_page_idx_end = reinterpret_cast<uintptr_t>(image_end) / kPageSize;
      size_t num_virtual_pages = virtual_page_idx_end - virtual_page_idx_begin;

      std::string error_msg;
      // Map each virtual page of this image space to its physical page frame.
      std::vector<uint64_t> page_frame_numbers(num_virtual_pages);
      if (!GetPageFrameNumbers(clean_pagemap_file.get(),
                               virtual_page_idx_begin,
                               ArrayRef<uint64_t>(page_frame_numbers),
                               &error_msg)) {
        os << "Failed to get page frame numbers for image space " << space->GetImageLocation()
           << ", error: " << error_msg;
        return false;
      }

      std::vector<uint64_t> page_flags(num_virtual_pages);
      if (!GetPageFlagsOrCounts(kpageflags_file.get(),
                                ArrayRef<const uint64_t>(page_frame_numbers),
                                ArrayRef<uint64_t>(page_flags),
                                &error_msg)) {
        os << "Failed to get page flags for image space " << space->GetImageLocation()
           << ", error: " << error_msg;
        return false;
      }

      // Count dirty pages (and remember the first one) for logging.
      size_t num_dirty_pages = 0u;
      std::optional<size_t> first_dirty_page;
      for (size_t i = 0u, size = page_flags.size(); i != size; ++i) {
        if (UNLIKELY((page_flags[i] & kPageFlagsDirtyMask) != 0u)) {
          ++num_dirty_pages;
          if (!first_dirty_page.has_value()) {
            first_dirty_page = i;
          }
        }
      }
      if (num_dirty_pages != 0u) {
        DCHECK(first_dirty_page.has_value());
        os << "Found " << num_dirty_pages << " dirty pages for " << space->GetImageLocation()
           << ", first dirty page: " << first_dirty_page.value_or(0u);
        total_dirty_pages += num_dirty_pages;
      }
    }

    // Commit the mappings and files.
    image_proc_maps_ = std::move(image_proc_maps);
    image_mem_file_ = std::move(*image_mem_file);
    image_pagemap_file_ = std::move(*image_pagemap_file);
    if (zygote_diff_pid_ != -1) {
      zygote_proc_maps_ = std::move(zygote_proc_maps);
      zygote_mem_file_ = std::move(*zygote_mem_file);
      zygote_pagemap_file_ = std::move(*zygote_pagemap_file);
    }
    clean_pagemap_file_ = std::move(*clean_pagemap_file);
    kpageflags_file_ = std::move(*kpageflags_file);
    kpagecount_file_ = std::move(*kpagecount_file);

    return true;
  }

  // Print image metadata, then (if any diff pid was given) the full diff.
  bool Dump(const ImageHeader& image_header, const std::string& image_location)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    os << "IMAGE LOCATION: " << image_location << "\n\n";

    os << "MAGIC: " << image_header.GetMagic() << "\n\n";

    os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header.GetImageBegin()) << "\n\n";

    PrintPidLine("IMAGE", image_diff_pid_);
    os << "\n\n";
    PrintPidLine("ZYGOTE", zygote_diff_pid_);
    bool ret = true;
    if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
      ret = DumpImageDiff(image_header, image_location);
      os << "\n\n";
    }

    os << std::flush;

    return ret;
  }

 private:
  bool DumpImageDiff(const ImageHeader& image_header, const std::string& image_location)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return DumpImageDiffMap(image_header, image_location);
  }

  // Compute page- and byte-level dirtiness statistics for the boot image
  // mapping, storing the results into `mapping_data`.
  bool ComputeDirtyBytes(const ImageHeader& image_header,
                         const uint8_t* image_begin,
                         const backtrace_map_t& boot_map,
                         ArrayRef<uint8_t> remote_contents,
                         MappingData* mapping_data /*out*/) {
    std::ostream& os = *os_;

    size_t virtual_page_idx = 0;  // Virtual page number (for an absolute memory address)
    size_t
page_idx = 0;  // Page index relative to 0
    size_t previous_page_idx = 0;  // Previous page index relative to 0


    // Iterate through one page at a time. Boot map begin/end already implicitly aligned.
    for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += kPageSize) {
      ptrdiff_t offset = begin - boot_map.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + offset;
      const uint8_t* remote_ptr = &remote_contents[offset];

      if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
        mapping_data->different_pages++;

        // Count the number of 32-bit integers that are different.
        for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
          const uint32_t* remote_ptr_int32 = reinterpret_cast<const uint32_t*>(remote_ptr);
          const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);

          if (remote_ptr_int32[i] != local_ptr_int32[i]) {
            mapping_data->different_int32s++;
          }
        }
      }
    }

    // Per-image-section counters for pages found to be both private and dirty.
    std::vector<size_t> private_dirty_pages_for_section(ImageHeader::kSectionCount, 0u);

    // Iterate through one byte at a time.
    ptrdiff_t page_off_begin = image_header.GetImageBegin() - image_begin;
    for (uintptr_t begin = boot_map.start; begin != boot_map.end; ++begin) {
      previous_page_idx = page_idx;
      ptrdiff_t offset = begin - boot_map.start;

      // We treat the image header as part of the memory map for now
      // If we wanted to change this, we could pass base=start+sizeof(ImageHeader)
      // But it might still be interesting to see if any of the ImageHeader data mutated
      const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + offset;
      const uint8_t* remote_ptr = &remote_contents[offset];

      virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;

      // Calculate the page index, relative to the 0th page where the image begins
      page_idx = (offset + page_off_begin) / kPageSize;
      if (*local_ptr != *remote_ptr) {
        // Track number of bytes that are different
        mapping_data->different_bytes++;
      }

      // Independently count the # of dirty pages on the remote side
      size_t remote_virtual_page_idx = begin / kPageSize;
      // Only query kernel page state once per page (on page transitions).
      if (previous_page_idx != page_idx) {
        uint64_t page_count = 0xC0FFEE;
        // TODO: virtual_page_idx needs to be from the same process
        std::string error_msg;
        int dirtiness = (IsPageDirty(&image_pagemap_file_,  // Image-diff-pid procmap
                                     &clean_pagemap_file_,  // Self procmap
                                     &kpageflags_file_,
                                     &kpagecount_file_,
                                     remote_virtual_page_idx,  // potentially "dirty" page
                                     virtual_page_idx,  // true "clean" page
                                     &page_count,
                                     &error_msg));
        if (dirtiness < 0) {
          os << error_msg;
          return false;
        } else if (dirtiness > 0) {
          mapping_data->dirty_pages++;
          mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
        }

        bool is_dirty = dirtiness > 0;
        bool is_private = page_count == 1;

        if (page_count == 1) {
          mapping_data->private_pages++;
        }

        if (is_dirty && is_private) {
          mapping_data->private_dirty_pages++;
          // Attribute the private-dirty page to every image section containing it.
          for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
            const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
            if (image_header.GetImageSection(section).Contains(offset)) {
              ++private_dirty_pages_for_section[i];
            }
          }
        }
      }
    }
    // Dirty pages whose contents did not actually differ are "false dirty".
    mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
    // Print low-level (bytes, int32s, pages) statistics.
    os << mapping_data->different_bytes << " differing bytes,\n "
       << mapping_data->different_int32s << " differing int32s,\n "
       << mapping_data->different_pages << " differing pages,\n "
       << mapping_data->dirty_pages << " pages are dirty;\n "
       << mapping_data->false_dirty_pages << " pages are false dirty;\n "
       << mapping_data->private_pages << " pages are private;\n "
       << mapping_data->private_dirty_pages << " pages are Private_Dirty\n "
       << "\n";

    size_t total_private_dirty_pages = std::accumulate(private_dirty_pages_for_section.begin(),
                                                       private_dirty_pages_for_section.end(),
                                                       0u);
    os << "Image sections (total private dirty pages " << total_private_dirty_pages << ")\n";
    for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
      const ImageHeader::ImageSections section = static_cast<ImageHeader::ImageSections>(i);
      os << section << " " << image_header.GetImageSection(section)
         << " private dirty pages=" << private_dirty_pages_for_section[i] << "\n";
    }
    os << "\n";

    return true;
  }

  // Look at /proc/$pid/mem and only diff the things from there
  bool DumpImageDiffMap(const ImageHeader& image_header, const std::string& image_location)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *os_;
    std::string error_msg;

    std::string image_location_base_name = GetImageLocationBaseName(image_location);
    // FIXME: BacktraceMap should provide a const_iterator so that we can take `maps` as const&.
    auto find_boot_map = [&os, &image_location_base_name](BacktraceMap& maps, const char* tag)
        -> std::optional<backtrace_map_t> {
      // Find the memory map for the current boot image component.
      for (const backtrace_map_t* map : maps) {
        if (EndsWith(map->name, image_location_base_name)) {
          if ((map->flags & PROT_WRITE) != 0) {
            return *map;
          }
          // In actuality there's more than 1 map, but the second one is read-only.
          // The one we care about is the write-able map.
          // The readonly maps are guaranteed to be identical, so its not interesting to compare
          // them.
        }
      }
      os << "Could not find map for " << image_location_base_name << " in " << tag;
      return std::nullopt;
    };

    // Find the current boot image mapping.
    std::optional<backtrace_map_t> maybe_boot_map = find_boot_map(*image_proc_maps_, "image");
    if (maybe_boot_map == std::nullopt) {
      return false;
    }
    backtrace_map_t boot_map = maybe_boot_map.value_or(backtrace_map_t{});
    // Sanity check boot_map_.
    CHECK(boot_map.end >= boot_map.start);
    // The size of the boot image mapping.
    size_t boot_map_size = boot_map.end - boot_map.start;

    // If zygote_diff_pid_ != -1, check that the zygote boot map is the same.
    if (zygote_diff_pid_ != -1) {
      std::optional<backtrace_map_t> maybe_zygote_boot_map =
          find_boot_map(*zygote_proc_maps_, "zygote");
      if (maybe_zygote_boot_map == std::nullopt) {
        return false;
      }
      backtrace_map_t zygote_boot_map = maybe_zygote_boot_map.value_or(backtrace_map_t{});
      if (zygote_boot_map.start != boot_map.start || zygote_boot_map.end != boot_map.end) {
        os << "Zygote boot map does not match image boot map: "
           << "zygote begin " << reinterpret_cast<const void*>(zygote_boot_map.start)
           << ", zygote end " << reinterpret_cast<const void*>(zygote_boot_map.end)
           << ", image begin " << reinterpret_cast<const void*>(boot_map.start)
           << ", image end " << reinterpret_cast<const void*>(boot_map.end);
        return false;
      }
    }

    // Walk the bytes and diff against our boot image
    os << "\nObserving boot image header at address "
       << reinterpret_cast<const void*>(&image_header)
       << "\n\n";

    const uint8_t* image_begin_unaligned = image_header.GetImageBegin();
    const uint8_t* image_end_unaligned = image_begin_unaligned + image_header.GetImageSize();

    // Adjust range to nearest page
    const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
    const uint8_t* image_end = AlignUp(image_end_unaligned, kPageSize);

    size_t image_size = image_end - image_begin;
    if (image_size != boot_map_size) {
      os << "Remote boot map size does not match local boot map size: "
         << "local size " << image_size
         << ", remote size " << boot_map_size;
      return false;
    }

    // Read a remote process's mapping contents into a fresh local anonymous map.
    auto read_contents = [&](File* mem_file,
                             /*out*/ MemMap* map,
                             /*out*/ ArrayRef<uint8_t>* contents) {
      DCHECK_ALIGNED(boot_map.start, kPageSize);
      DCHECK_ALIGNED(boot_map_size, kPageSize);
      std::string name = "Contents of " + mem_file->GetPath();
      std::string local_error_msg;
      // We need to use low 4 GiB memory so that we can walk the objects using standard
      // functions that use ObjPtr<> which is checking that it fits into lower 4 GiB.
      *map = MemMap::MapAnonymous(name.c_str(),
                                  boot_map_size,
                                  PROT_READ | PROT_WRITE,
                                  /* low_4gb= */ true,
                                  &local_error_msg);
      if (!map->IsValid()) {
        os << "Failed to allocate anonymous mapping for " << boot_map_size << " bytes.\n";
        return false;
      }
      if (!mem_file->PreadFully(map->Begin(), boot_map_size, boot_map.start)) {
        os << "Could not fully read file " << image_mem_file_.GetPath();
        return false;
      }
      *contents = ArrayRef<uint8_t>(map->Begin(), boot_map_size);
      return true;
    };
    // The contents of /proc/<image_diff_pid_>/mem.
    MemMap remote_contents_map;
    ArrayRef<uint8_t> remote_contents;
    if (!read_contents(&image_mem_file_, &remote_contents_map, &remote_contents)) {
      return false;
    }
    // The contents of /proc/<zygote_diff_pid_>/mem.
    MemMap zygote_contents_map;
    ArrayRef<uint8_t> zygote_contents;
    if (zygote_diff_pid_ != -1) {
      if (!read_contents(&zygote_mem_file_, &zygote_contents_map, &zygote_contents)) {
        return false;
      }
    }

    // TODO: We need to update the entire diff to work with the ASLR. b/77856493
    // Since the images may be relocated, just check the sizes.
    if (static_cast<uintptr_t>(image_end - image_begin) != boot_map.end - boot_map.start) {
      os << "Remote boot map is a different size than local boot map: " <<
         "local begin " << reinterpret_cast<const void*>(image_begin) <<
         ", local end " << reinterpret_cast<const void*>(image_end) <<
         ", remote begin " << reinterpret_cast<const void*>(boot_map.start) <<
         ", remote end " << reinterpret_cast<const void*>(boot_map.end);
      return false;
      // For more validation should also check the ImageHeader from the file
    }

    MappingData mapping_data;

    os << "Mapping at [" << reinterpret_cast<void*>(boot_map.start) << ", "
       << reinterpret_cast<void*>(boot_map.end) << ") had:\n ";
    if (!ComputeDirtyBytes(image_header, image_begin, boot_map, remote_contents, &mapping_data)) {
      return false;
    }
    // Determine which remote processes participate, for reporting purposes.
    RemoteProcesses remotes;
    if (zygote_pid_only_) {
      remotes = RemoteProcesses::kZygoteOnly;
    } else if (zygote_diff_pid_ > 0) {
      remotes = RemoteProcesses::kImageAndZygote;
    } else {
      remotes = RemoteProcesses::kImageOnly;
    }

    // Check all the mirror::Object entries in the image.
    RegionData<mirror::Object> object_region_data(os_,
                                                  remote_contents,
                                                  zygote_contents,
                                                  boot_map,
                                                  image_header,
                                                  dump_dirty_objects_);
    object_region_data.ProcessRegion(mapping_data,
                                     remotes,
                                     image_begin_unaligned);

    // Check all the ArtMethod entries in the image.
    RegionData<ArtMethod> artmethod_region_data(os_,
                                                remote_contents,
                                                zygote_contents,
                                                boot_map,
                                                image_header,
                                                dump_dirty_objects_);
    artmethod_region_data.ProcessRegion(mapping_data,
                                        remotes,
                                        image_begin_unaligned);
    return true;
  }

  // Note: On failure, `*page_frame_number` shall be clobbered.
1601 static bool GetPageFrameNumber(File* page_map_file, 1602 size_t virtual_page_index, 1603 /*out*/ uint64_t* page_frame_number, 1604 /*out*/ std::string* error_msg) { 1605 CHECK(page_frame_number != nullptr); 1606 return GetPageFrameNumbers(page_map_file, 1607 virtual_page_index, 1608 ArrayRef<uint64_t>(page_frame_number, 1u), 1609 error_msg); 1610 } 1611 1612 // Note: On failure, `page_frame_numbers[.]` shall be clobbered. 1613 static bool GetPageFrameNumbers(File* page_map_file, 1614 size_t virtual_page_index, 1615 /*out*/ ArrayRef<uint64_t> page_frame_numbers, 1616 /*out*/ std::string* error_msg) { 1617 CHECK(page_map_file != nullptr); 1618 CHECK_NE(page_frame_numbers.size(), 0u); 1619 CHECK(page_frame_numbers.data() != nullptr); 1620 CHECK(error_msg != nullptr); 1621 1622 // Read 64-bit entries from /proc/$pid/pagemap to get the physical page frame numbers. 1623 if (!page_map_file->PreadFully(page_frame_numbers.data(), 1624 page_frame_numbers.size() * kPageMapEntrySize, 1625 virtual_page_index * kPageMapEntrySize)) { 1626 *error_msg = StringPrintf("Failed to read the virtual page index entries from %s, error: %s", 1627 page_map_file->GetPath().c_str(), 1628 strerror(errno)); 1629 return false; 1630 } 1631 1632 // Extract page frame numbers from pagemap entries. 1633 for (uint64_t& page_frame_number : page_frame_numbers) { 1634 page_frame_number &= kPageFrameNumberMask; 1635 } 1636 1637 return true; 1638 } 1639 1640 // Note: On failure, `page_flags_or_counts[.]` shall be clobbered. 
static bool GetPageFlagsOrCounts(File* kpage_file,
                                   ArrayRef<const uint64_t> page_frame_numbers,
                                   /*out*/ ArrayRef<uint64_t> page_flags_or_counts,
                                   /*out*/ std::string* error_msg) {
    static_assert(kPageFlagsEntrySize == kPageCountEntrySize, "entry size check");
    CHECK_NE(page_frame_numbers.size(), 0u);
    CHECK_EQ(page_flags_or_counts.size(), page_frame_numbers.size());
    CHECK(kpage_file != nullptr);
    CHECK(page_frame_numbers.data() != nullptr);
    CHECK(page_flags_or_counts.data() != nullptr);
    CHECK(error_msg != nullptr);

    // Batch runs of consecutive page frame numbers into a single pread() each.
    size_t size = page_frame_numbers.size();
    size_t i = 0;
    while (i != size) {
      size_t start = i;
      ++i;
      while (i != size && page_frame_numbers[i] - page_frame_numbers[start] == i - start) {
        ++i;
      }
      // Read 64-bit entries from /proc/kpageflags or /proc/kpagecount.
      if (!kpage_file->PreadFully(page_flags_or_counts.data() + start,
                                  (i - start) * kPageMapEntrySize,
                                  page_frame_numbers[start] * kPageFlagsEntrySize)) {
        *error_msg = StringPrintf("Failed to read the page flags or counts from %s, error: %s",
                                  kpage_file->GetPath().c_str(),
                                  strerror(errno));
        return false;
      }
    }

    return true;
  }

  // Decide whether the remote page backing `virtual_page_idx` is dirty by
  // comparing its page frame number against the local "clean" page's.
  // Returns 1 if dirty, 0 if clean, -1 on error (with *error_msg set).
  // Also stores the page's mapping count into *page_count.
  static int IsPageDirty(File* page_map_file,
                         File* clean_pagemap_file,
                         File* kpageflags_file,
                         File* kpagecount_file,
                         size_t virtual_page_idx,
                         size_t clean_virtual_page_idx,
                         // Out parameters:
                         uint64_t* page_count, std::string* error_msg) {
    CHECK(page_map_file != nullptr);
    CHECK(clean_pagemap_file != nullptr);
    CHECK_NE(page_map_file, clean_pagemap_file);
    CHECK(kpageflags_file != nullptr);
    CHECK(kpagecount_file != nullptr);
    CHECK(page_count != nullptr);
    CHECK(error_msg != nullptr);

    // Constants are from https://www.kernel.org/doc/Documentation/vm/pagemap.txt

    uint64_t page_frame_number = 0;
    if (!GetPageFrameNumber(page_map_file, virtual_page_idx, &page_frame_number, error_msg)) {
      return -1;
    }

    uint64_t page_frame_number_clean = 0;
    if (!GetPageFrameNumber(clean_pagemap_file, clean_virtual_page_idx, &page_frame_number_clean,
                            error_msg)) {
      return -1;
    }

    // Read 64-bit entry from /proc/kpageflags to get the dirty bit for a page
    uint64_t kpage_flags_entry = 0;
    if (!kpageflags_file->PreadFully(&kpage_flags_entry,
                                     kPageFlagsEntrySize,
                                     page_frame_number * kPageFlagsEntrySize)) {
      *error_msg = StringPrintf("Failed to read the page flags from %s",
                                kpageflags_file->GetPath().c_str());
      return -1;
    }

    // Read 64-bit entry from /proc/kpagecount to get mapping counts for a page
    if (!kpagecount_file->PreadFully(page_count /*out*/,
                                     kPageCountEntrySize,
                                     page_frame_number * kPageCountEntrySize)) {
      *error_msg = StringPrintf("Failed to read the page count from %s",
                                kpagecount_file->GetPath().c_str());
      return -1;
    }

    // There must be a page frame at the requested address.
    CHECK_EQ(kpage_flags_entry & kPageFlagsNoPageMask, 0u);
    // The page frame must be memory mapped
    CHECK_NE(kpage_flags_entry & kPageFlagsMmapMask, 0u);

    // Page is dirty, i.e. has diverged from file, if the 4th bit is set to 1
    bool flags_dirty = (kpage_flags_entry & kPageFlagsDirtyMask) != 0;

    // page_frame_number_clean must come from the *same* process
    // but a *different* mmap than page_frame_number
    if (flags_dirty) {
      // FIXME: This check sometimes fails and the reason is not understood. b/123852774
      if (page_frame_number != page_frame_number_clean) {
        LOG(ERROR) << "Check failed: page_frame_number != page_frame_number_clean "
                   << "(page_frame_number=" << page_frame_number
                   << ", page_frame_number_clean=" << page_frame_number_clean << ")"
                   << " count: " << *page_count << " flags: 0x" << std::hex << kpage_flags_entry;
      }
    }

    return page_frame_number != page_frame_number_clean;
  }

  // Print the "<KIND> DIFF PID" header line for one diff target.
  void PrintPidLine(const std::string& kind, pid_t pid) {
    if (pid < 0) {
      *os_ << kind << " DIFF PID: disabled\n\n";
    } else {
      *os_ << kind << " DIFF PID (" << pid << "): ";
    }
  }

  // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
  static std::string BaseName(const std::string& str) {
    size_t idx = str.rfind('/');
    if (idx == std::string::npos) {
      return str;
    }

    return str.substr(idx + 1);
  }

  // Return the image location, stripped of any directories, e.g.
"boot.art" or "core.art" 1765 static std::string GetImageLocationBaseName(const std::string& image_location) { 1766 return BaseName(std::string(image_location)); 1767 } 1768 1769 static constexpr size_t kPageMapEntrySize = sizeof(uint64_t); 1770 // bits 0-54 [in /proc/$pid/pagemap] 1771 static constexpr uint64_t kPageFrameNumberMask = (1ULL << 55) - 1; 1772 1773 static constexpr size_t kPageFlagsEntrySize = sizeof(uint64_t); 1774 static constexpr size_t kPageCountEntrySize = sizeof(uint64_t); 1775 static constexpr uint64_t kPageFlagsDirtyMask = (1ULL << 4); // in /proc/kpageflags 1776 static constexpr uint64_t kPageFlagsNoPageMask = (1ULL << 20); // in /proc/kpageflags 1777 static constexpr uint64_t kPageFlagsMmapMask = (1ULL << 11); // in /proc/kpageflags 1778 1779 1780 std::ostream* os_; 1781 pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative 1782 pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative 1783 bool dump_dirty_objects_; // Adds dumping of objects that are dirty. 1784 bool zygote_pid_only_; // The user only specified a pid for the zygote. 1785 1786 // BacktraceMap used for finding the memory mapping of the image file. 1787 std::unique_ptr<BacktraceMap> image_proc_maps_; 1788 // A File for reading /proc/<image_diff_pid_>/mem. 1789 File image_mem_file_; 1790 // A File for reading /proc/<image_diff_pid_>/pagemap. 1791 File image_pagemap_file_; 1792 1793 // BacktraceMap used for finding the memory mapping of the zygote image file. 1794 std::unique_ptr<BacktraceMap> zygote_proc_maps_; 1795 // A File for reading /proc/<zygote_diff_pid_>/mem. 1796 File zygote_mem_file_; 1797 // A File for reading /proc/<zygote_diff_pid_>/pagemap. 1798 File zygote_pagemap_file_; 1799 1800 // A File for reading /proc/self/pagemap. 1801 File clean_pagemap_file_; 1802 // A File for reading /proc/kpageflags. 1803 File kpageflags_file_; 1804 // A File for reading /proc/kpagecount. 
1805 File kpagecount_file_; 1806 1807 DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper); 1808 }; 1809 1810 static int DumpImage(Runtime* runtime, 1811 std::ostream* os, 1812 pid_t image_diff_pid, 1813 pid_t zygote_diff_pid, 1814 bool dump_dirty_objects) { 1815 ScopedObjectAccess soa(Thread::Current()); 1816 gc::Heap* heap = runtime->GetHeap(); 1817 const std::vector<gc::space::ImageSpace*>& image_spaces = heap->GetBootImageSpaces(); 1818 CHECK(!image_spaces.empty()); 1819 ImgDiagDumper img_diag_dumper(os, 1820 image_diff_pid, 1821 zygote_diff_pid, 1822 dump_dirty_objects); 1823 if (!img_diag_dumper.Init()) { 1824 return EXIT_FAILURE; 1825 } 1826 for (gc::space::ImageSpace* image_space : image_spaces) { 1827 const ImageHeader& image_header = image_space->GetImageHeader(); 1828 if (!image_header.IsValid()) { 1829 fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str()); 1830 return EXIT_FAILURE; 1831 } 1832 1833 if (!img_diag_dumper.Dump(image_header, image_space->GetImageLocation())) { 1834 return EXIT_FAILURE; 1835 } 1836 } 1837 return EXIT_SUCCESS; 1838 } 1839 1840 struct ImgDiagArgs : public CmdlineArgs { 1841 protected: 1842 using Base = CmdlineArgs; 1843 1844 ParseStatus ParseCustom(const char* raw_option, 1845 size_t raw_option_length, 1846 std::string* error_msg) override { 1847 DCHECK_EQ(strlen(raw_option), raw_option_length); 1848 { 1849 ParseStatus base_parse = Base::ParseCustom(raw_option, raw_option_length, error_msg); 1850 if (base_parse != kParseUnknownArgument) { 1851 return base_parse; 1852 } 1853 } 1854 1855 std::string_view option(raw_option, raw_option_length); 1856 if (StartsWith(option, "--image-diff-pid=")) { 1857 const char* image_diff_pid = raw_option + strlen("--image-diff-pid="); 1858 1859 if (!android::base::ParseInt(image_diff_pid, &image_diff_pid_)) { 1860 *error_msg = "Image diff pid out of range"; 1861 return kParseError; 1862 } 1863 } else if (StartsWith(option, "--zygote-diff-pid=")) { 1864 const char* 
zygote_diff_pid = raw_option + strlen("--zygote-diff-pid="); 1865 1866 if (!android::base::ParseInt(zygote_diff_pid, &zygote_diff_pid_)) { 1867 *error_msg = "Zygote diff pid out of range"; 1868 return kParseError; 1869 } 1870 } else if (option == "--dump-dirty-objects") { 1871 dump_dirty_objects_ = true; 1872 } else { 1873 return kParseUnknownArgument; 1874 } 1875 1876 return kParseOk; 1877 } 1878 1879 ParseStatus ParseChecks(std::string* error_msg) override { 1880 // Perform the parent checks. 1881 ParseStatus parent_checks = Base::ParseChecks(error_msg); 1882 if (parent_checks != kParseOk) { 1883 return parent_checks; 1884 } 1885 1886 // Perform our own checks. 1887 1888 if (kill(image_diff_pid_, 1889 /*sig*/0) != 0) { // No signal is sent, perform error-checking only. 1890 // Check if the pid exists before proceeding. 1891 if (errno == ESRCH) { 1892 *error_msg = "Process specified does not exist"; 1893 } else { 1894 *error_msg = StringPrintf("Failed to check process status: %s", strerror(errno)); 1895 } 1896 return kParseError; 1897 } else if (instruction_set_ != InstructionSet::kNone && instruction_set_ != kRuntimeISA) { 1898 // Don't allow different ISAs since the images are ISA-specific. 1899 // Right now the code assumes both the runtime ISA and the remote ISA are identical. 1900 *error_msg = "Must use the default runtime ISA; changing ISA is not supported."; 1901 return kParseError; 1902 } 1903 1904 return kParseOk; 1905 } 1906 1907 std::string GetUsage() const override { 1908 std::string usage; 1909 1910 usage += 1911 "Usage: imgdiag [options] ...\n" 1912 " Example: imgdiag --image-diff-pid=$(pidof dex2oat)\n" 1913 " Example: adb shell imgdiag --image-diff-pid=$(pid zygote)\n" 1914 "\n"; 1915 1916 usage += Base::GetUsage(); 1917 1918 usage += // Optional. 
1919 " --image-diff-pid=<pid>: provide the PID of a process whose boot.art you want to diff.\n" 1920 " Example: --image-diff-pid=$(pid zygote)\n" 1921 " --zygote-diff-pid=<pid>: provide the PID of the zygote whose boot.art you want to diff " 1922 "against.\n" 1923 " Example: --zygote-diff-pid=$(pid zygote)\n" 1924 " --dump-dirty-objects: additionally output dirty objects of interest.\n" 1925 "\n"; 1926 1927 return usage; 1928 } 1929 1930 public: 1931 pid_t image_diff_pid_ = -1; 1932 pid_t zygote_diff_pid_ = -1; 1933 bool dump_dirty_objects_ = false; 1934 }; 1935 1936 struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> { 1937 bool ExecuteWithRuntime(Runtime* runtime) override { 1938 CHECK(args_ != nullptr); 1939 1940 return DumpImage(runtime, 1941 args_->os_, 1942 args_->image_diff_pid_, 1943 args_->zygote_diff_pid_, 1944 args_->dump_dirty_objects_) == EXIT_SUCCESS; 1945 } 1946 }; 1947 1948 } // namespace art 1949 1950 int main(int argc, char** argv) { 1951 art::ImgDiagMain main; 1952 return main.Main(argc, argv); 1953 } 1954