/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include "atomic_integer.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "offsets.h"
#include "root_visitor.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
template <typename T> class AtomicStack;
class MarkIfReachesAllocspaceVisitor;
class ModUnionClearCardVisitor;
class ModUnionVisitor;
class ModUnionTableBitmap;
class MarkStackChunk;
typedef AtomicStack<mirror::Object*> ObjectStack;
class SpaceBitmap;
}  // namespace accounting

namespace space {
class ContinuousSpace;
}  // namespace space

class Heap;

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void InitializePhase();
  virtual bool IsConcurrent() const;
  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase();
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypeFull;
  }
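
  // Illustrative only: a rough sketch of how one collection pass moves through the
  // phase methods declared above (heap stands in for the runtime's Heap*). The real
  // sequencing, and all locking and suspension, is driven by the GarbageCollector/
  // Heap machinery, so treat this as a reading aid rather than a calling convention:
  //
  //   MarkSweep collector(heap, /* is_concurrent */ false);
  //   collector.InitializePhase();          // reset per-GC state, find the mark bitmap
  //   collector.MarkingPhase();             // mark roots, then mark transitively
  //   collector.HandleDirtyObjectsPhase();  // concurrent GC only: paused re-scan of dirty cards
  //   collector.ReclaimPhase();             // process references, sweep unmarked objects
  //   collector.FinishPhase();              // update cumulative statistics, reset bitmaps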

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkConcurrentRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkRootsCheckpoint(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Verify that image roots point to only marked objects within the alloc space.
  void VerifyImageRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Makes a space immune. Immune spaces have all of their live objects marked, that is, their
  // mark and live bitmaps are bound together.
  void ImmuneSpace(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void BindLiveToMarkBitmap(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  mirror::Object* GetClearedReferences() {
    return cleared_reference_list_;
  }

  // Proxy for external access to ScanObject.
  void ScanRoot(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Blackens an object.
  void ScanObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;
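
  // Illustrative only: ScanObjectVisit is a template, so the caller supplies the
  // mark visitor as a functor. The operator() signature sketched below (referrer,
  // reference, field offset, is_static flag) is an assumption for illustration;
  // check the ScanObjectVisit definition for the parameters it actually passes:
  //
  //   struct CountReferencesVisitor {
  //     void operator()(const mirror::Object* obj, const mirror::Object* ref,
  //                     MemberOffset offset, bool is_static) const {
  //       // e.g. record the outgoing reference instead of marking it.
  //     }
  //   };
  //   // collector->ScanObjectVisit(obj, CountReferencesVisitor());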

  size_t GetFreedBytes() const {
    return freed_bytes_;
  }

  size_t GetFreedLargeObjectBytes() const {
    return freed_large_object_bytes_;
  }

  size_t GetFreedObjects() const {
    return freed_objects_;
  }

  size_t GetFreedLargeObjects() const {
    return freed_large_objects_;
  }

  uint64_t GetTotalTimeNs() const {
    return total_time_ns_;
  }

  uint64_t GetTotalPausedTimeNs() const {
    return total_paused_time_ns_;
  }

  uint64_t GetTotalFreedObjects() const {
    return total_freed_objects_;
  }

  uint64_t GetTotalFreedBytes() const {
    return total_freed_bytes_;
  }

  // Everything inside the immune range is assumed to be marked.
  void SetImmuneRange(mirror::Object* begin, mirror::Object* end);

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  template <typename Visitor>
  static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_);

  static void MarkObjectCallback(const mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkRootParallelCallback(const mirror::Object* root, void* arg);

  // Marks an object.
  void MarkObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkRoot(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const;

  static bool IsMarkedCallback(const mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ReMarkObjectVisitor(const mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_);

  void MarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmarks an object by clearing the bit inside of the corresponding bitmap or, if the object is
  // in a space set, by removing it from the set.
  void UnMarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the VM thread roots.
  virtual void MarkThreadRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(const mirror::Object* obj);

  // Marks or unmarks a large object depending on whether set is true: if set is true we mark,
  // otherwise we unmark.
  bool MarkLargeObject(const mirror::Object* obj, bool set)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
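
  // Illustrative only: the marking entry points above split "mark the object" from
  // "queue it for scanning". A simplified view of that flow (the authoritative code
  // is MarkObjectNonNull in mark_sweep.cc; large-object and space-set handling are
  // omitted, and the member accesses are sketched in prose rather than real calls):
  //
  //   if (IsImmune(obj)) {
  //     return;  // everything in the immune range is treated as already marked
  //   }
  //   // Set obj's bit in current_mark_bitmap_ (or in the bitmap of whichever space
  //   // actually contains obj); if the bit was not already set, push obj onto
  //   // mark_stack_ so ScanObject can blacken it later.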

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Special sweep for zygote that just marks objects / dirties cards.
  static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset,
                      bool is_static)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void CheckObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expand the mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;

  // Returns true if an object is inside of the immune region (assumed to be marked).
  bool IsImmune(const mirror::Object* obj) const {
    return obj >= immune_begin_ && obj < immune_end_;
  }

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
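
  // Illustrative only: ref_offsets above is a 32-bit vector in which, in its common
  // encoding, each set bit identifies one reference-holding field of the object.
  // A sketch of walking such a vector is shown below; how a bit index maps to a
  // MemberOffset is defined by mirror::Class and is assumed rather than shown:
  //
  //   uint32_t bits = ref_offsets;
  //   while (bits != 0) {
  //     const int bit = __builtin_ctz(bits);  // index of the lowest set bit
  //     bits &= bits - 1;                     // clear that bit
  //     // Translate 'bit' into the field's MemberOffset and visit the reference
  //     // stored there.
  //   }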

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
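
  // Illustrative only: ProcessReferences drives the helpers above in roughly the
  // usual java.lang.ref order; the authoritative sequence lives in mark_sweep.cc:
  //
  //   if (!clear_soft_references) {
  //     PreserveSomeSoftReferences(soft_references);    // keep part of the soft set alive
  //   }
  //   ClearWhiteReferences(soft_references);            // unmarked referents get cleared
  //   ClearWhiteReferences(weak_references);
  //   EnqueueFinalizerReferences(finalizer_references); // revive and queue finalizable objects
  //   ClearWhiteReferences(phantom_references);
  //
  // Cleared references are collected on cleared_reference_list_ (returned by
  // GetClearedReferences()).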

  void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // The mark bitmap of the current space; we check this space first to avoid searching for the
  // appropriate space for an object.
  accounting::SpaceBitmap* current_mark_bitmap_;

  // Cache java.lang.Class for optimization.
  mirror::Class* java_lang_Class_;

  accounting::ObjectStack* mark_stack_;

  // Immune range; every object inside the immune range is assumed to be marked.
  mirror::Object* immune_begin_;
  mirror::Object* immune_end_;

  mirror::Object* soft_reference_list_;
  mirror::Object* weak_reference_list_;
  mirror::Object* finalizer_reference_list_;
  mirror::Object* phantom_reference_list_;
  mirror::Object* cleared_reference_list_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of non-large-object bytes freed in this collection.
  AtomicInteger freed_bytes_;
  // Number of large object bytes freed.
  AtomicInteger freed_large_object_bytes_;
  // Number of objects freed in this collection.
  AtomicInteger freed_objects_;
  // Number of freed large objects.
  AtomicInteger freed_large_objects_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/array objects scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger classes_marked_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger cards_scanned_;

  // Verification.
  size_t live_stack_freeze_size_;

  UniquePtr<Barrier> gc_barrier_;
  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

  bool clear_soft_references_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class InternTableEntryIsUnmarked;
  friend class MarkIfReachesAllocspaceVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  friend class ScanBitmapVisitor;
  friend class ScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_