/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor_pool.h"

#include "base/logging.h"
#include "base/mutex-inl.h"
#include "thread-inl.h"
#include "monitor.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

// Constructor: start with empty bookkeeping, null out the chunk-list table
// (so FreeInternal() can tell used slots from unused ones), then eagerly
// allocate the first chunk so first_free_ is non-null before any
// CreateMonitorInPool() call.
MonitorPool::MonitorPool()
    : current_chunk_list_index_(0), num_chunks_(0), current_chunk_list_capacity_(0),
    first_free_(nullptr) {
  for (size_t i = 0; i < kMaxChunkLists; ++i) {
    monitor_chunks_[i] = nullptr;  // Not absolutely required, but ...
  }
  AllocateChunk();  // Get our first chunk.
}

// Assumes locks are held appropriately when necessary.
// We do not need a lock in the constructor, but we need one when in CreateMonitorInPool.
//
// Carves a fresh chunk of kChunkSize bytes into kChunkCapacity aligned Monitor
// slots and threads them onto the free list. If the current chunk list is
// full, a new (larger) chunk list is started first; list capacities come from
// ChunkListCapacity() (declared in the header, not visible here — the VLOG
// below suggests capacities grow geometrically, but confirm in monitor_pool.h).
void MonitorPool::AllocateChunk() {
  // Only called when the free list is exhausted (or from the constructor).
  DCHECK(first_free_ == nullptr);

  // Do we need to allocate another chunk list?
  if (num_chunks_ == current_chunk_list_capacity_) {
    if (current_chunk_list_capacity_ != 0U) {
      ++current_chunk_list_index_;
      CHECK_LT(current_chunk_list_index_, kMaxChunkLists) << "Out of space for inflated monitors";
      VLOG(monitor) << "Expanding to capacity "
          << 2 * ChunkListCapacity(current_chunk_list_index_) - kInitialChunkStorage;
    }  // else we're initializing
    current_chunk_list_capacity_ = ChunkListCapacity(current_chunk_list_index_);
    // Value-initialized ('()') so unused slots read as 0 — FreeInternal()
    // relies on that to DCHECK which slots hold live chunks.
    uintptr_t* new_list = new uintptr_t[current_chunk_list_capacity_]();
    DCHECK(monitor_chunks_[current_chunk_list_index_] == nullptr);
    monitor_chunks_[current_chunk_list_index_] = new_list;
    num_chunks_ = 0;
  }

  // Allocate the chunk.
  void* chunk = allocator_.allocate(kChunkSize);
  // Check we allocated memory.
  CHECK_NE(reinterpret_cast<uintptr_t>(nullptr), reinterpret_cast<uintptr_t>(chunk));
  // Check it is aligned as we need it.
  CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);

  // Add the chunk.
  monitor_chunks_[current_chunk_list_index_][num_chunks_] = reinterpret_cast<uintptr_t>(chunk);
  num_chunks_++;

  // Set up the free list. The chunk is walked backwards — from its last slot
  // down to its first — linking each slot to its successor, so that when the
  // loop finishes `last` is the chunk's first slot.
  Monitor* last = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(chunk) +
                                             (kChunkCapacity - 1) * kAlignedMonitorSize);
  last->next_free_ = nullptr;
  // Eagerly compute id. The id is derived (via OffsetToMonitorId, declared in
  // the header) from the slot's byte offset in the pool's logical address
  // space: list index, then chunk index within the list, then slot offset
  // within the chunk.
  last->monitor_id_ = OffsetToMonitorId(current_chunk_list_index_* (kMaxListSize * kChunkSize)
      + (num_chunks_ - 1) * kChunkSize + (kChunkCapacity - 1) * kAlignedMonitorSize);
  for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
    Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
                                                 kAlignedMonitorSize);
    before->next_free_ = last;
    // Derive monitor_id from last: each slot's id is exactly one aligned
    // monitor size below its successor's, so no full recomputation is needed.
    before->monitor_id_ = OffsetToMonitorId(MonitorIdToOffset(last->monitor_id_) -
                                            kAlignedMonitorSize);

    last = before;
  }
  // The backward walk must have ended exactly at the start of the chunk.
  DCHECK(last == reinterpret_cast<Monitor*>(chunk));
  first_free_ = last;
}

// Releases every chunk and every chunk-list array owned by the pool.
// Called at shutdown; live Monitors (if any) are not destructed here, their
// raw storage is simply returned to the allocator.
void MonitorPool::FreeInternal() {
  // This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock.
  DCHECK_NE(current_chunk_list_capacity_, 0UL);
  for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
    DCHECK_NE(monitor_chunks_[i], static_cast<uintptr_t*>(nullptr));
    for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
      // Every list before the current one is completely full; in the current
      // list only the first num_chunks_ slots hold chunks. Unused slots were
      // value-initialized to 0 in AllocateChunk().
      if (i < current_chunk_list_index_ || j < num_chunks_) {
        DCHECK_NE(monitor_chunks_[i][j], 0U);
        allocator_.deallocate(reinterpret_cast<uint8_t*>(monitor_chunks_[i][j]), kChunkSize);
      } else {
        DCHECK_EQ(monitor_chunks_[i][j], 0U);
      }
    }
    delete[] monitor_chunks_[i];
  }
}

// Pops a slot off the free list (growing the pool if it is empty) and
// placement-constructs a Monitor in it, reusing the slot's precomputed
// monitor id. Serialized against other allocations/releases by
// allocated_monitor_ids_lock_.
Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
                                          int32_t hash_code)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // We are gonna allocate, so acquire the writer lock.
  MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);

  // Enough space, or need to resize?
  if (first_free_ == nullptr) {
    VLOG(monitor) << "Allocating a new chunk.";
    AllocateChunk();
  }

  Monitor* mon_uninitialized = first_free_;
  first_free_ = first_free_->next_free_;

  // Pull out the id which was preinitialized.
  MonitorId id = mon_uninitialized->monitor_id_;

  // Initialize it.
  Monitor* monitor = new(mon_uninitialized) Monitor(self, owner, obj, hash_code, id);

  return monitor;
}

// Destroys the Monitor and pushes its storage back onto the free list.
// NOTE: the ordering here is deliberate — the id is saved before the
// destructor runs (in case the destructor clears it) and written back into
// the raw storage afterwards, so the slot can be handed out again by
// CreateMonitorInPool() with its id intact.
void MonitorPool::ReleaseMonitorToPool(Thread* self, Monitor* monitor) {
  // Might be racy with allocation, so acquire lock.
  MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);

  // Keep the monitor id. Don't trust it's not cleared.
  MonitorId id = monitor->monitor_id_;

  // Call the destructor.
  // TODO: Exception safety?
  monitor->~Monitor();

  // Add to the head of the free list.
  // (Writing fields of the destroyed object reuses its raw storage as a
  // free-list node, as AllocateChunk() does for never-constructed slots.)
  monitor->next_free_ = first_free_;
  first_free_ = monitor;

  // Rewrite monitor id.
  monitor->monitor_id_ = id;
}

// Bulk release: returns every monitor in the list to the pool, one at a time
// (each ReleaseMonitorToPool() call re-acquires the lock).
void MonitorPool::ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors) {
  for (Monitor* mon : *monitors) {
    ReleaseMonitorToPool(self, mon);
  }
}

}  // namespace art