/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "base/systrace.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "utils.h"
#include "verify_object-inl.h"

#include <cstdlib>
#include <sys/mman.h>  // For madvise() and the PROT_* flags used below.

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;

template<typename T>
class MutatorLockedDumpable {
 public:
  explicit MutatorLockedDumpable(T& value)
      SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) {
  }

  void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) {
    value_.Dump(os);
  }

 private:
  T& value_;

  DISALLOW_COPY_AND_ASSIGN(MutatorLockedDumpable);
};

template<typename T>
std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis
//       currently fails for this.
    NO_THREAD_SAFETY_ANALYSIS {
  rhs.Dump(os);
  return os;
}

void IndirectReferenceTable::AbortIfNoCheckJNI() {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << "JNI ERROR (app bug): see above.";
  }
}

IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
                                               size_t maxCount, IndirectRefKind desiredKind,
                                               bool abort_on_error)
    : kind_(desiredKind),
      max_entries_(maxCount) {
  CHECK_GT(initialCount, 0U);
  CHECK_LE(initialCount, maxCount);
  CHECK_NE(desiredKind, kHandleScopeOrInvalid);

  std::string error_str;
  const size_t table_bytes = maxCount * sizeof(IrtEntry);
  table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
                                            PROT_READ | PROT_WRITE, false, false, &error_str));
  if (abort_on_error) {
    CHECK(table_mem_map_.get() != nullptr) << error_str;
    CHECK_EQ(table_mem_map_->Size(), table_bytes);
    CHECK(table_mem_map_->Begin() != nullptr);
  } else if (table_mem_map_.get() == nullptr ||
             table_mem_map_->Size() != table_bytes ||
             table_mem_map_->Begin() == nullptr) {
    table_mem_map_.reset();
    LOG(ERROR) << error_str;
    return;
  }
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
  segment_state_.all = IRT_FIRST_SEGMENT;
}

IndirectReferenceTable::~IndirectReferenceTable() {
}

bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.get() != nullptr;
}
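
// Illustrative usage (a sketch, not code from this file): a caller snapshots
// the segment state as an opaque cookie and passes it back to Add()/Remove()
// so the table can tell which entries belong to the current segment.  The
// capacity values and the variable "obj" below are invented for the example;
// kLocal and IRT_FIRST_SEGMENT are real identifiers.
//
//   IndirectReferenceTable irt(/*initialCount=*/16, /*maxCount=*/512,
//                              kLocal, /*abort_on_error=*/true);
//   uint32_t cookie = IRT_FIRST_SEGMENT;      // Fresh table, first segment.
//   IndirectRef ref = irt.Add(cookie, obj);   // obj: non-null mirror::Object*.
//   // ... use ref ...
//   bool removed = irt.Remove(cookie, ref);   // True if the entry was zapped.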
IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
  IRTSegmentState prevState;
  prevState.all = cookie;
  size_t topIndex = segment_state_.parts.topIndex;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);
  DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);

  if (topIndex == max_entries_) {
    LOG(FATAL) << "JNI ERROR (app bug): " << kind_ << " table overflow "
               << "(max=" << max_entries_ << ")\n"
               << MutatorLockedDumpable<IndirectReferenceTable>(*this);
  }

  // We know there's enough room in the table.  Now we just need to find
  // the right spot.  If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
  size_t index;
  if (numHoles > 0) {
    DCHECK_GT(topIndex, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* pScan = &table_[topIndex - 1];
    DCHECK(!pScan->GetReference()->IsNull());
    --pScan;
    while (!pScan->GetReference()->IsNull()) {
      DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
      --pScan;
    }
    index = pScan - table_;
    segment_state_.parts.numHoles--;
  } else {
    // Add to the end.
    index = topIndex++;
    segment_state_.parts.topIndex = topIndex;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if ((false)) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
              << " holes=" << segment_state_.parts.numHoles;
  }

  DCHECK(result != nullptr);
  return result;
}
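
// Worked example of the hole-filling path above (values invented): with a
// segment holding [A, B, (hole), D], topIndex == 4 and one hole, the scan
// starts at table_[3] (D, non-null), steps down to table_[2], finds it null,
// and hands out index 2 again -- topIndex stays 4 and the hole count drops
// back to 0.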
void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      ScopedObjectAccess soa(Thread::Current());
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
    }
  }
}

// Removes an object.  We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything.  This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
  IRTSegmentState prevState;
  prevState.all = cookie;
  int topIndex = segment_state_.parts.topIndex;
  int bottomIndex = prevState.parts.topIndex;

  DCHECK(table_ != nullptr);
  DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->check_jni) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG(WARNING));
        }
      }
      return true;
    }
  }
  const int idx = ExtractIndex(iref);
  if (idx < bottomIndex) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottomIndex << "-" << topIndex << ")";
    return false;
  }
  if (idx >= topIndex) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottomIndex << " top=" << topIndex << ")";
    return false;
  }

  if (idx == topIndex - 1) {
    // Top-most entry.  Scan down and consume any holes directly below the new top.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
    if (numHoles != 0) {
      while (--topIndex > bottomIndex && numHoles != 0) {
        if ((false)) {
          LOG(INFO) << "+++ checking for hole at " << topIndex - 1
                    << " (cookie=" << cookie << ") val="
                    << table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[topIndex - 1].GetReference()->IsNull()) {
          break;
        }
        if ((false)) {
          LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
        }
        numHoles--;
      }
      segment_state_.parts.numHoles = numHoles + prevState.parts.numHoles;
      segment_state_.parts.topIndex = topIndex;
    } else {
      segment_state_.parts.topIndex = topIndex - 1;
      if ((false)) {
        LOG(INFO) << "+++ ate last entry " << topIndex - 1;
      }
    }
  } else {
    // Not the top-most entry.  This creates a hole.  We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    segment_state_.parts.numHoles++;
    if ((false)) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
    }
  }

  return true;
}
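
// Worked example of Remove() (values invented): with [A, B, C] and
// topIndex == 3, removing B leaves [A, (hole), C] with numHoles == 1.
// Removing C next hits the top-most-entry path: index 2 is nulled, the loop
// above eats the hole at index 1, and the segment ends with topIndex == 1
// and numHoles == 0, i.e. just [A].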
void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Madvise away the pages past the current top of the table.  The entries
  // there are already null; this just returns the physical pages to the
  // kernel, which will hand back zero-filled pages if the table grows into
  // them again.
  const size_t top_index = Capacity();
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_->End();
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      // The visitor may update the root in place but must never null it out.
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    // Probe for null without a read barrier first; only live entries get the
    // barrier-applying read below, so the dumped pointer is up to date.
    mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

}  // namespace art