/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "base/dumpable-inl.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;

const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kHandleScopeOrInvalid:
      return "HandleScopeOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kHandleScopeOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  const size_t table_bytes = max_count * sizeof(IrtEntry);
  table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
                                            PROT_READ | PROT_WRITE, false, false, error_msg));
  if (table_mem_map_.get() == nullptr && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }

  if (table_mem_map_.get() != nullptr) {
    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
  } else {
    table_ = nullptr;
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
}

IndirectReferenceTable::~IndirectReferenceTable() {
}
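// For illustration, a caller such as JavaVMExt might create a table along these lines
// (the capacity value and the resizability choice are made up for this sketch):
//
//   std::string error_msg;
//   IndirectReferenceTable globals(/* max_count= */ 512,
//                                  kGlobal,
//                                  IndirectReferenceTable::ResizableCapacity::kNo,
//                                  &error_msg);
//   CHECK(globals.IsValid()) << error_msg;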
void IndirectReferenceTable::ConstexprChecks() {
  // Use this for some assertions. They can't be put into the header as C++ wants the class
  // to be complete.

  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}

bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.get() != nullptr;
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find holes quickly as
// they are usually near the end of the table (see the header, TODO: verify this assumption). To
// avoid scans when there are no holes, the number of known holes is tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// This constrains the maximum number of references to a 16-bit count. We want to relax this, as
// it is easy to require more references (e.g., to list all classes in large applications). Thus,
// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
//
// Thus, the hole count is a local property of the current segment, and needs to be recovered
// when (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable),
// we cannot do work when the segment changes. Thus, Add and Remove need to ensure that the
// current hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that can describe
// the known segment. This is last_known_previous_state_. The requirement will become clear with
// the following cases (some of them non-trivial) that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition is simply that the last known state is greater than or
// equal to the current previous state, and smaller than the current state (top index). The
// condition is conservative as it adds O(1) overhead to operations on an empty segment.
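// Worked example of the detection condition (the numbers are illustrative only):
//
//   Base segment: entries [0, 3) in use, no holes, last_known_previous_state_ == 0.
//   Push a segment (the previous state becomes 3), then Add: last_known (0) < previous (3),
//   so RecoverHoles() recounts the null entries in [3, top) and sets last_known to 3.
//   Pop back to the base segment, then Remove: the previous state is 0 again and
//   last_known (3) >= top (3), so the hole count for [0, 3) is recomputed before the
//   removal proceeds.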
static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

// Recompute current_num_holes_ for the current segment if a segment change may have happened
// since the last Add or Remove.
void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}
bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = new_size * sizeof(IrtEntry);
  std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
                                                       nullptr,
                                                       table_bytes,
                                                       PROT_READ | PROT_WRITE,
                                                       false,
                                                       false,
                                                       error_msg));
  if (new_map == nullptr) {
    return false;
  }

  memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
  max_entries_ = new_size;

  return true;
}
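// Size arithmetic, for illustration (the entry size here is an assumption, not taken from the
// header): with sizeof(IrtEntry) == 8, kMaxTableSizeInBytes of 128 MB caps kMaxEntries at 16M
// entries, and a full 512-entry table (4 KiB) is doubled by Add() below to 1024 entries (8 KiB).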
IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double the table size.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}

void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref" and zap the corresponding
// entry, leaving a hole if it's not at the top. If the entry is not between the current top
// index and the bottom index specified by the cookie, we don't remove anything. This is the
// behavior required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; it is only used for explicit single
// removals.
// Returns "false" if nothing was removed.
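// Worked example (illustrative indices): with bottom_index == 2 and top_index == 5,
// removing index 4 pops the top entry. Removing index 3 instead nulls the entry and
// records a hole (current_num_holes_ == 1); a later removal of index 4 then scans
// downward, consumes the hole at 3, and leaves top_index == 3.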
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }
  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}

void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Release the pages beyond the in-use portion of the table back to the kernel.
  const size_t top_index = Capacity();
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_->End();
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    // Test for null without a read barrier, then re-read through a read barrier before
    // publishing the reference into the dump.
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}
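// Note on intended usage (an assumption about callers, not stated in this file): callers save
// segment_state_ as a cookie before pushing a new segment (e.g., a JNI local reference frame)
// and restore it through SetSegmentState() when that segment is popped.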
void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  size_t top_index = segment_state_.top_index;
  if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
    return true;
  }

  // This is only a simple best-effort attempt: ensure the requested capacity at the end of
  // the table.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.

  // Would this overflow?
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    *error_msg = "Cannot resize table, overflow.";
    return false;
  }

  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art