// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#include <algorithm>
#include <stdio.h>
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "leveldb/env.h"
#include "leveldb/table_builder.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"

namespace leveldb {

static const int kTargetFileSize = 2 * 1048576;

// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
// stop building a single file in a level->level+1 compaction.
static const int64_t kMaxGrandParentOverlapBytes = 10 * kTargetFileSize;

// Maximum number of bytes in all compacted files.  We avoid expanding
// the lower level file set of a compaction if it would make the
// total compaction cover more than this many bytes.
static const int64_t kExpandedCompactionByteSizeLimit = 25 * kTargetFileSize;

static double MaxBytesForLevel(int level) {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.
  double result = 10 * 1048576.0;  // Result for both level-0 and level-1
  while (level > 1) {
    result *= 10;
    level--;
  }
  return result;
}

static uint64_t MaxFileSizeForLevel(int level) {
  return kTargetFileSize;  // We could vary per level to reduce number of files?
}

static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  int64_t sum = 0;
  for (size_t i = 0; i < files.size(); i++) {
    sum += files[i]->file_size;
  }
  return sum;
}

namespace {
std::string IntSetToString(const std::set<uint64_t>& s) {
  std::string result = "{";
  for (std::set<uint64_t>::const_iterator it = s.begin();
       it != s.end();
       ++it) {
    result += (result.size() > 1) ? "," : "";
    result += NumberToString(*it);
  }
  result += "}";
  return result;
}
}  // namespace

Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < config::kNumLevels; level++) {
    for (size_t i = 0; i < files_[level].size(); i++) {
      FileMetaData* f = files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        delete f;
      }
    }
  }
}

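// Binary-search helper: returns the index of the earliest file in "files"
// whose largest key is >= "key", or files.size() if there is no such file.
// Assumes "files" is sorted by largest key (true for levels > 0).  For
// example (ignoring sequence numbers), with largest keys {"b", "d", "f"}
// and key "c", the result is index 1.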
int FindFile(const InternalKeyComparator& icmp,
             const std::vector<FileMetaData*>& files,
             const Slice& key) {
  uint32_t left = 0;
  uint32_t right = files.size();
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    const FileMetaData* f = files[mid];
    if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
      // Key at "mid.largest" is < "target".  Therefore all
      // files at or before "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid.largest" is >= "target".  Therefore all files
      // after "mid" are uninteresting.
      right = mid;
    }
  }
  return right;
}

static bool AfterFile(const Comparator* ucmp,
                      const Slice* user_key, const FileMetaData* f) {
  // NULL user_key occurs before all keys and is therefore never after *f
  return (user_key != NULL &&
          ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}

static bool BeforeFile(const Comparator* ucmp,
                       const Slice* user_key, const FileMetaData* f) {
  // NULL user_key occurs after all keys and is therefore never before *f
  return (user_key != NULL &&
          ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}

bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const std::vector<FileMetaData*>& files,
    const Slice* smallest_user_key,
    const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != NULL) {
    // Find the earliest possible internal key for smallest_user_key
    InternalKey small(*smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    index = FindFile(icmp, files, small.Encode());
  }

  if (index >= files.size()) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  return !BeforeFile(ucmp, largest_user_key, files[index]);
}

// An internal iterator.  For a given version/level pair, yields
// information about the files in the level.  For a given entry, key()
// is the largest key that occurs in the file, and value() is a
// 16-byte value containing the file number and file size, both
// encoded using EncodeFixed64.
class Version::LevelFileNumIterator : public Iterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>* flist)
      : icmp_(icmp),
        flist_(flist),
        index_(flist->size()) {        // Marks as invalid
  }
  virtual bool Valid() const {
    return index_ < flist_->size();
  }
  virtual void Seek(const Slice& target) {
    index_ = FindFile(icmp_, *flist_, target);
  }
  virtual void SeekToFirst() { index_ = 0; }
  virtual void SeekToLast() {
    index_ = flist_->empty() ? 0 : flist_->size() - 1;
  }
  virtual void Next() {
    assert(Valid());
    index_++;
  }
  virtual void Prev() {
    assert(Valid());
    if (index_ == 0) {
      index_ = flist_->size();  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const {
    assert(Valid());
    return (*flist_)[index_]->largest.Encode();
  }
  Slice value() const {
    assert(Valid());
    EncodeFixed64(value_buf_, (*flist_)[index_]->number);
    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
    return Slice(value_buf_, sizeof(value_buf_));
  }
  virtual Status status() const { return Status::OK(); }
 private:
  const InternalKeyComparator icmp_;
  const std::vector<FileMetaData*>* const flist_;
  uint32_t index_;

  // Backing store for value().  Holds the file number and size.
  mutable char value_buf_[16];
};

static Iterator* GetFileIterator(void* arg,
                                 const ReadOptions& options,
                                 const Slice& file_value) {
  TableCache* cache = reinterpret_cast<TableCache*>(arg);
  if (file_value.size() != 16) {
    return NewErrorIterator(
        Status::Corruption("FileReader invoked with unexpected value"));
  } else {
    return cache->NewIterator(options,
                              DecodeFixed64(file_value.data()),
                              DecodeFixed64(file_value.data() + 8));
  }
}

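// A "concatenating" iterator for a level > 0 is assembled as a two-level
// iterator: the index level walks LevelFileNumIterator (one entry per file)
// and GetFileIterator lazily opens the corresponding table via the
// TableCache for the data level.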
Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                            int level) const {
  return NewTwoLevelIterator(
      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
      &GetFileIterator, vset_->table_cache_, options);
}

void Version::AddIterators(const ReadOptions& options,
                           std::vector<Iterator*>* iters) {
  // Merge all level zero files together since they may overlap
  for (size_t i = 0; i < files_[0].size(); i++) {
    iters->push_back(
        vset_->table_cache_->NewIterator(
            options, files_[0][i]->number, files_[0][i]->file_size));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < config::kNumLevels; level++) {
    if (!files_[level].empty()) {
      iters->push_back(NewConcatenatingIterator(options, level));
    }
  }
}

// Callback from TableCache::Get()
namespace {
enum SaverState {
  kNotFound,
  kFound,
  kDeleted,
  kCorrupt,
};
struct Saver {
  SaverState state;
  const Comparator* ucmp;
  Slice user_key;
  std::string* value;
};
}
static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
  Saver* s = reinterpret_cast<Saver*>(arg);
  ParsedInternalKey parsed_key;
  if (!ParseInternalKey(ikey, &parsed_key)) {
    s->state = kCorrupt;
  } else {
    if (s->ucmp->Compare(parsed_key.user_key, s->user_key) == 0) {
      s->state = (parsed_key.type == kTypeValue) ? kFound : kDeleted;
      if (s->state == kFound) {
        s->value->assign(v.data(), v.size());
      }
    }
  }
}

static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
  return a->number > b->number;
}

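// Call func(arg, level, f) for every file that may contain user_key:
// level-0 files are visited from newest to oldest, then each higher level
// contributes at most one candidate file.  Stops as soon as func returns
// false.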
void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
                                 void* arg,
                                 bool (*func)(void*, int, FileMetaData*)) {
  // TODO(sanjay): Change Version::Get() to use this function.
  const Comparator* ucmp = vset_->icmp_.user_comparator();

  // Search level-0 in order from newest to oldest.
  std::vector<FileMetaData*> tmp;
  tmp.reserve(files_[0].size());
  for (uint32_t i = 0; i < files_[0].size(); i++) {
    FileMetaData* f = files_[0][i];
    if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
        ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
      tmp.push_back(f);
    }
  }
  if (!tmp.empty()) {
    std::sort(tmp.begin(), tmp.end(), NewestFirst);
    for (uint32_t i = 0; i < tmp.size(); i++) {
      if (!(*func)(arg, 0, tmp[i])) {
        return;
      }
    }
  }

  // Search other levels.
  for (int level = 1; level < config::kNumLevels; level++) {
    size_t num_files = files_[level].size();
    if (num_files == 0) continue;

    // Binary search to find earliest index whose largest key >= internal_key.
    uint32_t index = FindFile(vset_->icmp_, files_[level], internal_key);
    if (index < num_files) {
      FileMetaData* f = files_[level][index];
      if (ucmp->Compare(user_key, f->smallest.user_key()) < 0) {
        // All of "f" is past any data for user_key
      } else {
        if (!(*func)(arg, level, f)) {
          return;
        }
      }
    }
  }
}

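// Look up the value for k.  If more than one file has to be examined for a
// single read, the first file searched (which necessarily did not contain
// the key) is recorded in *stats; UpdateStats() later charges it one seek
// and may schedule it for a seek-triggered compaction.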
Status Version::Get(const ReadOptions& options,
                    const LookupKey& k,
                    std::string* value,
                    GetStats* stats) {
  Slice ikey = k.internal_key();
  Slice user_key = k.user_key();
  const Comparator* ucmp = vset_->icmp_.user_comparator();
  Status s;

  stats->seek_file = NULL;
  stats->seek_file_level = -1;
  FileMetaData* last_file_read = NULL;
  int last_file_read_level = -1;

  // We can search level-by-level since entries never hop across
  // levels.  Therefore we are guaranteed that if we find data
  // in a smaller level, later levels are irrelevant.
  std::vector<FileMetaData*> tmp;
  FileMetaData* tmp2;
  for (int level = 0; level < config::kNumLevels; level++) {
    size_t num_files = files_[level].size();
    if (num_files == 0) continue;

    // Get the list of files to search in this level
    FileMetaData* const* files = &files_[level][0];
    if (level == 0) {
      // Level-0 files may overlap each other.  Find all files that
      // overlap user_key and process them in order from newest to oldest.
      tmp.reserve(num_files);
      for (uint32_t i = 0; i < num_files; i++) {
        FileMetaData* f = files[i];
        if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
            ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
          tmp.push_back(f);
        }
      }
      if (tmp.empty()) continue;

      std::sort(tmp.begin(), tmp.end(), NewestFirst);
      files = &tmp[0];
      num_files = tmp.size();
    } else {
      // Binary search to find earliest index whose largest key >= ikey.
      uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
      if (index >= num_files) {
        files = NULL;
        num_files = 0;
      } else {
        tmp2 = files[index];
        if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
          // All of "tmp2" is past any data for user_key
          files = NULL;
          num_files = 0;
        } else {
          files = &tmp2;
          num_files = 1;
        }
      }
    }

    for (uint32_t i = 0; i < num_files; ++i) {
      if (last_file_read != NULL && stats->seek_file == NULL) {
        // We have had more than one seek for this read.  Charge the 1st file.
        stats->seek_file = last_file_read;
        stats->seek_file_level = last_file_read_level;
      }

      FileMetaData* f = files[i];
      last_file_read = f;
      last_file_read_level = level;

      Saver saver;
      saver.state = kNotFound;
      saver.ucmp = ucmp;
      saver.user_key = user_key;
      saver.value = value;
      s = vset_->table_cache_->Get(options, f->number, f->file_size,
                                   ikey, &saver, SaveValue);
      if (!s.ok()) {
        return s;
      }
      switch (saver.state) {
        case kNotFound:
          break;      // Keep searching in other files
        case kFound:
          return s;
        case kDeleted:
          s = Status::NotFound(Slice());  // Use empty error message for speed
          return s;
        case kCorrupt:
          s = Status::Corruption("corrupted key for ", user_key);
          return s;
      }
    }
  }

  return Status::NotFound(Slice());  // Use an empty error message for speed
}

bool Version::UpdateStats(const GetStats& stats) {
  FileMetaData* f = stats.seek_file;
  if (f != NULL) {
    f->allowed_seeks--;
    if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
      file_to_compact_ = f;
      file_to_compact_level_ = stats.seek_file_level;
      return true;
    }
  }
  return false;
}

bool Version::RecordReadSample(Slice internal_key) {
  ParsedInternalKey ikey;
  if (!ParseInternalKey(internal_key, &ikey)) {
    return false;
  }

  struct State {
    GetStats stats;  // Holds first matching file
    int matches;

    static bool Match(void* arg, int level, FileMetaData* f) {
      State* state = reinterpret_cast<State*>(arg);
      state->matches++;
      if (state->matches == 1) {
        // Remember first match.
        state->stats.seek_file = f;
        state->stats.seek_file_level = level;
      }
      // We can stop iterating once we have a second match.
      return state->matches < 2;
    }
  };

  State state;
  state.matches = 0;
  ForEachOverlapping(ikey.user_key, internal_key, &state, &State::Match);

  // Must have at least two matches since we want to merge across
  // files. But what if we have a single file that contains many
  // overwrites and deletions?  Should we have another mechanism for
  // finding such files?
  if (state.matches >= 2) {
    // 1MB cost is about 1 seek (see comment in Builder::Apply).
    return UpdateStats(state.stats);
  }
  return false;
}

void Version::Ref() {
  ++refs_;
}

void Version::Unref() {
  assert(this != &vset_->dummy_versions_);
  assert(refs_ >= 1);
  --refs_;
  if (refs_ == 0) {
    delete this;
  }
}

bool Version::OverlapInLevel(int level,
                             const Slice* smallest_user_key,
                             const Slice* largest_user_key) {
  return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                               smallest_user_key, largest_user_key);
}

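// Pick a level for the result of a memtable compaction that covers
// [smallest_user_key, largest_user_key].  The output is pushed below
// level-0 (up to config::kMaxMemCompactLevel) as long as it overlaps
// nothing in the next level and at most kMaxGrandParentOverlapBytes in
// the level after that.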
int Version::PickLevelForMemTableOutput(
    const Slice& smallest_user_key,
    const Slice& largest_user_key) {
  int level = 0;
  if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
    // Push to next level if there is no overlap in next level,
    // and the #bytes overlapping in the level after that are limited.
    InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
    std::vector<FileMetaData*> overlaps;
    while (level < config::kMaxMemCompactLevel) {
      if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
        break;
      }
      if (level + 2 < config::kNumLevels) {
        // Check that file does not overlap too many grandparent bytes.
        GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
        const int64_t sum = TotalFileSize(overlaps);
        if (sum > kMaxGrandParentOverlapBytes) {
          break;
        }
      }
      level++;
    }
  }
  return level;
}

// Store in "*inputs" all files in "level" that overlap [begin,end]
void Version::GetOverlappingInputs(
    int level,
    const InternalKey* begin,
    const InternalKey* end,
    std::vector<FileMetaData*>* inputs) {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  inputs->clear();
  Slice user_begin, user_end;
  if (begin != NULL) {
    user_begin = begin->user_key();
  }
  if (end != NULL) {
    user_end = end->user_key();
  }
  const Comparator* user_cmp = vset_->icmp_.user_comparator();
  for (size_t i = 0; i < files_[level].size(); ) {
    FileMetaData* f = files_[level][i++];
    const Slice file_start = f->smallest.user_key();
    const Slice file_limit = f->largest.user_key();
    if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) {
      // "f" is completely before specified range; skip it
    } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) {
      // "f" is completely after specified range; skip it
    } else {
      inputs->push_back(f);
      if (level == 0) {
        // Level-0 files may overlap each other.  So check if the newly
        // added file has expanded the range.  If so, restart search.
        if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) {
          user_begin = file_start;
          inputs->clear();
          i = 0;
        } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) {
          user_end = file_limit;
          inputs->clear();
          i = 0;
        }
      }
    }
  }
}

std::string Version::DebugString() const {
  std::string r;
  for (int level = 0; level < config::kNumLevels; level++) {
    // E.g.,
    //   --- level 1 ---
    //   17:123['a' .. 'd']
    //   20:43['e' .. 'g']
    r.append("--- level ");
    AppendNumberTo(&r, level);
    r.append(" ---\n");
    const std::vector<FileMetaData*>& files = files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      r.push_back(' ');
      AppendNumberTo(&r, files[i]->number);
      r.push_back(':');
      AppendNumberTo(&r, files[i]->file_size);
      r.append("[");
      r.append(files[i]->smallest.DebugString());
      r.append(" .. ");
      r.append(files[i]->largest.DebugString());
      r.append("]\n");
    }
  }
  return r;
}

// A helper class so we can efficiently apply a whole sequence
// of edits to a particular state without creating intermediate
// Versions that contain full copies of the intermediate state.
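//
// FileMetaData objects are shared between the base Version and the builder
// through a simple reference count; a file's metadata is deleted only when
// the last referencing Version or Builder releases it.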
class VersionSet::Builder {
 private:
  // Helper to sort by v->files_[file_number].smallest
  struct BySmallestKey {
    const InternalKeyComparator* internal_comparator;

    bool operator()(FileMetaData* f1, FileMetaData* f2) const {
      int r = internal_comparator->Compare(f1->smallest, f2->smallest);
      if (r != 0) {
        return (r < 0);
      } else {
        // Break ties by file number
        return (f1->number < f2->number);
      }
    }
  };

  typedef std::set<FileMetaData*, BySmallestKey> FileSet;
  struct LevelState {
    std::set<uint64_t> deleted_files;
    FileSet* added_files;
  };

  VersionSet* vset_;
  Version* base_;
  LevelState levels_[config::kNumLevels];

 public:
  // Initialize a builder with the files from *base and other info from *vset
  Builder(VersionSet* vset, Version* base)
      : vset_(vset),
        base_(base) {
    base_->Ref();
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      levels_[level].added_files = new FileSet(cmp);
    }
  }

  ~Builder() {
    for (int level = 0; level < config::kNumLevels; level++) {
      const FileSet* added = levels_[level].added_files;
      std::vector<FileMetaData*> to_unref;
      to_unref.reserve(added->size());
      for (FileSet::const_iterator it = added->begin();
           it != added->end(); ++it) {
        to_unref.push_back(*it);
      }
      delete added;
      for (uint32_t i = 0; i < to_unref.size(); i++) {
        FileMetaData* f = to_unref[i];
        f->refs--;
        if (f->refs <= 0) {
          delete f;
        }
      }
    }
    base_->Unref();
  }

  // Apply all of the edits in *edit to the current state.
  void Apply(VersionEdit* edit) {
    // Update compaction pointers
    for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
      const int level = edit->compact_pointers_[i].first;
      vset_->compact_pointer_[level] =
          edit->compact_pointers_[i].second.Encode().ToString();
    }

    // Delete files
    const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
    for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
         iter != del.end();
         ++iter) {
      const int level = iter->first;
      const uint64_t number = iter->second;
      levels_[level].deleted_files.insert(number);
    }

    // Add new files
    for (size_t i = 0; i < edit->new_files_.size(); i++) {
      const int level = edit->new_files_[i].first;
      FileMetaData* f = new FileMetaData(edit->new_files_[i].second);
      f->refs = 1;

      // We arrange to automatically compact this file after
      // a certain number of seeks.  Let's assume:
      //   (1) One seek costs 10ms
      //   (2) Writing or reading 1MB costs 10ms (100MB/s)
      //   (3) A compaction of 1MB does 25MB of IO:
      //         1MB read from this level
      //         10-12MB read from next level (boundaries may be misaligned)
      //         10-12MB written to next level
      // This implies that 25 seeks cost the same as the compaction
      // of 1MB of data.  I.e., one seek costs approximately the
      // same as the compaction of 40KB of data.  We are a little
      // conservative and allow approximately one seek for every 16KB
      // of data before triggering a compaction.
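      // For example, under these assumptions a 2MB file gets
      // 2*1048576/16384 = 128 allowed seeks before it becomes a
      // compaction candidate; anything smaller than ~1.6MB is clamped
      // to the 100-seek minimum below.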
      f->allowed_seeks = (f->file_size / 16384);
      if (f->allowed_seeks < 100) f->allowed_seeks = 100;

      levels_[level].deleted_files.erase(f->number);
      levels_[level].added_files->insert(f);
    }
  }

  // Save the current state in *v.
  void SaveTo(Version* v) {
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      // Merge the set of added files with the set of pre-existing files.
      // Drop any deleted files.  Store the result in *v.
      const std::vector<FileMetaData*>& base_files = base_->files_[level];
      std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
      std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
      const FileSet* added = levels_[level].added_files;
      v->files_[level].reserve(base_files.size() + added->size());
      for (FileSet::const_iterator added_iter = added->begin();
           added_iter != added->end();
           ++added_iter) {
        // Add all smaller files listed in base_
        for (std::vector<FileMetaData*>::const_iterator bpos
                 = std::upper_bound(base_iter, base_end, *added_iter, cmp);
             base_iter != bpos;
             ++base_iter) {
          MaybeAddFile(v, level, *base_iter);
        }

        MaybeAddFile(v, level, *added_iter);
      }

      // Add remaining base files
      for (; base_iter != base_end; ++base_iter) {
        MaybeAddFile(v, level, *base_iter);
      }

#ifndef NDEBUG
      // Make sure there is no overlap in levels > 0
      if (level > 0) {
        for (uint32_t i = 1; i < v->files_[level].size(); i++) {
          const InternalKey& prev_end = v->files_[level][i-1]->largest;
          const InternalKey& this_begin = v->files_[level][i]->smallest;
          if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
            fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
                    prev_end.DebugString().c_str(),
                    this_begin.DebugString().c_str());
            abort();
          }
        }
      }
#endif
    }
  }

  void MaybeAddFile(Version* v, int level, FileMetaData* f) {
    if (levels_[level].deleted_files.count(f->number) > 0) {
      // File is deleted: do nothing
    } else {
      std::vector<FileMetaData*>* files = &v->files_[level];
      if (level > 0 && !files->empty()) {
        // Must not overlap
        assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
                                    f->smallest) < 0);
      }
      f->refs++;
      files->push_back(f);
    }
  }
};

VersionSet::VersionSet(const std::string& dbname,
                       const Options* options,
                       TableCache* table_cache,
                       const InternalKeyComparator* cmp)
    : env_(options->env),
      dbname_(dbname),
      options_(options),
      table_cache_(table_cache),
      icmp_(*cmp),
      next_file_number_(2),
      manifest_file_number_(0),  // Filled by Recover()
      last_sequence_(0),
      log_number_(0),
      prev_log_number_(0),
      descriptor_file_(NULL),
      descriptor_log_(NULL),
      dummy_versions_(this),
      current_(NULL) {
  AppendVersion(new Version(this));
}

VersionSet::~VersionSet() {
  current_->Unref();
  assert(dummy_versions_.next_ == &dummy_versions_);  // List must be empty
  delete descriptor_log_;
  delete descriptor_file_;
}

void VersionSet::AppendVersion(Version* v) {
  // Make "v" current
  assert(v->refs_ == 0);
  assert(v != current_);
  if (current_ != NULL) {
    current_->Unref();
  }
  current_ = v;
  v->Ref();

  // Append to linked list
  v->prev_ = dummy_versions_.prev_;
  v->next_ = &dummy_versions_;
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}

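// Apply *edit to the current version to form a new descriptor that is both
// saved to the MANIFEST and installed as the new current version.  *mu is
// released while the MANIFEST write and sync are in progress and re-acquired
// afterwards.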
Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
  if (edit->has_log_number_) {
    assert(edit->log_number_ >= log_number_);
    assert(edit->log_number_ < next_file_number_);
  } else {
    edit->SetLogNumber(log_number_);
  }

  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }

  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);

  Version* v = new Version(this);
  {
    Builder builder(this, current_);
    builder.Apply(edit);
    builder.SaveTo(v);
  }
  Finalize(v);

  // Initialize new descriptor log file if necessary by creating
  // a temporary file that contains a snapshot of the current version.
  std::string new_manifest_file;
  Status s;
  if (descriptor_log_ == NULL) {
    // No reason to unlock *mu here since we only hit this path in the
    // first call to LogAndApply (when opening the database).
    assert(descriptor_file_ == NULL);
    new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
    edit->SetNextFile(next_file_number_);
    s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
    if (s.ok()) {
      descriptor_log_ = new log::Writer(descriptor_file_);
      s = WriteSnapshot(descriptor_log_);
    }
  }

  // Unlock during expensive MANIFEST log write
  {
    mu->Unlock();

    // Write new record to MANIFEST log
    if (s.ok()) {
      std::string record;
      edit->EncodeTo(&record);
      s = descriptor_log_->AddRecord(record);
      if (s.ok()) {
        s = descriptor_file_->Sync();
      }
      if (!s.ok()) {
        Log(options_->info_log, "MANIFEST write: %s\n", s.ToString().c_str());
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok() && !new_manifest_file.empty()) {
      s = SetCurrentFile(env_, dbname_, manifest_file_number_);
    }

    mu->Lock();
  }

  // Install the new version
  if (s.ok()) {
    AppendVersion(v);
    log_number_ = edit->log_number_;
    prev_log_number_ = edit->prev_log_number_;
  } else {
    delete v;
    if (!new_manifest_file.empty()) {
      delete descriptor_log_;
      delete descriptor_file_;
      descriptor_log_ = NULL;
      descriptor_file_ = NULL;
      env_->DeleteFile(new_manifest_file);
    }
  }

  return s;
}

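// Recover the last saved state from persistent storage: read CURRENT to
// find the active MANIFEST, replay its VersionEdit records through a
// Builder, and install the resulting Version along with the recovered
// file numbers and last sequence number.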
Status VersionSet::Recover() {
  struct LogReporter : public log::Reader::Reporter {
    Status* status;
    virtual void Corruption(size_t bytes, const Status& s) {
      if (this->status->ok()) *this->status = s;
    }
  };

  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string current;
  Status s = ReadFileToString(env_, CurrentFileName(dbname_), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size()-1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  current.resize(current.size() - 1);

  std::string dscname = dbname_ + "/" + current;
  SequentialFile* file;
  s = env_->NewSequentialFile(dscname, &file);
  if (!s.ok()) {
    return s;
  }

  bool have_log_number = false;
  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t log_number = 0;
  uint64_t prev_log_number = 0;
  Builder builder(this, current_);

  {
    LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
    Slice record;
    std::string scratch;
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (s.ok()) {
        if (edit.has_comparator_ &&
            edit.comparator_ != icmp_.user_comparator()->Name()) {
          s = Status::InvalidArgument(
              edit.comparator_ + " does not match existing comparator ",
              icmp_.user_comparator()->Name());
        }
      }

      if (s.ok()) {
        builder.Apply(&edit);
      }

      if (edit.has_log_number_) {
        log_number = edit.log_number_;
        have_log_number = true;
      }

      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }

      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }

      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }
    }
  }
  delete file;
  file = NULL;

  if (s.ok()) {
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
    } else if (!have_log_number) {
      s = Status::Corruption("no meta-lognumber entry in descriptor");
    } else if (!have_last_sequence) {
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }

    if (!have_prev_log_number) {
      prev_log_number = 0;
    }

    MarkFileNumberUsed(prev_log_number);
    MarkFileNumberUsed(log_number);
  }

  if (s.ok()) {
    Version* v = new Version(this);
    builder.SaveTo(v);
    // Install recovered version
    Finalize(v);
    AppendVersion(v);
    manifest_file_number_ = next_file;
    next_file_number_ = next_file + 1;
    last_sequence_ = last_sequence;
    log_number_ = log_number;
    prev_log_number_ = prev_log_number;
  }

  return s;
}

void VersionSet::MarkFileNumberUsed(uint64_t number) {
  if (next_file_number_ <= number) {
    next_file_number_ = number + 1;
  }
}

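// Precompute the best level for the next compaction.  Level-0 is scored by
// file count relative to config::kL0_CompactionTrigger, other levels by
// total bytes relative to MaxBytesForLevel(); e.g. if the trigger is 4, a
// level-0 holding 6 files gets a score of 1.5 and becomes eligible for a
// size compaction (score >= 1).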
void VersionSet::Finalize(Version* v) {
  // Precomputed best level for next compaction
  int best_level = -1;
  double best_score = -1;

  for (int level = 0; level < config::kNumLevels-1; level++) {
    double score;
    if (level == 0) {
      // We treat level-0 specially by bounding the number of files
      // instead of number of bytes for two reasons:
      //
      // (1) With larger write-buffer sizes, it is nice not to do too
      // many level-0 compactions.
      //
      // (2) The files in level-0 are merged on every read and
      // therefore we wish to avoid too many files when the individual
      // file size is small (perhaps because of a small write-buffer
      // setting, or very high compression ratios, or lots of
      // overwrites/deletions).
      score = v->files_[level].size() /
          static_cast<double>(config::kL0_CompactionTrigger);
    } else {
      // Compute the ratio of current size to size limit.
      const uint64_t level_bytes = TotalFileSize(v->files_[level]);
      score = static_cast<double>(level_bytes) / MaxBytesForLevel(level);
    }

    if (score > best_score) {
      best_level = level;
      best_score = score;
    }
  }

  v->compaction_level_ = best_level;
  v->compaction_score_ = best_score;
}

Status VersionSet::WriteSnapshot(log::Writer* log) {
  // TODO: Break up into multiple records to reduce memory usage on recovery?

  // Save metadata
  VersionEdit edit;
  edit.SetComparatorName(icmp_.user_comparator()->Name());

  // Save compaction pointers
  for (int level = 0; level < config::kNumLevels; level++) {
    if (!compact_pointer_[level].empty()) {
      InternalKey key;
      key.DecodeFrom(compact_pointer_[level]);
      edit.SetCompactPointer(level, key);
    }
  }

  // Save files
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = current_->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest);
    }
  }

  std::string record;
  edit.EncodeTo(&record);
  return log->AddRecord(record);
}

int VersionSet::NumLevelFiles(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return current_->files_[level].size();
}

const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
  // Update code if kNumLevels changes
  assert(config::kNumLevels == 7);
  snprintf(scratch->buffer, sizeof(scratch->buffer),
           "files[ %d %d %d %d %d %d %d ]",
           int(current_->files_[0].size()),
           int(current_->files_[1].size()),
           int(current_->files_[2].size()),
           int(current_->files_[3].size()),
           int(current_->files_[4].size()),
           int(current_->files_[5].size()),
           int(current_->files_[6].size()));
  return scratch->buffer;
}

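// Return the approximate byte offset of "ikey" as if the database contents
// were laid out file by file: files entirely before "ikey" contribute their
// full size, and a file straddling "ikey" contributes the in-table offset
// reported by Table::ApproximateOffsetOf().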
uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
  uint64_t result = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = v->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      if (icmp_.Compare(files[i]->largest, ikey) <= 0) {
        // Entire file is before "ikey", so just add the file size
        result += files[i]->file_size;
      } else if (icmp_.Compare(files[i]->smallest, ikey) > 0) {
        // Entire file is after "ikey", so ignore
        if (level > 0) {
          // Files other than level 0 are sorted by meta->smallest, so
          // no further files in this level will contain data for
          // "ikey".
          break;
        }
      } else {
        // "ikey" falls in the range for this table.  Add the
        // approximate offset of "ikey" within the table.
        Table* tableptr;
        Iterator* iter = table_cache_->NewIterator(
            ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
        if (tableptr != NULL) {
          result += tableptr->ApproximateOffsetOf(ikey.Encode());
        }
        delete iter;
      }
    }
  }
  return result;
}

void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
  for (Version* v = dummy_versions_.next_;
       v != &dummy_versions_;
       v = v->next_) {
    for (int level = 0; level < config::kNumLevels; level++) {
      const std::vector<FileMetaData*>& files = v->files_[level];
      for (size_t i = 0; i < files.size(); i++) {
        live->insert(files[i]->number);
      }
    }
  }
}

int64_t VersionSet::NumLevelBytes(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return TotalFileSize(current_->files_[level]);
}

int64_t VersionSet::MaxNextLevelOverlappingBytes() {
  int64_t result = 0;
  std::vector<FileMetaData*> overlaps;
  for (int level = 1; level < config::kNumLevels - 1; level++) {
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      const FileMetaData* f = current_->files_[level][i];
      current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
                                     &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}

// Stores the minimal range that covers all entries in inputs in
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
                          InternalKey* smallest,
                          InternalKey* largest) {
  assert(!inputs.empty());
  smallest->Clear();
  largest->Clear();
  for (size_t i = 0; i < inputs.size(); i++) {
    FileMetaData* f = inputs[i];
    if (i == 0) {
      *smallest = f->smallest;
      *largest = f->largest;
    } else {
      if (icmp_.Compare(f->smallest, *smallest) < 0) {
        *smallest = f->smallest;
      }
      if (icmp_.Compare(f->largest, *largest) > 0) {
        *largest = f->largest;
      }
    }
  }
}

// Stores the minimal range that covers all entries in inputs1 and inputs2
// in *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                           const std::vector<FileMetaData*>& inputs2,
                           InternalKey* smallest,
                           InternalKey* largest) {
  std::vector<FileMetaData*> all = inputs1;
  all.insert(all.end(), inputs2.begin(), inputs2.end());
  GetRange(all, smallest, largest);
}

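// Build the merged input iterator for compaction "c".  A level-0 compaction
// needs one iterator per level-0 input file plus one concatenating iterator
// for the level-1 inputs; any other compaction needs just two concatenating
// iterators (one per input level).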
Iterator* VersionSet::MakeInputIterator(Compaction* c) {
  ReadOptions options;
  options.verify_checksums = options_->paranoid_checks;
  options.fill_cache = false;

  // Level-0 files have to be merged together.  For other levels,
  // we will make a concatenating iterator per level.
  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
  const int space = (c->level() == 0 ? c->inputs_[0].size() + 1 : 2);
  Iterator** list = new Iterator*[space];
  int num = 0;
  for (int which = 0; which < 2; which++) {
    if (!c->inputs_[which].empty()) {
      if (c->level() + which == 0) {
        const std::vector<FileMetaData*>& files = c->inputs_[which];
        for (size_t i = 0; i < files.size(); i++) {
          list[num++] = table_cache_->NewIterator(
              options, files[i]->number, files[i]->file_size);
        }
      } else {
        // Create concatenating iterator for the files from this level
        list[num++] = NewTwoLevelIterator(
            new Version::LevelFileNumIterator(icmp_, &c->inputs_[which]),
            &GetFileIterator, table_cache_, options);
      }
    }
  }
  assert(num <= space);
  Iterator* result = NewMergingIterator(&icmp_, list, num);
  delete[] list;
  return result;
}

Compaction* VersionSet::PickCompaction() {
  Compaction* c;
  int level;

  // We prefer compactions triggered by too much data in a level over
  // the compactions triggered by seeks.
  const bool size_compaction = (current_->compaction_score_ >= 1);
  const bool seek_compaction = (current_->file_to_compact_ != NULL);
  if (size_compaction) {
    level = current_->compaction_level_;
    assert(level >= 0);
    assert(level+1 < config::kNumLevels);
    c = new Compaction(level);

    // Pick the first file that comes after compact_pointer_[level]
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      FileMetaData* f = current_->files_[level][i];
      if (compact_pointer_[level].empty() ||
          icmp_.Compare(f->largest.Encode(), compact_pointer_[level]) > 0) {
        c->inputs_[0].push_back(f);
        break;
      }
    }
    if (c->inputs_[0].empty()) {
      // Wrap-around to the beginning of the key space
      c->inputs_[0].push_back(current_->files_[level][0]);
    }
  } else if (seek_compaction) {
    level = current_->file_to_compact_level_;
    c = new Compaction(level);
    c->inputs_[0].push_back(current_->file_to_compact_);
  } else {
    return NULL;
  }

  c->input_version_ = current_;
  c->input_version_->Ref();

  // Files in level 0 may overlap each other, so pick up all overlapping ones
  if (level == 0) {
    InternalKey smallest, largest;
    GetRange(c->inputs_[0], &smallest, &largest);
    // Note that the next call will discard the file we placed in
    // c->inputs_[0] earlier and replace it with an overlapping set
    // which will include the picked file.
    current_->GetOverlappingInputs(0, &smallest, &largest, &c->inputs_[0]);
    assert(!c->inputs_[0].empty());
  }

  SetupOtherInputs(c);

  return c;
}

void VersionSet::SetupOtherInputs(Compaction* c) {
  const int level = c->level();
  InternalKey smallest, largest;
  GetRange(c->inputs_[0], &smallest, &largest);

  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);

  // Get entire range covered by compaction
  InternalKey all_start, all_limit;
  GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);

  // See if we can grow the number of inputs in "level" without
  // changing the number of "level+1" files we pick up.
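  // The expansion is abandoned if it would pull in more than
  // kExpandedCompactionByteSizeLimit bytes of input or if it would change
  // the set of "level+1" files already chosen.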
  if (!c->inputs_[1].empty()) {
    std::vector<FileMetaData*> expanded0;
    current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
    const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
    const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
    const int64_t expanded0_size = TotalFileSize(expanded0);
    if (expanded0.size() > c->inputs_[0].size() &&
        inputs1_size + expanded0_size < kExpandedCompactionByteSizeLimit) {
      InternalKey new_start, new_limit;
      GetRange(expanded0, &new_start, &new_limit);
      std::vector<FileMetaData*> expanded1;
      current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
                                     &expanded1);
      if (expanded1.size() == c->inputs_[1].size()) {
        Log(options_->info_log,
            "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
            level,
            int(c->inputs_[0].size()),
            int(c->inputs_[1].size()),
            long(inputs0_size), long(inputs1_size),
            int(expanded0.size()),
            int(expanded1.size()),
            long(expanded0_size), long(inputs1_size));
        smallest = new_start;
        largest = new_limit;
        c->inputs_[0] = expanded0;
        c->inputs_[1] = expanded1;
        GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
      }
    }
  }

  // Compute the set of grandparent files that overlap this compaction
  // (parent == level+1; grandparent == level+2)
  if (level + 2 < config::kNumLevels) {
    current_->GetOverlappingInputs(level + 2, &all_start, &all_limit,
                                   &c->grandparents_);
  }

  if (false) {
    Log(options_->info_log, "Compacting %d '%s' .. '%s'",
        level,
        smallest.DebugString().c_str(),
        largest.DebugString().c_str());
  }

  // Update the place where we will do the next compaction for this level.
  // We update this immediately instead of waiting for the VersionEdit
  // to be applied so that if the compaction fails, we will try a different
  // key range next time.
  compact_pointer_[level] = largest.Encode().ToString();
  c->edit_.SetCompactPointer(level, largest);
}

Compaction* VersionSet::CompactRange(
    int level,
    const InternalKey* begin,
    const InternalKey* end) {
  std::vector<FileMetaData*> inputs;
  current_->GetOverlappingInputs(level, begin, end, &inputs);
  if (inputs.empty()) {
    return NULL;
  }

  // Avoid compacting too much in one shot in case the range is large.
  // But we cannot do this for level-0 since level-0 files can overlap
  // and we must not pick one file and drop another older file if the
  // two files overlap.
  if (level > 0) {
    const uint64_t limit = MaxFileSizeForLevel(level);
    uint64_t total = 0;
    for (size_t i = 0; i < inputs.size(); i++) {
      uint64_t s = inputs[i]->file_size;
      total += s;
      if (total >= limit) {
        inputs.resize(i + 1);
        break;
      }
    }
  }

  Compaction* c = new Compaction(level);
  c->input_version_ = current_;
  c->input_version_->Ref();
  c->inputs_[0] = inputs;
  SetupOtherInputs(c);
  return c;
}

Compaction::Compaction(int level)
    : level_(level),
      max_output_file_size_(MaxFileSizeForLevel(level)),
      input_version_(NULL),
      grandparent_index_(0),
      seen_key_(false),
      overlapped_bytes_(0) {
  for (int i = 0; i < config::kNumLevels; i++) {
    level_ptrs_[i] = 0;
  }
}

Compaction::~Compaction() {
  if (input_version_ != NULL) {
    input_version_->Unref();
  }
}

bool Compaction::IsTrivialMove() const {
  // Avoid a move if there is lots of overlapping grandparent data.
  // Otherwise, the move could create a parent file that will require
  // a very expensive merge later on.
  return (num_input_files(0) == 1 &&
          num_input_files(1) == 0 &&
          TotalFileSize(grandparents_) <= kMaxGrandParentOverlapBytes);
}

void Compaction::AddInputDeletions(VersionEdit* edit) {
  for (int which = 0; which < 2; which++) {
    for (size_t i = 0; i < inputs_[which].size(); i++) {
      edit->DeleteFile(level_ + which, inputs_[which][i]->number);
    }
  }
}

bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
  // Maybe use binary search to find right entry instead of linear search?
  const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
  for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
    const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
    for (; level_ptrs_[lvl] < files.size(); ) {
      FileMetaData* f = files[level_ptrs_[lvl]];
      if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
        // We've advanced far enough
        if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) {
          // Key falls in this file's range, so definitely not base level
          return false;
        }
        break;
      }
      level_ptrs_[lvl]++;
    }
  }
  return true;
}

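// Returns true iff we should stop building the current output file before
// processing "internal_key": once the output would overlap more than
// kMaxGrandParentOverlapBytes of grandparent (level+2) data, a new output
// file is started to keep future merges cheap.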
bool Compaction::ShouldStopBefore(const Slice& internal_key) {
  // Scan to find earliest grandparent file that contains key.
  const InternalKeyComparator* icmp = &input_version_->vset_->icmp_;
  while (grandparent_index_ < grandparents_.size() &&
      icmp->Compare(internal_key,
                    grandparents_[grandparent_index_]->largest.Encode()) > 0) {
    if (seen_key_) {
      overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
    }
    grandparent_index_++;
  }
  seen_key_ = true;

  if (overlapped_bytes_ > kMaxGrandParentOverlapBytes) {
    // Too much overlap for current output; start new output
    overlapped_bytes_ = 0;
    return true;
  } else {
    return false;
  }
}

void Compaction::ReleaseInputs() {
  if (input_version_ != NULL) {
    input_version_->Unref();
    input_version_ = NULL;
  }
}

}  // namespace leveldb