// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <iterator>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

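// Builds the in-memory indices from a freshly loaded metahandles map: the
// parent-child index, the unsynced and unapplied metahandle sets, and the
// ID, server tag and client tag maps.  Takes ownership of the entries by
// swapping the map into kernel_.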
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, built before |kernel_| is initialized in case Load
  // fails.  We swap these into place in O(1) later.
  Directory::MetahandlesMap tmp_handles_map;
  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'.  This will
  // prevent local ID reuse in the case of an early crash.  See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}

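// Counts this node plus all of its descendants.  The traversal is iterative:
// each directory's child set is pushed onto a work queue rather than
// recursing.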
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

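// An entry's position index is its distance from the beginning of its
// parent's ordered child set.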
int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

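// An entry can be dropped from memory only once it is deleted, clean, not
// mid-commit (SYNCING), and carries no unsynced or unapplied state.  The
// asserts below cross-check that the bookkeeping sets agree.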
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

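// Copies all state that needs to be persisted into |snapshot|.  Dirty bits
// are cleared optimistically here; HandleSaveChangesFailure() restores them
// if the write fails.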
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid ID collisions if the process crashes, we persist a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

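// Flushes a snapshot of all dirty state to the backing store.  On success
// the purgeable entries are vacuumed out of memory; on failure the snapshot
// is used to restore the in-memory dirty state.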
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

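// Resets |entry| to the state it would have if it had just arrived as a
// server update: unapplied, not unsynced, locally deleted, and at the
// "newly created" base version.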
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

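// Removes |entry| from every in-memory index.  Ownership either passes to
// |entries_to_journal| (when |save_to_journal| is set) or the entry is freed.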
void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

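// Purges all entries belonging to |disabled_types|.  Entries of types in
// |types_to_unapply| are unapplied rather than deleted; deletions of types
// in |types_to_journal| are recorded in the delete journal when it is
// enabled for that type.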
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from the
      // hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers for unapplied types.
        if (!types_to_unapply.Has(it.Get()))
          kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
  return true;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

    867   // No need to lock since nothing ever writes to it after load.
    868   return kernel_->cache_guid;
    869 }
    870 
    871 NigoriHandler* Directory::GetNigoriHandler() {
    872   return nigori_handler_;
    873 }
    874 
    875 Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
    876   DCHECK_EQ(this, trans->directory());
    877   return cryptographer_;
    878 }
    879 
    880 void Directory::GetAllMetaHandles(BaseTransaction* trans,
    881                                   MetahandleSet* result) {
    882   result->clear();
    883   ScopedKernelLock lock(this);
    884   for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
    885        i != kernel_->metahandles_map.end(); ++i) {
    886     result->insert(i->first);
    887   }
    888 }
    889 
    890 void Directory::GetAllEntryKernels(BaseTransaction* trans,
    891                                    std::vector<const EntryKernel*>* result) {
    892   result->clear();
    893   ScopedKernelLock lock(this);
    894   for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
    895        i != kernel_->metahandles_map.end(); ++i) {
    896     result->push_back(i->second);
    897   }
    898 }
    899 
    900 void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
    901                                        Metahandles* result) {
    902   result->clear();
    903   ScopedKernelLock lock(this);
    904   copy(kernel_->unsynced_metahandles.begin(),
    905        kernel_->unsynced_metahandles.end(), back_inserter(*result));
    906 }
    907 
    908 int64 Directory::unsynced_entity_count() const {
    909   ScopedKernelLock lock(this);
    910   return kernel_->unsynced_metahandles.size();
    911 }
    912 
    913 bool Directory::TypeHasUnappliedUpdates(ModelType type) {
    914   ScopedKernelLock lock(this);
    915   return !kernel_->unapplied_update_metahandles[type].empty();
    916 }
    917 
    918 void Directory::GetUnappliedUpdateMetaHandles(
    919     BaseTransaction* trans,
    920     FullModelTypeSet server_types,
    921     std::vector<int64>* result) {
    922   result->clear();
    923   ScopedKernelLock lock(this);
    924   for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    925     const ModelType type = ModelTypeFromInt(i);
    926     if (server_types.Has(type)) {
    927       std::copy(kernel_->unapplied_update_metahandles[type].begin(),
    928                 kernel_->unapplied_update_metahandles[type].end(),
    929                 back_inserter(*result));
    930     }
    931   }
    932 }
    933 
    934 void Directory::CollectMetaHandleCounts(
    935     std::vector<int>* num_entries_by_type,
    936     std::vector<int>* num_to_delete_entries_by_type) {
    937   syncable::ReadTransaction trans(FROM_HERE, this);
    938   ScopedKernelLock lock(this);
    939 
    940   for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
    941        it != kernel_->metahandles_map.end(); ++it) {
    942     EntryKernel* entry = it->second;
    943     const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    944     (*num_entries_by_type)[type]++;
    945     if (entry->ref(IS_DEL))
    946       (*num_to_delete_entries_by_type)[type]++;
    947   }
    948 }
    949 
    950 bool Directory::CheckInvariantsOnTransactionClose(
    951     syncable::BaseTransaction* trans,
    952     const MetahandleSet& modified_handles) {
    953   // NOTE: The trans may be in the process of being destructed.  Be careful if
    954   // you wish to call any of its virtual methods.
    955   switch (invariant_check_level_) {
    956     case FULL_DB_VERIFICATION: {
    957       MetahandleSet all_handles;
    958       GetAllMetaHandles(trans, &all_handles);
    959       return CheckTreeInvariants(trans, all_handles);
    960     }
    961     case VERIFY_CHANGES: {
    962       return CheckTreeInvariants(trans, modified_handles);
    963     }
    964     case OFF: {
    965       return true;
    966     }
    967   }
    968   NOTREACHED();
    969   return false;
    970 }
    971 
    972 bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
    973   MetahandleSet handles;
    974   GetAllMetaHandles(trans, &handles);
    975   return CheckTreeInvariants(trans, handles);
    976 }
    977 
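// Verifies structural invariants for each handle in |handles|: a well-formed
// root entry, parent chains that stay within bounds and reach the root, and
// ID/version/flag combinations consistent with the entry's sync state.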
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.GetMetahandle()))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.GetParentId();
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.GetBaseVersion();
    int64 server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should not have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.GetIsDel()) {
          if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server.  They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
                    FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
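//
// Places |e| immediately after |predecessor| among its siblings by computing
// a fresh UniquePosition: InitialPosition() when there is nothing to position
// against, Before()/After() at the ends of the sibling list, and Between()
// when inserting between two positioned neighbours.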
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

}  // namespace syncable
}  // namespace syncer