// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <iterator>

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

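// Illustrative usage (a sketch, not taken from real call sites; the variable
// names below are assumptions): an owner constructs the Directory around a
// backing store, opens it, and periodically flushes dirty state.
//
//   Directory dir(store, error_handler, report_fn, nigori_handler, crypto);
//   if (dir.Open("user_share", delegate, observer) == OPENED) {
//     // ... mutate entries inside WriteTransactions ...
//     dir.SaveChanges();  // Persist dirty entries to the backing store.
//   }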
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

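// Builds the in-memory indices (parent/child index, unsynced and
// unapplied-update sets, and the ID, server tag and client tag maps) from the
// freshly loaded entries.  The handles map is swapped in wholesale, so
// |handles_map| is emptied here.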
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used in case Load fails before kernel_ is initialized.
  // We swap these into the kernel in O(1) later.
  Directory::MetahandlesMap tmp_handles_map;
  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'.  This will
  // prevent local ID reuse in the case of an early crash.  See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}

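// Counts |kernel| plus all of its descendants by doing a breadth-first walk
// over the parent/child index: each directory's OrderedChildSet is queued,
// and every entry visited adds one to the count.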
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(WriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(WriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(WriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

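// An entry may be dropped from memory only when it is deleted, clean, and no
// longer referenced by the syncing, unsynced, or unapplied-update machinery;
// forgetting it any earlier would lose state we still need to persist or
// apply.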
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

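// Copies all dirty state into |snapshot| and optimistically clears the dirty
// bits on the live entries.  If the subsequent write fails, the bits are
// restored by HandleSaveChangesFailure(); see that function below.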
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid reusing local IDs if the process crashes, record a next_id of
  // greater magnitude than could possibly be reached before the next
  // SaveChanges.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

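// The save pipeline has three phases: snapshot dirty state under the kernel
// lock, hand the snapshot to the backing store, then either vacuum purgeable
// entries from memory (on success) or re-mark everything dirty (on failure).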
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

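// After a successful save, deleted entries that are up to date on both the
// client and the server no longer need to live in memory; drop them from
// every in-memory index and free them.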
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

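// "Unapplying" rolls an entry back to the state it would have had if it had
// just arrived as a server update: local changes are discarded, the unapplied
// bit is set, and the entry is locally deleted pending re-application.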
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

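// Removes |entry| from every in-memory index.  Ownership either transfers to
// |entries_to_journal| (when the delete journal needs a record of it) or the
// entry is freed outright.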
void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

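// Purges all entries belonging to |disabled_types| (proxy types excluded,
// since they own no entries).  Types in |types_to_unapply| are rolled back to
// unapplied server data instead of deleted; types in |types_to_journal| have
// their deletions recorded in the delete journal first.
//
// Illustrative call (a sketch; the surrounding setup is assumed):
//   dir.PurgeEntriesWithTypeIn(ModelTypeSet(BOOKMARKS),
//                              ModelTypeSet(BOOKMARKS),  // journal deletions
//                              ModelTypeSet());          // unapply nothing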
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from the
      // hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers for unapplied types.
        if (!types_to_unapply.Has(it.Get()))
          kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
  return true;
}

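// Undoes the optimistic bookkeeping done in TakeSnapshotForSaveChanges() so
// that the next SaveChanges() attempt will retry everything in |snapshot|.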
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetAllEntryKernels(BaseTransaction* trans,
                                   std::vector<const EntryKernel*>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->push_back(i->second);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  std::copy(kernel_->unsynced_metahandles.begin(),
            kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
    BaseTransaction* trans) const {
  FullModelTypeSet server_types;
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (!kernel_->unapplied_update_metahandles[type].empty()) {
      server_types.Put(type);
    }
  }
  return server_types;
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const EntryKernelMutationMap& mutations) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  MetahandleSet handles;

  switch (invariant_check_level_) {
  case FULL_DB_VERIFICATION:
    GetAllMetaHandles(trans, &handles);
    break;
  case VERIFY_CHANGES:
    for (EntryKernelMutationMap::const_iterator i = mutations.begin();
         i != mutations.end(); ++i) {
      handles.insert(i->first);
    }
    break;
  case OFF:
    break;
  }

  return CheckTreeInvariants(trans, handles);
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

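// Verifies structural invariants for each entry in |handles|: the root is a
// directory, live entries have real names and reachable, undeleted directory
// ancestors, and version/ID/flag combinations are mutually consistent.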
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.Get(ID);
    syncable::Id parentid = e.Get(PARENT_ID);

    if (id.IsRoot()) {
      if (!SyncAssert(e.Get(IS_DIR), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.Get(IS_UNSYNCED), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.Get(IS_DEL)) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.Get(NON_UNIQUE_NAME).empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.Get(META_HANDLE)))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.Get(IS_DIR), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.Get(IS_DEL), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.Get(META_HANDLE)),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.Get(PARENT_ID);
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.Get(BASE_VERSION);
    int64 server_version = e.Get(SERVER_VERSION);
    bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.Get(IS_UNAPPLIED_UPDATE)) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.Get(IS_DEL), FROM_HERE,
                          "The entry should have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.Get(IS_DIR)) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.Get(IS_DEL)) {
          if (!SyncAssert(e.Get(IS_UNSYNCED), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server.  They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() &&
                      e.Get(IS_DEL) &&
                      e.Get(IS_UNSYNCED)), FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

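// GetPredecessorId()/GetSuccessorId() return the ID of the entry immediately
// before/after |e| among the ordered children of its parent, or the root ID
// when |e| sits at the corresponding end of the sibling list.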
syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
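//
// Computes a UniquePosition for |e| so that it sorts immediately after
// |predecessor| (where a root-ID predecessor means "first among siblings").
// Four cases are handled below: no siblings, inserting at the front,
// appending at the end, and inserting between two positioned siblings.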
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

}  // namespace syncable
}  // namespace syncer