Home | History | Annotate | Download | only in syncable
      1 // Copyright 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "sync/syncable/directory_backing_store.h"
      6 
      7 #include "build/build_config.h"
      8 
      9 #include <limits>
     10 
     11 #include "base/base64.h"
     12 #include "base/debug/trace_event.h"
     13 #include "base/logging.h"
     14 #include "base/rand_util.h"
     15 #include "base/strings/stringprintf.h"
     16 #include "base/time/time.h"
     17 #include "sql/connection.h"
     18 #include "sql/statement.h"
     19 #include "sql/transaction.h"
     20 #include "sync/internal_api/public/base/node_ordinal.h"
     21 #include "sync/protocol/bookmark_specifics.pb.h"
     22 #include "sync/protocol/sync.pb.h"
     23 #include "sync/syncable/syncable-inl.h"
     24 #include "sync/syncable/syncable_columns.h"
     25 #include "sync/syncable/syncable_util.h"
     26 #include "sync/util/time.h"
     27 
     28 using std::string;
     29 
     30 namespace syncer {
     31 namespace syncable {
     32 
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
// Every bump needs a matching MigrateVersionNToN+1() step wired into the
// migration ladder in InitializeTables().
const int32 kCurrentDBVersion = 89;
     39 
// Iterate over the fields of |entry| and bind each to |statement| for
// updating.  (Note: despite what an older comment said, nothing is returned;
// |index| simply tracks the next statement parameter slot.)  The bind order
// must match the column order produced by AppendColumnList() and consumed by
// UnpackEntry(): int64s, times, ids, bits, strings, protos, unique
// positions, then attachment metadata.
void BindFields(const EntryKernel& entry,
                sql::Statement* statement) {
  int index = 0;  // Next parameter slot in |statement|.
  int i = 0;      // Current field; deliberately carried across the loops.
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
  }
  // Times are persisted as int64 proto-time (ms since the Unix epoch, per
  // the version-77 migration note below).
  for ( ; i < TIME_FIELDS_END; ++i) {
    statement->BindInt64(index++,
                         TimeToProtoTime(
                             entry.ref(static_cast<TimeField>(i))));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
  }
  // Protobuf-valued fields are serialized and bound as blobs.
  for ( ; i < PROTO_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<AttachmentMetadataField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
}
     79 
// The caller owns the returned EntryKernel*.  Assumes the statement currently
// points to a valid row in the metas table. Returns NULL to indicate that
// it detected a corruption in the data on unpacking.
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  // Columns are read back in the same canonical field-group order used by
  // BindFields()/AppendColumnList(); |i| is carried across the loops.
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    // Unlike the proto fields above, unique positions are validated on
    // parse: a malformed blob aborts the whole load as corruption.
    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      DVLOG(1) << "Unpacked invalid position.  Assuming the DB is corrupt";
      return scoped_ptr<EntryKernel>();
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<AttachmentMetadataField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }

  // Sanity check on positions.  We risk strange and rare crashes if our
  // assumptions about unique position values are broken.
  if (kernel->ShouldMaintainPosition() &&
      !kernel->ref(UNIQUE_POSITION).IsValid()) {
    DVLOG(1) << "Unpacked invalid position on an entity that should have a "
             << "valid position.  Assuming the DB is corrupt.";
    return scoped_ptr<EntryKernel>();
  }

  return kernel.Pass();
}
    138 
    139 namespace {
    140 
    141 string ComposeCreateTableColumnSpecs() {
    142   const ColumnSpec* begin = g_metas_columns;
    143   const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
    144   string query;
    145   query.reserve(kUpdateStatementBufferSize);
    146   char separator = '(';
    147   for (const ColumnSpec* column = begin; column != end; ++column) {
    148     query.push_back(separator);
    149     separator = ',';
    150     query.append(column->name);
    151     query.push_back(' ');
    152     query.append(column->spec);
    153   }
    154   query.push_back(')');
    155   return query;
    156 }
    157 
    158 void AppendColumnList(std::string* output) {
    159   const char* joiner = " ";
    160   // Be explicit in SELECT order to match up with UnpackEntry.
    161   for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    162     output->append(joiner);
    163     output->append(ColumnName(i));
    164     joiner = ", ";
    165   }
    166 }
    167 
    168 }  // namespace
    169 
    170 ///////////////////////////////////////////////////////////////////////////////
    171 // DirectoryBackingStore implementation.
    172 
// Creates a backing store that owns a fresh sql::Connection, tagged for
// database histograms and tuned (page/cache size) for the sync directory.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
  : db_(new sql::Connection()),
    dir_name_(dir_name),
    needs_column_refresh_(false) {
  db_->set_histogram_tag("SyncDirectory");
  db_->set_page_size(4096);
  db_->set_cache_size(32);
}
    181 
// Variant that adopts a caller-supplied connection.  Note no histogram tag
// or page/cache tuning is applied here, unlike the default constructor --
// presumably the caller has already configured |db|.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
  : db_(db),
    dir_name_(dir_name),
    needs_column_refresh_(false) {
}
    188 
// Nothing explicit to do; members release their resources on destruction.
DirectoryBackingStore::~DirectoryBackingStore() {
}
    191 
// Deletes every row whose metahandle appears in |handles| from the table
// selected by |from|.  Returns false on the first failed delete.
bool DirectoryBackingStore::DeleteEntries(EntryTable from,
                                          const MetahandleSet& handles) {
  if (handles.empty())
    return true;

  sql::Statement statement;
  // Call GetCachedStatement() separately to get different statements for
  // different tables.
  switch (from) {
    case METAS_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
      break;
    case DELETE_JOURNAL_TABLE:
      statement.Assign(db_->GetCachedStatement(
          SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
      break;
  }

  // Reuse the one prepared statement for every handle, resetting between
  // runs.
  for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
       ++i) {
    statement.BindInt64(0, *i);
    if (!statement.Run())
      return false;
    statement.Reset(true);
  }
  return true;
}
    220 
// Flushes |snapshot| (dirty entries, purges, delete journals, and optionally
// the share-wide kernel info) to disk inside one SQL transaction.  Returns
// false on any failure; the transaction then rolls back on scope exit.
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
    (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
      snapshot.delete_journals.empty() &&
      snapshot.delete_journals_to_purge.empty() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statment_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    sql::Statement s1(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "UPDATE share_info "
            "SET store_birthday = ?, "
            "next_id = ?, "
            "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindInt64(1, info.next_id);
    s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    // share_info is expected to contain exactly one row.
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    sql::Statement s2(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "INSERT OR REPLACE "
            "INTO models (model_id, "
                         "progress_marker, "
                         "transaction_version, "
                         "context) "
            "VALUES (?, ?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      string context;
      info.datatype_context[type].SerializeToString(&context);
      s2.BindBlob(3, context.data(), context.length());
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}
    309 
// Opens or creates the schema and walks it forward to kCurrentDBVersion
// inside a single transaction.  Each step below migrates exactly one
// version; a failed step leaves |version_on_disk| behind, which falls
// through to the drop-and-recreate path at the bottom (forcing a re-sync).
// Databases newer than this build are rejected outright.
bool DirectoryBackingStore::InitializeTables() {
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }

  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }

  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }

  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }

  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }

  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }

  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }

  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }

  // Version 81 replaces the int64 server_position_in_parent_field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }

  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }

  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }

  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }

  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }

  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }

  // Version 87 migration adds a collection of attachment ids per sync entry.
  if (version_on_disk == 86) {
    if (MigrateVersion86To87())
      version_on_disk = 87;
  }

  // Version 88 migration adds datatype contexts to the models table.
  if (version_on_disk == 87) {
    if (MigrateVersion87To88())
      version_on_disk = 88;
  }

  // Version 89 migration adds server attachment metadata to the metas table.
  if (version_on_disk == 88) {
    if (MigrateVersion88To89())
      version_on_disk = 89;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    // A zeroed version forces the drop-and-recreate fallback below.
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  sql::Statement s(db_->GetUniqueStatement(
          "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version " <<
      db_create_version;

  return transaction.Commit();
}
    484 
// This function drops unused columns by creating a new table that contains only
// the currently used columns then copying all rows from the old tables into
// this new one.  The tables are then rearranged so the new replaces the old.
// Returns false if any statement fails; only clears needs_column_refresh_ on
// full success.
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions.  This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  // Drop metas.
  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips "
          "FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  needs_column_refresh_ = false;
  return true;
}
    540 
    541 bool DirectoryBackingStore::LoadEntries(
    542     Directory::MetahandlesMap* handles_map) {
    543   string select;
    544   select.reserve(kUpdateStatementBufferSize);
    545   select.append("SELECT ");
    546   AppendColumnList(&select);
    547   select.append(" FROM metas");
    548 
    549   sql::Statement s(db_->GetUniqueStatement(select.c_str()));
    550 
    551   while (s.Step()) {
    552     scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    553     // A null kernel is evidence of external data corruption.
    554     if (!kernel)
    555       return false;
    556 
    557     int64 handle = kernel->ref(META_HANDLE);
    558     (*handles_map)[handle] = kernel.release();
    559   }
    560   return s.Succeeded();
    561 }
    562 
    563 bool DirectoryBackingStore::LoadDeleteJournals(
    564     JournalIndex* delete_journals) {
    565   string select;
    566   select.reserve(kUpdateStatementBufferSize);
    567   select.append("SELECT ");
    568   AppendColumnList(&select);
    569   select.append(" FROM deleted_metas");
    570 
    571   sql::Statement s(db_->GetUniqueStatement(select.c_str()));
    572 
    573   while (s.Step()) {
    574     scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
    575     // A null kernel is evidence of external data corruption.
    576     if (!kernel)
    577       return false;
    578     delete_journals->insert(kernel.release());
    579   }
    580   return s.Succeeded();
    581 }
    582 
// Loads directory-wide state into |info|: the single share_info row, the
// per-datatype models rows, and the maximum metahandle in use.
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
            "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->kernel_info.next_id = s.ColumnInt64(1);
    info->cache_guid = s.ColumnString(2);
    s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version, context FROM models"));

    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      // Rows with unrecognized or top-level-folder model IDs are skipped
      // rather than treated as errors.
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
        info->kernel_info.datatype_context[type].ParseFromArray(
            s.ColumnBlob(3), s.ColumnByteLength(3));
      }
    }
    if (!s.Succeeded())
      return false;
  }
  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}
    637 
/* static */
// Binds |entry|'s fields to |save_statement| and executes it.  Reset(true)
// first, so the cached statement can be reused across entries.
bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
                                          const EntryKernel& entry) {
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}
    645 
    646 bool DirectoryBackingStore::DropDeletedEntries() {
    647   if (!db_->Execute("DELETE FROM metas "
    648                     "WHERE is_del > 0 "
    649                     "AND is_unsynced < 1 "
    650                     "AND is_unapplied_update < 1")) {
    651     return false;
    652   }
    653   if (!db_->Execute("DELETE FROM metas "
    654                     "WHERE is_del > 0 "
    655                     "AND id LIKE 'c%'")) {
    656     return false;
    657   }
    658   return true;
    659 }
    660 
    661 bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
    662   string query = "DROP TABLE IF EXISTS ";
    663   query.append(table_name);
    664   return db_->Execute(query.c_str());
    665 }
    666 
    667 void DirectoryBackingStore::DropAllTables() {
    668   SafeDropTable("metas");
    669   SafeDropTable("temp_metas");
    670   SafeDropTable("share_info");
    671   SafeDropTable("temp_share_info");
    672   SafeDropTable("share_version");
    673   SafeDropTable("extended_attributes");
    674   SafeDropTable("models");
    675   SafeDropTable("temp_models");
    676   needs_column_refresh_ = false;
    677 }
    678 
    679 // static
    680 ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
    681     const void* data, int size) {
    682   sync_pb::EntitySpecifics specifics;
    683   if (!specifics.ParseFromArray(data, size))
    684     return UNSPECIFIED;
    685   return GetModelTypeFromSpecifics(specifics);
    686 }
    687 
    688 // static
    689 string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
    690   sync_pb::EntitySpecifics specifics;
    691   AddDefaultFieldValue(model_type, &specifics);
    692   return specifics.SerializeAsString();
    693 }
    694 
    695 // static
    696 std::string DirectoryBackingStore::GenerateCacheGUID() {
    697   // Generate a GUID with 128 bits of randomness.
    698   const int kGuidBytes = 128 / 8;
    699   std::string guid;
    700   base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
    701   return guid;
    702 }
    703 
// Migration helper: for every metas row, reads the legacy |old_columns|
// alongside the current |specifics_column| blob, lets |handler_function|
// fold the legacy values (which start at column index 2 of the query) into
// the specifics proto, and writes the result back.  Returns false if any
// statement fails.
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    // Start from whatever is already stored in the specifics column.
    std::string new_value_bytes;
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    update.Reset(true);
  }
  return query.Succeeded();
}
    735 
    736 bool DirectoryBackingStore::SetVersion(int version) {
    737   sql::Statement s(db_->GetCachedStatement(
    738           SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
    739   s.BindInt(0, version);
    740 
    741   return s.Run();
    742 }
    743 
    744 int DirectoryBackingStore::GetVersion() {
    745   if (!db_->DoesTableExist("share_version"))
    746     return 0;
    747 
    748   sql::Statement statement(db_->GetUniqueStatement(
    749           "SELECT data FROM share_version"));
    750   if (statement.Step()) {
    751     return statement.ColumnInt(0);
    752   } else {
    753     return 0;
    754   }
    755 }
    756 
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  // The obsolete columns are actually dropped later, by RefreshColumns().
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}
    767 
bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
  // NOTE: the version stamp is written before the schema changes.  This runs
  // inside the InitializeTables() transaction, so a failure below rolls the
  // stamp back along with everything else.
  SetVersion(70);
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
    return false;
  needs_column_refresh_ = true;

  // Seed the new column from the legacy singleton_tag values.
  if (!db_->Execute(
          "UPDATE metas SET unique_server_tag = singleton_tag"))
    return false;

  return true;
}
    785 
    786 namespace {
    787 
    788 // Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
    789 // MigrateVersion68To69().
    790 void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
    791                                  int old_value_column,
    792                                  sync_pb::EntitySpecifics* mutable_new_value) {
    793   // Extract data from the column trio we expect.
    794   bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
    795   std::string old_url = old_value_query->ColumnString(old_value_column + 1);
    796   std::string old_favicon;
    797   old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
    798   bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);
    799 
    800   if (old_is_bookmark_object) {
    801     sync_pb::BookmarkSpecifics* bookmark_data =
    802         mutable_new_value->mutable_bookmark();
    803     if (!old_is_dir) {
    804       bookmark_data->set_url(old_url);
    805       bookmark_data->set_favicon(old_favicon);
    806     }
    807   }
    808 }
    809 
    810 }  // namespace
    811 
    812 bool DirectoryBackingStore::MigrateVersion68To69() {
    813   // In Version 68, there were columns on table 'metas':
    814   //   string BOOKMARK_URL
    815   //   string SERVER_BOOKMARK_URL
    816   //   blob BOOKMARK_FAVICON
    817   //   blob SERVER_BOOKMARK_FAVICON
    818   // In version 69, these columns went away in favor of storing
    819   // a serialized EntrySpecifics protobuf in the columns:
    820   //   protobuf blob SPECIFICS
    821   //   protobuf blob SERVER_SPECIFICS
    822   // For bookmarks, EntrySpecifics is extended as per
    823   // bookmark_specifics.proto. This migration converts bookmarks from the
    824   // former scheme to the latter scheme.
    825 
    826   // First, add the two new columns to the schema.
    827   if (!db_->Execute(
    828           "ALTER TABLE metas ADD COLUMN specifics blob"))
    829     return false;
    830   if (!db_->Execute(
    831           "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    832     return false;
    833 
    834   // Next, fold data from the old columns into the new protobuf columns.
    835   if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
    836                            "bookmark_favicon, is_dir"),
    837                           "specifics",
    838                           &EncodeBookmarkURLAndFavicon)) {
    839     return false;
    840   }
    841   if (!MigrateToSpecifics(("server_is_bookmark_object, "
    842                            "server_bookmark_url, "
    843                            "server_bookmark_favicon, "
    844                            "server_is_dir"),
    845                           "server_specifics",
    846                           &EncodeBookmarkURLAndFavicon)) {
    847     return false;
    848   }
    849 
    850   // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
    851   // ModelType: it shouldn't have BookmarkSpecifics.
    852   if (!db_->Execute(
    853           "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
    854           "singleton_tag IN ('google_chrome')"))
    855     return false;
    856 
    857   SetVersion(69);
    858   needs_column_refresh_ = true;  // Trigger deletion of old columns.
    859   return true;
    860 }
    861 
// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table.  They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  {
    sql::Statement fetch(db_->GetUniqueStatement(
            "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
    if (!fetch.Step())
      return false;

    int64 last_sync_timestamp = fetch.ColumnInt64(0);
    bool initial_sync_ended = fetch.ColumnBool(1);

    // Verify there were no additional rows returned.
    DCHECK(!fetch.Step());
    DCHECK(fetch.Succeeded());

    // The old share-wide values are stored under the BOOKMARKS model_id —
    // presumably because bookmarks was the only syncable type at this
    // version (NOTE(review): confirm against v70-era type registration).
    sql::Statement update(db_->GetUniqueStatement(
            "INSERT INTO models (model_id, "
            "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.BindInt64(1, last_sync_timestamp);
    update.BindBool(2, initial_sync_ended);

    if (!update.Run())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table.
  // (SQLite's ALTER TABLE cannot drop columns, so copy the surviving
  // columns into a temp table, drop the original, and rename the temp
  // table back into place.)
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;
  SetVersion(71);
  return true;
}
    913 
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.  Dropping the table is the entire migration.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
    921 
    922 bool DirectoryBackingStore::MigrateVersion72To73() {
    923   // Version 73 added one column to the table 'share_info': notification_state
    924   if (!db_->Execute(
    925           "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
    926     return false;
    927   SetVersion(73);
    928   return true;
    929 }
    930 
    931 bool DirectoryBackingStore::MigrateVersion73To74() {
    932   // Version 74 added the following columns to the table 'share_info':
    933   //   autofill_migration_state
    934   //   bookmarks_added_during_autofill_migration
    935   //   autofill_migration_time
    936   //   autofill_entries_added_during_migration
    937   //   autofill_profiles_added_during_migration
    938 
    939   if (!db_->Execute(
    940           "ALTER TABLE share_info ADD COLUMN "
    941           "autofill_migration_state INT default 0"))
    942     return false;
    943 
    944   if (!db_->Execute(
    945           "ALTER TABLE share_info ADD COLUMN "
    946           "bookmarks_added_during_autofill_migration "
    947           "INT default 0"))
    948     return false;
    949 
    950   if (!db_->Execute(
    951           "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
    952           "INT default 0"))
    953     return false;
    954 
    955   if (!db_->Execute(
    956           "ALTER TABLE share_info ADD COLUMN "
    957           "autofill_entries_added_during_migration "
    958           "INT default 0"))
    959     return false;
    960 
    961   if (!db_->Execute(
    962           "ALTER TABLE share_info ADD COLUMN "
    963           "autofill_profiles_added_during_migration "
    964           "INT default 0"))
    965     return false;
    966 
    967   SetVersion(74);
    968   return true;
    969 }
    970 
bool DirectoryBackingStore::MigrateVersion74To75() {
  // In version 74, there was a table 'models':
  //     blob model_id (entity specifics, primary key)
  //     int last_download_timestamp
  //     boolean initial_sync_ended
  // In version 75, we deprecated the integer-valued last_download_timestamp,
  // using instead a protobuf-valued progress_marker field:
  //     blob progress_marker
  // The progress_marker values are initialized from the value of
  // last_download_timestamp, thereby preserving the download state.

  // Move aside the old table and create a new empty one at the current schema.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV75ModelsTable())
    return false;

  sql::Statement query(db_->GetUniqueStatement(
          "SELECT model_id, last_download_timestamp, initial_sync_ended "
          "FROM temp_models"));

  sql::Statement update(db_->GetUniqueStatement(
          "INSERT INTO models (model_id, "
          "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));

  // Copy each recognized row into the new table.  Rows whose model_id does
  // not map to a known ModelType are intentionally dropped.
  while (query.Step()) {
    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
                                            query.ColumnByteLength(0));
    if (type != UNSPECIFIED) {
      // Set the |timestamp_token_for_migration| on a new
      // DataTypeProgressMarker, using the old value of last_download_timestamp.
      // The server will turn this into a real token on our behalf the next
      // time we check for updates.
      sync_pb::DataTypeProgressMarker progress_marker;
      progress_marker.set_data_type_id(
          GetSpecificsFieldNumberFromModelType(type));
      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
      std::string progress_blob;
      progress_marker.SerializeToString(&progress_blob);

      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
      update.BindBlob(1, progress_blob.data(), progress_blob.length());
      update.BindBool(2, query.ColumnBool(2));
      if (!update.Run())
        return false;
      // Reset(true) clears the bindings so the statement can be re-bound on
      // the next iteration.
      update.Reset(true);
    }
  }
  if (!query.Succeeded())
    return false;

  // Drop the old table.
  SafeDropTable("temp_models");

  SetVersion(75);
  return true;
}
   1028 
   1029 bool DirectoryBackingStore::MigrateVersion75To76() {
   1030   // This change removed five columns:
   1031   //   autofill_migration_state
   1032   //   bookmarks_added_during_autofill_migration
   1033   //   autofill_migration_time
   1034   //   autofill_entries_added_during_migration
   1035   //   autofill_profiles_added_during_migration
   1036   // No data migration is necessary, but we should do a column refresh.
   1037   SetVersion(76);
   1038   needs_column_refresh_ = true;
   1039   return true;
   1040 }
   1041 
bool DirectoryBackingStore::MigrateVersion76To77() {
  // This change changes the format of stored timestamps to ms since
  // the Unix epoch.
  //
  // NOTE(review): the conversion formula below is chosen at compile time by
  // the target OS, so this assumes the database being migrated was written
  // by a build for the same platform family — confirm this holds for any
  // profile-import scenarios.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601).  Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
//     java-library-for-dealing-with-win32-filetime
// .
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif
  // The macro expands to "<col> = <expr>" for each of the four timestamp
  // columns, combined into a single UPDATE statement.
  sql::Statement update_timestamps(db_->GetUniqueStatement(
          "UPDATE metas SET "
          TO_UNIX_TIME_MS(mtime) ", "
          TO_UNIX_TIME_MS(server_mtime) ", "
          TO_UNIX_TIME_MS(ctime) ", "
          TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}
   1069 
   1070 bool DirectoryBackingStore::MigrateVersion77To78() {
   1071   // Version 78 added one column to table 'metas': base_server_specifics.
   1072   if (!db_->Execute(
   1073           "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
   1074     return false;
   1075   }
   1076   SetVersion(78);
   1077   return true;
   1078 }
   1079 
   1080 bool DirectoryBackingStore::MigrateVersion78To79() {
   1081   // Some users are stuck with a DB that causes them to reuse existing IDs.  We
   1082   // perform this one-time fixup on all users to help the few that are stuck.
   1083   // See crbug.com/142987 for details.
   1084   if (!db_->Execute(
   1085           "UPDATE share_info SET next_id = next_id - 65536")) {
   1086     return false;
   1087   }
   1088   SetVersion(79);
   1089   return true;
   1090 }
   1091 
   1092 bool DirectoryBackingStore::MigrateVersion79To80() {
   1093   if (!db_->Execute(
   1094           "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
   1095     return false;
   1096   sql::Statement update(db_->GetUniqueStatement(
   1097           "UPDATE share_info SET bag_of_chips = ?"));
   1098   // An empty message is serialized to an empty string.
   1099   update.BindBlob(0, NULL, 0);
   1100   if (!update.Run())
   1101     return false;
   1102   SetVersion(80);
   1103   return true;
   1104 }
   1105 
   1106 bool DirectoryBackingStore::MigrateVersion80To81() {
   1107   if(!db_->Execute(
   1108          "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
   1109     return false;
   1110 
   1111   sql::Statement get_positions(db_->GetUniqueStatement(
   1112       "SELECT metahandle, server_position_in_parent FROM metas"));
   1113 
   1114   sql::Statement put_ordinals(db_->GetUniqueStatement(
   1115       "UPDATE metas SET server_ordinal_in_parent = ?"
   1116       "WHERE metahandle = ?"));
   1117 
   1118   while(get_positions.Step()) {
   1119     int64 metahandle = get_positions.ColumnInt64(0);
   1120     int64 position = get_positions.ColumnInt64(1);
   1121 
   1122     const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
   1123     put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
   1124     put_ordinals.BindInt64(1, metahandle);
   1125 
   1126     if(!put_ordinals.Run())
   1127       return false;
   1128     put_ordinals.Reset(true);
   1129   }
   1130 
   1131   SetVersion(81);
   1132   needs_column_refresh_ = true;
   1133   return true;
   1134 }
   1135 
   1136 bool DirectoryBackingStore::MigrateVersion81To82() {
   1137   if (!db_->Execute(
   1138       "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
   1139     return false;
   1140   sql::Statement update(db_->GetUniqueStatement(
   1141       "UPDATE models SET transaction_version = 0"));
   1142   if (!update.Run())
   1143     return false;
   1144   SetVersion(82);
   1145   return true;
   1146 }
   1147 
   1148 bool DirectoryBackingStore::MigrateVersion82To83() {
   1149   // Version 83 added transaction_version on sync node.
   1150   if (!db_->Execute(
   1151       "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
   1152     return false;
   1153   sql::Statement update(db_->GetUniqueStatement(
   1154       "UPDATE metas SET transaction_version = 0"));
   1155   if (!update.Run())
   1156     return false;
   1157   SetVersion(83);
   1158   return true;
   1159 }
   1160 
   1161 bool DirectoryBackingStore::MigrateVersion83To84() {
   1162   // Version 84 added deleted_metas table to store deleted metas until we know
   1163   // for sure that the deletions are persisted in native models.
   1164   string query = "CREATE TABLE deleted_metas ";
   1165   query.append(ComposeCreateTableColumnSpecs());
   1166   if (!db_->Execute(query.c_str()))
   1167     return false;
   1168   SetVersion(84);
   1169   return true;
   1170 }
   1171 
   1172 bool DirectoryBackingStore::MigrateVersion84To85() {
   1173   // Version 85 removes the initial_sync_ended flag.
   1174   if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
   1175     return false;
   1176   if (!CreateV81ModelsTable())
   1177     return false;
   1178   if (!db_->Execute("INSERT INTO models SELECT "
   1179                     "model_id, progress_marker, transaction_version "
   1180                     "FROM temp_models")) {
   1181     return false;
   1182   }
   1183   SafeDropTable("temp_models");
   1184 
   1185   SetVersion(85);
   1186   return true;
   1187 }
   1188 
bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
  // and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access to
  // it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  // share_info is expected to contain exactly one row.
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      "  metahandle, "
      "  id, "
      "  specifics, "
      "  is_dir, "
      "  unique_server_tag, "
      "  server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position.  We will lose any unsynced local position changes.
  // Unfortunately, there's nothing we can do to avoid that.  The
  // NEXT_ID / PREV_ID values can't be translated into a UNIQUE_POSITION in a
  // reliable way.
  //
  // NOTE(review): the concatenation below yields "= ?WHERE" with no space;
  // SQLite tokenizes the "?" parameter separately from the keyword, so the
  // statement parses as intended.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      "  server_unique_position = ?,"
      "  unique_position = ?,"
      "  unique_bookmark_tag = ?"
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);


    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      // A leading 'c' on the ID marks a client-created, not-yet-committed
      // item (see the uncommitted-item comment below).
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item.  This is rare, but fortunate.  This
        // means we can set the bookmark tag according to the originator client
        // item ID and originator cache guid, because (unlike the other case) we
        // know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was.  We do not have access to the originator client item
        // ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead.  This is
        // incorrect, but at least all clients that undergo this migration step
        // will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the bookmark
        // tag according to the originator_cache_guid and originator_item_id
        // when we see updates for this item.  That should ensure that commonly
        // modified items will end up with the proper tag values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(), // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    put.Reset(true);
  }

  SetVersion(86);
  needs_column_refresh_ = true;
  return true;
}
   1313 
   1314 bool DirectoryBackingStore::MigrateVersion86To87() {
   1315   // Version 87 adds AttachmentMetadata proto.
   1316   if (!db_->Execute(
   1317           "ALTER TABLE metas ADD COLUMN "
   1318           "attachment_metadata BLOB")) {
   1319     return false;
   1320   }
   1321   SetVersion(87);
   1322   needs_column_refresh_ = true;
   1323   return true;
   1324 }
   1325 
   1326 bool DirectoryBackingStore::MigrateVersion87To88() {
   1327   // Version 88 adds the datatype context to the models table.
   1328   if (!db_->Execute("ALTER TABLE models ADD COLUMN context blob"))
   1329     return false;
   1330 
   1331   SetVersion(88);
   1332   return true;
   1333 }
   1334 
   1335 bool DirectoryBackingStore::MigrateVersion88To89() {
   1336   // Version 89 adds server_attachment_metadata.
   1337   if (!db_->Execute(
   1338           "ALTER TABLE metas ADD COLUMN "
   1339           "server_attachment_metadata BLOB")) {
   1340     return false;
   1341   }
   1342   SetVersion(89);
   1343   needs_column_refresh_ = true;
   1344   return true;
   1345 }
   1346 
bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    // Record the current schema version, keyed by directory name.
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    // Populate the single share_info row.  next_id starts at -2; presumably
    // locally-created entries draw negative IDs counting down from here
    // (NOTE(review): confirm against the ID allocation code).
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_info VALUES"
            "(?, "  // id
            "?, "   // name
            "?, "   // store_birthday
            "?, "   // db_create_version
            "?, "   // db_create_time
            "-2, "  // next_id
            "?, "   // cache_guid
            // TODO(rlarocque, 124140): Remove notification_state field.
            "?, "   // notification_state
            "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);                   // id
    s.BindString(1, dir_name_);                   // name
    s.BindString(2, std::string());               // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.) http://crbug.com/118356
    s.BindString(3, "Unknown");                   // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));    // db_create_time
    s.BindString(5, GenerateCacheGUID());         // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);                       // notification_state
    s.BindBlob(7, NULL, 0);                       // bag_of_chips
    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.  The root is a
    // directory with the fixed ID "r" and metahandle 1.
    const int64 now = TimeToProtoTime(base::Time::Now());
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO metas "
            "( id, metahandle, is_dir, ctime, mtime ) "
            "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);

    if (!s.Run())
      return false;
  }

  return true;
}
   1422 
   1423 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
   1424   string query = "CREATE TABLE ";
   1425   query.append(is_temporary ? "temp_metas" : "metas");
   1426   query.append(ComposeCreateTableColumnSpecs());
   1427   if (!db_->Execute(query.c_str()))
   1428     return false;
   1429 
   1430   // Create a deleted_metas table to save copies of deleted metas until the
   1431   // deletions are persisted. For simplicity, don't try to migrate existing
   1432   // data because it's rarely used.
   1433   SafeDropTable("deleted_metas");
   1434   query = "CREATE TABLE deleted_metas ";
   1435   query.append(ComposeCreateTableColumnSpecs());
   1436   return db_->Execute(query.c_str());
   1437 }
   1438 
   1439 bool DirectoryBackingStore::CreateV71ModelsTable() {
   1440   // This is an old schema for the Models table, used from versions 71 to 74.
   1441   return db_->Execute(
   1442       "CREATE TABLE models ("
   1443       "model_id BLOB primary key, "
   1444       "last_download_timestamp INT, "
   1445       // Gets set if the syncer ever gets updates from the
   1446       // server and the server returns 0.  Lets us detect the
   1447       // end of the initial sync.
   1448       "initial_sync_ended BOOLEAN default 0)");
   1449 }
   1450 
   1451 bool DirectoryBackingStore::CreateV75ModelsTable() {
   1452   // This is an old schema for the Models table, used from versions 75 to 80.
   1453   return db_->Execute(
   1454       "CREATE TABLE models ("
   1455       "model_id BLOB primary key, "
   1456       "progress_marker BLOB, "
   1457       // Gets set if the syncer ever gets updates from the
   1458       // server and the server returns 0.  Lets us detect the
   1459       // end of the initial sync.
   1460       "initial_sync_ended BOOLEAN default 0)");
   1461 }
   1462 
   1463 bool DirectoryBackingStore::CreateV81ModelsTable() {
   1464   // This is an old schema for the Models table, used from versions 81 to 87.
   1465   return db_->Execute(
   1466       "CREATE TABLE models ("
   1467       "model_id BLOB primary key, "
   1468       "progress_marker BLOB, "
   1469       // Gets set if the syncer ever gets updates from the
   1470       // server and the server returns 0.  Lets us detect the
   1471       // end of the initial sync.
   1472       "transaction_version BIGINT default 0)");
   1473 }
   1474 
   1475 bool DirectoryBackingStore::CreateModelsTable() {
   1476   // This is the current schema for the Models table, from version 88
   1477   // onward.  If you change the schema, you'll probably want to double-check
   1478   // the use of this function in the v84-v85 migration.
   1479   return db_->Execute(
   1480       "CREATE TABLE models ("
   1481       "model_id BLOB primary key, "
   1482       "progress_marker BLOB, "
   1483       // Gets set if the syncer ever gets updates from the
   1484       // server and the server returns 0.  Lets us detect the
   1485       // end of the initial sync.
   1486       "transaction_version BIGINT default 0,"
   1487       "context BLOB)");
   1488 }
   1489 
   1490 bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
   1491   const char* name = is_temporary ? "temp_share_info" : "share_info";
   1492   string query = "CREATE TABLE ";
   1493   query.append(name);
   1494   // This is the current schema for the ShareInfo table, from version 76
   1495   // onward.
   1496   query.append(" ("
   1497       "id TEXT primary key, "
   1498       "name TEXT, "
   1499       "store_birthday TEXT, "
   1500       "db_create_version TEXT, "
   1501       "db_create_time INT, "
   1502       "next_id INT default -2, "
   1503       "cache_guid TEXT, "
   1504       // TODO(rlarocque, 124140): Remove notification_state field.
   1505       "notification_state BLOB, "
   1506       "bag_of_chips BLOB"
   1507       ")");
   1508   return db_->Execute(query.c_str());
   1509 }
   1510 
   1511 bool DirectoryBackingStore::CreateShareInfoTableVersion71(
   1512     bool is_temporary) {
   1513   const char* name = is_temporary ? "temp_share_info" : "share_info";
   1514   string query = "CREATE TABLE ";
   1515   query.append(name);
   1516   // This is the schema for the ShareInfo table used from versions 71 to 72.
   1517   query.append(" ("
   1518       "id TEXT primary key, "
   1519       "name TEXT, "
   1520       "store_birthday TEXT, "
   1521       "db_create_version TEXT, "
   1522       "db_create_time INT, "
   1523       "next_id INT default -2, "
   1524       "cache_guid TEXT )");
   1525   return db_->Execute(query.c_str());
   1526 }
   1527 
   1528 // This function checks to see if the given list of Metahandles has any nodes
   1529 // whose PARENT_ID values refer to ID values that do not actually exist.
   1530 // Returns true on success.
   1531 bool DirectoryBackingStore::VerifyReferenceIntegrity(
   1532     const Directory::MetahandlesMap* handles_map) {
   1533   TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
   1534   using namespace syncable;
   1535   typedef base::hash_set<std::string> IdsSet;
   1536 
   1537   IdsSet ids_set;
   1538   bool is_ok = true;
   1539 
   1540   for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
   1541        it != handles_map->end(); ++it) {
   1542     EntryKernel* entry = it->second;
   1543     bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
   1544     is_ok = is_ok && !is_duplicate_id;
   1545   }
   1546 
   1547   IdsSet::iterator end = ids_set.end();
   1548   for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
   1549        it != handles_map->end(); ++it) {
   1550     EntryKernel* entry = it->second;
   1551     bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
   1552     if (!parent_exists) {
   1553       return false;
   1554     }
   1555   }
   1556   return is_ok;
   1557 }
   1558 
   1559 void DirectoryBackingStore::PrepareSaveEntryStatement(
   1560     EntryTable table, sql::Statement* save_statement) {
   1561   if (save_statement->is_valid())
   1562     return;
   1563 
   1564   string query;
   1565   query.reserve(kUpdateStatementBufferSize);
   1566   switch (table) {
   1567     case METAS_TABLE:
   1568       query.append("INSERT OR REPLACE INTO metas ");
   1569       break;
   1570     case DELETE_JOURNAL_TABLE:
   1571       query.append("INSERT OR REPLACE INTO deleted_metas ");
   1572       break;
   1573   }
   1574 
   1575   string values;
   1576   values.reserve(kUpdateStatementBufferSize);
   1577   values.append(" VALUES ");
   1578   const char* separator = "( ";
   1579   int i = 0;
   1580   for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
   1581     query.append(separator);
   1582     values.append(separator);
   1583     separator = ", ";
   1584     query.append(ColumnName(i));
   1585     values.append("?");
   1586   }
   1587   query.append(" ) ");
   1588   values.append(" )");
   1589   query.append(values);
   1590   save_statement->Assign(db_->GetUniqueStatement(
   1591       base::StringPrintf(query.c_str(), "metas").c_str()));
   1592 }
   1593 
   1594 }  // namespace syncable
   1595 }  // namespace syncer
   1596