// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/sync/engine/syncer_util.h"

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "chrome/browser/sync/engine/conflict_resolver.h"
#include "chrome/browser/sync/engine/syncer_proto_util.h"
#include "chrome/browser/sync/engine/syncer_types.h"
#include "chrome/browser/sync/engine/syncproto.h"
#include "chrome/browser/sync/protocol/bookmark_specifics.pb.h"
#include "chrome/browser/sync/protocol/nigori_specifics.pb.h"
#include "chrome/browser/sync/protocol/sync.pb.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/model_type.h"
#include "chrome/browser/sync/syncable/nigori_util.h"
#include "chrome/browser/sync/syncable/syncable.h"
#include "chrome/browser/sync/syncable/syncable_changes_version.h"

using syncable::BASE_VERSION;
using syncable::Blob;
using syncable::CHANGES_VERSION;
using syncable::CREATE;
using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::CTIME;
using syncable::Directory;
using syncable::Entry;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::ID;
using syncable::IS_DEL;
using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NEXT_ID;
using syncable::NON_UNIQUE_NAME;
using syncable::PARENT_ID;
using syncable::PREV_ID;
using syncable::ReadTransaction;
using syncable::SERVER_CTIME;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_POSITION_IN_PARENT;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_VERSION;
using syncable::UNIQUE_CLIENT_TAG;
using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
using syncable::SYNCER;
using syncable::WriteTransaction;

namespace browser_sync {

// Returns the number of unsynced entries.
// static
int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
                                   std::vector<int64> *handles) {
  trans->directory()->GetUnsyncedMetaHandles(trans, handles);
  VLOG_IF(1, !handles->empty()) << "Have " << handles->size()
                                << " unsynced items.";
  return handles->size();
}

// static
void SyncerUtil::ChangeEntryIDAndUpdateChildren(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry,
    const syncable::Id& new_id,
    syncable::Directory::ChildHandles* children) {
  syncable::Id old_id = entry->Get(ID);
  if (!entry->Put(ID, new_id)) {
    Entry old_entry(trans, GET_BY_ID, new_id);
    CHECK(old_entry.good());
    LOG(FATAL) << "Attempt to change ID to " << new_id
               << " conflicts with existing entry.\n\n"
               << *entry << "\n\n" << old_entry;
  }
  if (entry->Get(IS_DIR)) {
    // Get all child entries of the old id.
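    // |children| is an out-parameter supplied by the caller; after the loop
    // below it holds the meta handles of every entry that was re-parented.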
    trans->directory()->GetChildHandles(trans, old_id, children);
    Directory::ChildHandles::iterator i = children->begin();
    while (i != children->end()) {
      MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
      CHECK(child_entry.good());
      // Use the unchecked setter here to avoid touching the child's NEXT_ID
      // and PREV_ID fields (which Put(PARENT_ID) would normally do to
      // maintain linked-list invariants). In this case, NEXT_ID and PREV_ID
      // among the children will be valid after the loop, since we update all
      // the children at once.
      child_entry.PutParentIdPropertyOnly(new_id);
    }
  }
  // Update Id references on the previous and next nodes in the sibling
  // order. Do this by reinserting into the linked list; the first
  // step in PutPredecessor is to Unlink from the existing order, which
  // will overwrite the stale Id value from the adjacent nodes.
  if (entry->Get(PREV_ID) == entry->Get(NEXT_ID) &&
      entry->Get(PREV_ID) == old_id) {
    // We just need a shallow update to |entry|'s fields since it is already
    // self looped.
    entry->Put(NEXT_ID, new_id);
    entry->Put(PREV_ID, new_id);
  } else {
    entry->PutPredecessor(entry->Get(PREV_ID));
  }
}

// static
void SyncerUtil::ChangeEntryIDAndUpdateChildren(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry,
    const syncable::Id& new_id) {
  syncable::Directory::ChildHandles children;
  ChangeEntryIDAndUpdateChildren(trans, entry, new_id, &children);
}

// static
syncable::Id SyncerUtil::FindLocalIdToUpdate(
    syncable::BaseTransaction* trans,
    const SyncEntity& update) {
  // Expected entry points of this function:
  // SyncEntity has NOT been applied to SERVER fields.
  // SyncEntity has NOT been applied to LOCAL fields.
  // DB has not yet been modified, no entries created for this update.

  const std::string& client_id = trans->directory()->cache_guid();

  if (update.has_client_defined_unique_tag() &&
      !update.client_defined_unique_tag().empty()) {
    // When a server sends down a client tag, the following cases can occur:
    // 1) Client has an entry for the tag already; the ID is server style and
    //    matches.
    // 2) Client has an entry for the tag already; the ID is server style but
    //    doesn't match.
    // 3) Client has an entry for the tag already; the ID is local (never
    //    matches).
    // 4) Client has no entry for the tag.

    // Case 1: we don't have to do anything, since the update will work just
    // fine. The update will end up in the proper entry via ID lookup.
    // Case 2: happens very rarely, due to lax enforcement of client tags on
    // the server, if two clients commit the same tag at the same time. When
    // this happens, we pick the lexically-least ID and ignore all other
    // items.
    // Case 3: we need to replace the local ID with the server ID so that
    // this update gets targeted at the correct local entry; we expect
    // conflict resolution to occur.
    // Case 4: perfect. Same as case 1.

    syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
                                update.client_defined_unique_tag());

    // The SyncAPI equivalent of this function will return !good if IS_DEL.
    // The syncable version will return good even if IS_DEL.
    // TODO(chron): Unit test the case with IS_DEL and make sure.
    if (local_entry.good()) {
      if (local_entry.Get(ID).ServerKnows()) {
        if (local_entry.Get(ID) != update.id()) {
          // Case 2.
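          // Two clients committed entries with the same client tag; keep
          // whichever copy ended up with the lexically smaller server ID.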
          LOG(WARNING) << "Duplicated client tag.";
          if (local_entry.Get(ID) < update.id()) {
            // Signal an error; drop this update on the floor. Note that we
            // don't server-delete the item, because we don't allow it to
            // exist locally at all. So the item will remain orphaned on the
            // server, and we won't pay attention to it.
            return syncable::kNullId;
          }
        }
        // Target this change to the existing local entry; later, we'll
        // change the ID of the local entry to update.id() if needed.
        return local_entry.Get(ID);
      } else {
        // Case 3: We have a local entry with the same client tag. We should
        // change the ID of the local entry to the server entry. This will
        // result in a server ID with base version == 0, but that's a legal
        // state for an item with a client tag. By changing the ID, the
        // update will now be applied to local_entry.
        DCHECK(0 == local_entry.Get(BASE_VERSION) ||
               CHANGES_VERSION == local_entry.Get(BASE_VERSION));
        return local_entry.Get(ID);
      }
    }
  } else if (update.has_originator_cache_guid() &&
             update.originator_cache_guid() == client_id) {
    // If a commit succeeds, but the response does not come back fast enough,
    // then the syncer might assume that it was never committed. The server
    // will track the client that sent up the original commit and return this
    // in a get updates response. When this matches a local uncommitted item,
    // we must mutate our local item and version to pick up the committed
    // version of the same item whose commit response was lost. There is,
    // however, still a race condition if the server has not completed the
    // commit by the time the syncer tries to get updates again. To mitigate
    // this, we need the server to time out in a reasonable span, and our
    // commit batches have to be small enough to process within our HTTP
    // response "assumed alive" time.

    // We need to check if we have an entry that didn't get its server id
    // updated correctly. The server sends down a client ID and a local
    // (negative) id. If we have an entry by that description, we should
    // update the ID and version to the server-side ones to avoid multiple
    // copies of the same thing.

    syncable::Id client_item_id = syncable::Id::CreateFromClientString(
        update.originator_client_item_id());
    DCHECK(!client_item_id.ServerKnows());
    syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);

    // If it exists, then our local client lost a commit response. Use the
    // local entry.
    if (local_entry.good() && !local_entry.Get(IS_DEL)) {
      int64 old_version = local_entry.Get(BASE_VERSION);
      int64 new_version = update.version();
      DCHECK_LE(old_version, 0);
      DCHECK_GT(new_version, 0);
      // Otherwise setting the base version could cause a consistency failure.
      // An entry should never be version 0 and SYNCED.
      DCHECK(local_entry.Get(IS_UNSYNCED));

      // Just a quick sanity check.
      DCHECK(!local_entry.Get(ID).ServerKnows());

      VLOG(1) << "Reuniting lost commit response IDs. server id: "
              << update.id() << " local id: " << local_entry.Get(ID)
              << " new version: " << new_version;

      return local_entry.Get(ID);
    }
  }
  // Fallback: target an entry having the server ID, creating one if needed.
  return update.id();
}

// static
UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
    syncable::WriteTransaction* const trans,
    syncable::MutableEntry* const entry,
    ConflictResolver* resolver,
    Cryptographer* cryptographer) {

  CHECK(entry->good());
  if (!entry->Get(IS_UNAPPLIED_UPDATE))
    return SUCCESS;  // No work to do.
  syncable::Id id = entry->Get(ID);

  if (entry->Get(IS_UNSYNCED)) {
    VLOG(1) << "Skipping update, returning conflict for: " << id
            << " ; it's unsynced.";
    return CONFLICT;
  }
  if (!entry->Get(SERVER_IS_DEL)) {
    syncable::Id new_parent = entry->Get(SERVER_PARENT_ID);
    Entry parent(trans, GET_BY_ID, new_parent);
    // A note on non-directory parents:
    // We catch most unfixable tree invariant errors at update receipt time,
    // but we deal with this case here because we may receive the child first
    // and then the illegal parent. Instead of dealing with it twice in
    // different ways, we deal with it once here to reduce the amount of code
    // and potential errors.
    if (!parent.good() || parent.Get(IS_DEL) || !parent.Get(IS_DIR)) {
      return CONFLICT;
    }
    if (entry->Get(PARENT_ID) != new_parent) {
      if (!entry->Get(IS_DEL) && !IsLegalNewParent(trans, id, new_parent)) {
        VLOG(1) << "Not updating item " << id
                << ", illegal new parent (would cause loop).";
        return CONFLICT;
      }
    }
  } else if (entry->Get(IS_DIR)) {
    Directory::ChildHandles handles;
    trans->directory()->GetChildHandles(trans, id, &handles);
    if (!handles.empty()) {
      // If we have still-existing children, then we need to deal with them
      // before we can process this change.
      VLOG(1) << "Not deleting directory; it's not empty " << *entry;
      return CONFLICT;
    }
  }

  // We intercept updates to the Nigori node, update the Cryptographer and
  // encrypt any unsynced changes here because there is no Nigori
  // ChangeProcessor.
  const sync_pb::EntitySpecifics& specifics = entry->Get(SERVER_SPECIFICS);
  if (specifics.HasExtension(sync_pb::nigori)) {
    const sync_pb::NigoriSpecifics& nigori =
        specifics.GetExtension(sync_pb::nigori);
    if (!nigori.encrypted().blob().empty()) {
      if (cryptographer->CanDecrypt(nigori.encrypted())) {
        cryptographer->SetKeys(nigori.encrypted());
      } else {
        cryptographer->SetPendingKeys(nigori.encrypted());
      }
    }

    // Make sure any unsynced changes are properly encrypted as necessary.
    syncable::ModelTypeSet encrypted_types =
        syncable::GetEncryptedDataTypesFromNigori(nigori);
    if (!VerifyUnsyncedChangesAreEncrypted(trans, encrypted_types) &&
        (!cryptographer->is_ready() ||
         !syncable::ProcessUnsyncedChangesForEncryption(trans, encrypted_types,
                                                        cryptographer))) {
      // We were unable to encrypt the changes, possibly due to a missing
      // passphrase. We return conflict, even though the conflict is with the
      // unsynced change and not the nigori node. We ensure forward progress
      // because the cryptographer already has the pending keys set, so once
      // the new passphrase is entered we should be able to encrypt properly.
      // And, because this update will not be applied yet, next time around
      // we will properly encrypt all appropriate unsynced data.
      VLOG(1) << "Marking nigori node update as conflicting due to being "
              << "unable to encrypt all necessary unsynced changes.";
      return CONFLICT;
    }

    // Note that we don't bother to encrypt any synced data that now requires
    // encryption. The machine that turned on encryption should encrypt
    // everything itself. It's possible it could get interrupted during this
    // process, but we currently reencrypt everything at startup as well, so
    // as soon as a client is restarted with this datatype encrypted, all the
    // data should be updated as necessary.
  }

  // Only apply updates that we can decrypt. Updates that can't be decrypted
  // yet will stay in conflict until the user provides a passphrase that lets
  // the Cryptographer decrypt them.
  if (!entry->Get(SERVER_IS_DIR)) {
    if (specifics.has_encrypted() &&
        !cryptographer->CanDecrypt(specifics.encrypted())) {
      // We can't decrypt this node yet.
      return CONFLICT;
    } else if (specifics.HasExtension(sync_pb::password)) {
      // Passwords use their own legacy encryption scheme.
      const sync_pb::PasswordSpecifics& password =
          specifics.GetExtension(sync_pb::password);
      if (!cryptographer->CanDecrypt(password.encrypted())) {
        return CONFLICT;
      }
    }
  }

  SyncerUtil::UpdateLocalDataFromServerData(trans, entry);

  return SUCCESS;
}

namespace {
// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
                             const std::string& url,
                             const std::string& favicon_bytes,
                             MutableEntry* local_entry) {
  // In the new-style protocol, the server no longer sends bookmark info for
  // the "google_chrome" folder. Mimic that here.
  if (singleton_tag == "google_chrome")
    return;
  sync_pb::EntitySpecifics pb;
  sync_pb::BookmarkSpecifics* bookmark =
      pb.MutableExtension(sync_pb::bookmark);
  if (!url.empty())
    bookmark->set_url(url);
  if (!favicon_bytes.empty())
    bookmark->set_favicon(favicon_bytes);
  local_entry->Put(SERVER_SPECIFICS, pb);
}

}  // namespace

// Pass in name and checksum because of UTF8 conversion.
// static
void SyncerUtil::UpdateServerFieldsFromUpdate(
    MutableEntry* target,
    const SyncEntity& update,
    const std::string& name) {
  if (update.deleted()) {
    if (target->Get(SERVER_IS_DEL)) {
      // If we already think the item is server-deleted, we're done.
      // Skipping these cases prevents our committed deletions from coming
      // back and overriding subsequent undeletions. For non-deleted items,
      // the version number check has a similar effect.
      return;
    }
    // The server returns very lightweight replies for deletions, so we don't
    // clobber a bunch of fields on delete.
    target->Put(SERVER_IS_DEL, true);
    if (!target->Get(UNIQUE_CLIENT_TAG).empty()) {
      // Items identified by the client unique tag are undeletable; when
      // they're deleted, they go back to version 0.
      target->Put(SERVER_VERSION, 0);
    } else {
      // Otherwise, fake a server version by bumping the local number.
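      // For example, with SERVER_VERSION == 5 and BASE_VERSION == 7, the
      // faked server version becomes max(5, 7) + 1 == 8, which exceeds both
      // known versions.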
      target->Put(SERVER_VERSION,
                  std::max(target->Get(SERVER_VERSION),
                           target->Get(BASE_VERSION)) + 1);
    }
    target->Put(IS_UNAPPLIED_UPDATE, true);
    return;
  }

  DCHECK(target->Get(ID) == update.id())
      << "ID Changing not supported here";
  target->Put(SERVER_PARENT_ID, update.parent_id());
  target->Put(SERVER_NON_UNIQUE_NAME, name);
  target->Put(SERVER_VERSION, update.version());
  target->Put(SERVER_CTIME,
              ServerTimeToClientTime(update.ctime()));
  target->Put(SERVER_MTIME,
              ServerTimeToClientTime(update.mtime()));
  target->Put(SERVER_IS_DIR, update.IsFolder());
  if (update.has_server_defined_unique_tag()) {
    const std::string& tag = update.server_defined_unique_tag();
    target->Put(UNIQUE_SERVER_TAG, tag);
  }
  if (update.has_client_defined_unique_tag()) {
    const std::string& tag = update.client_defined_unique_tag();
    target->Put(UNIQUE_CLIENT_TAG, tag);
  }
  // Store the datatype-specific part as a protobuf.
  if (update.has_specifics()) {
    DCHECK(update.GetModelType() != syncable::UNSPECIFIED)
        << "Storing unrecognized datatype in sync database.";
    target->Put(SERVER_SPECIFICS, update.specifics());
  } else if (update.has_bookmarkdata()) {
    // Legacy protocol response for bookmark data.
    const SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
    UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                            bookmark.bookmark_url(),
                            bookmark.bookmark_favicon(),
                            target);
  }
  if (update.has_position_in_parent())
    target->Put(SERVER_POSITION_IN_PARENT, update.position_in_parent());

  target->Put(SERVER_IS_DEL, update.deleted());
  // We only mark the entry as unapplied if its version is greater than the
  // local data. If we're processing the update that corresponds to one of
  // our own commits, we don't apply it, as time differences may occur.
  if (update.version() > target->Get(BASE_VERSION)) {
    target->Put(IS_UNAPPLIED_UPDATE, true);
  }
}

// Creates a new Entry iff no Entry exists with the given id.
// static
void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
                                const syncable::Id& id) {
  syncable::MutableEntry entry(trans, syncable::GET_BY_ID, id);
  if (!entry.good()) {
    syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
                                     id);
  }
}

// static
bool SyncerUtil::ServerAndLocalOrdersMatch(syncable::Entry* entry) {
  // Find the closest up-to-date local sibling by walking the linked list.
  syncable::Id local_up_to_date_predecessor = entry->Get(PREV_ID);
  while (!local_up_to_date_predecessor.IsRoot()) {
    Entry local_prev(entry->trans(), GET_BY_ID, local_up_to_date_predecessor);
    if (!local_prev.good() || local_prev.Get(IS_DEL))
      return false;
    if (!local_prev.Get(IS_UNAPPLIED_UPDATE) && !local_prev.Get(IS_UNSYNCED))
      break;
    local_up_to_date_predecessor = local_prev.Get(PREV_ID);
  }

  // Now find the closest up-to-date sibling in the server order.
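  // ComputePrevIdFromServerPosition derives the predecessor from the
  // server-assigned position; the orders match only if it agrees with the
  // up-to-date local predecessor found above.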
  syncable::Id server_up_to_date_predecessor =
      entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
  return server_up_to_date_predecessor == local_up_to_date_predecessor;
}

// static
bool SyncerUtil::ServerAndLocalEntriesMatch(syncable::Entry* entry) {
  if (!ClientAndServerTimeMatch(
        entry->Get(CTIME), ClientTimeToServerTime(entry->Get(SERVER_CTIME)))) {
    LOG(WARNING) << "Client and server time mismatch";
    return false;
  }
  if (entry->Get(IS_DEL) && entry->Get(SERVER_IS_DEL))
    return true;
  // Name should exactly match here.
  if (!(entry->Get(NON_UNIQUE_NAME) == entry->Get(SERVER_NON_UNIQUE_NAME))) {
    LOG(WARNING) << "Unsanitized name mismatch";
    return false;
  }

  if (entry->Get(PARENT_ID) != entry->Get(SERVER_PARENT_ID) ||
      entry->Get(IS_DIR) != entry->Get(SERVER_IS_DIR) ||
      entry->Get(IS_DEL) != entry->Get(SERVER_IS_DEL)) {
    LOG(WARNING) << "Metabit mismatch";
    return false;
  }

  if (!ServerAndLocalOrdersMatch(entry)) {
    LOG(WARNING) << "Server/local ordering mismatch";
    return false;
  }

  // TODO(ncarter): This is unfortunately heavyweight. Can we do better?
  if (entry->Get(SPECIFICS).SerializeAsString() !=
      entry->Get(SERVER_SPECIFICS).SerializeAsString()) {
    LOG(WARNING) << "Specifics mismatch";
    return false;
  }
  if (entry->Get(IS_DIR))
    return true;
  // For historical reasons, a folder's MTIME changes when its contents
  // change.
  // TODO(ncarter): Remove the special casing of MTIME.
  bool time_match = ClientAndServerTimeMatch(entry->Get(MTIME),
      ClientTimeToServerTime(entry->Get(SERVER_MTIME)));
  if (!time_match) {
    LOG(WARNING) << "Time mismatch";
  }
  return time_match;
}

// static
void SyncerUtil::SplitServerInformationIntoNewEntry(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  syncable::Id id = entry->Get(ID);
  ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
  entry->Put(BASE_VERSION, 0);

  MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
  CopyServerFields(entry, &new_entry);
  ClearServerData(entry);

  VLOG(1) << "Splitting server information, local entry: " << *entry
          << " server entry: " << new_entry;
}

// This function is called on an entry when we can update the user-facing data
// from the server data.
// static
void SyncerUtil::UpdateLocalDataFromServerData(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  DCHECK(!entry->Get(IS_UNSYNCED));
  DCHECK(entry->Get(IS_UNAPPLIED_UPDATE));

  VLOG(2) << "Updating entry : " << *entry;
  // Start by setting the properties that determine the model_type.
  entry->Put(SPECIFICS, entry->Get(SERVER_SPECIFICS));
  entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
  // This strange dance around the IS_DEL flag avoids problems when setting
  // the name.
  // TODO(chron): Is this still an issue? Unit test this codepath.
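  // For deletions, only the IS_DEL bit needs to change here; for live items
  // the name, parent, and sibling position are refreshed from the server
  // fields as well.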
  if (entry->Get(SERVER_IS_DEL)) {
    entry->Put(IS_DEL, true);
  } else {
    entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
    entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
    CHECK(entry->Put(IS_DEL, false));
    Id new_predecessor =
        entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
    CHECK(entry->PutPredecessor(new_predecessor))
        << " Illegal predecessor after converting from server position.";
  }

  entry->Put(CTIME, entry->Get(SERVER_CTIME));
  entry->Put(MTIME, entry->Get(SERVER_MTIME));
  entry->Put(BASE_VERSION, entry->Get(SERVER_VERSION));
  entry->Put(IS_DEL, entry->Get(SERVER_IS_DEL));
  entry->Put(IS_UNAPPLIED_UPDATE, false);
}

// static
VerifyCommitResult SyncerUtil::ValidateCommitEntry(
    syncable::Entry* entry) {
  syncable::Id id = entry->Get(ID);
  if (id == entry->Get(PARENT_ID)) {
    CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
    // If the root becomes unsynced it can cause us problems.
    LOG(ERROR) << "Root item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->IsRoot()) {
    LOG(ERROR) << "Permanent item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
    // Drop deleted uncommitted entries.
    return VERIFY_UNSYNCABLE;
  }
  return VERIFY_OK;
}

// static
bool SyncerUtil::AddItemThenPredecessors(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter,
    syncable::MetahandleSet* inserted_items,
    std::vector<syncable::Id>* commit_ids) {

  if (!inserted_items->insert(item->Get(META_HANDLE)).second)
    return false;
  commit_ids->push_back(item->Get(ID));
  if (item->Get(IS_DEL))
    return true;  // Deleted items have no predecessors.

  Id prev_id = item->Get(PREV_ID);
  while (!prev_id.IsRoot()) {
    Entry prev(trans, GET_BY_ID, prev_id);
    CHECK(prev.good()) << "Bad id when walking predecessors.";
    if (!prev.Get(inclusion_filter))
      break;
    if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
      break;
    commit_ids->push_back(prev_id);
    prev_id = prev.Get(PREV_ID);
  }
  return true;
}

// static
void SyncerUtil::AddPredecessorsThenItem(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter,
    syncable::MetahandleSet* inserted_items,
    std::vector<syncable::Id>* commit_ids) {
  size_t initial_size = commit_ids->size();
  if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
                               commit_ids))
    return;
  // Reverse what we added to get the correct order.
  std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
}

// static
void SyncerUtil::MarkDeletedChildrenSynced(
    const syncable::ScopedDirLookup &dir,
    std::set<syncable::Id>* deleted_folders) {
  // There are two options here.
  // 1. Scan deleted unsynced entries, looking up their pre-delete tree for
  //    any of the deleted folders.
  // 2. Take each folder and do a tree walk of all entries underneath it.
  // #2 has a lower big-O cost, but writing code to limit the time spent
  // inside the transaction during each step is simpler with #1. Changing
  // this decision may be sensible if this code shows up in profiling.
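  // Nothing to do if there are no deleted folders to check against.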
  if (deleted_folders->empty())
    return;
  Directory::UnsyncedMetaHandles handles;
  {
    ReadTransaction trans(dir, __FILE__, __LINE__);
    dir->GetUnsyncedMetaHandles(&trans, &handles);
  }
  if (handles.empty())
    return;
  Directory::UnsyncedMetaHandles::iterator it;
  for (it = handles.begin() ; it != handles.end() ; ++it) {
    // Single transaction / entry we deal with.
    WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
    MutableEntry entry(&trans, GET_BY_HANDLE, *it);
    if (!entry.Get(IS_UNSYNCED) || !entry.Get(IS_DEL))
      continue;
    syncable::Id id = entry.Get(PARENT_ID);
    while (id != trans.root_id()) {
      if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
        entry.Put(IS_UNSYNCED, false);
        break;
      }
      Entry parent(&trans, GET_BY_ID, id);
      if (!parent.good() || !parent.Get(IS_DEL))
        break;
      id = parent.Get(PARENT_ID);
    }
  }
}

// static
VerifyResult SyncerUtil::VerifyNewEntry(
    const SyncEntity& update,
    syncable::Entry* target,
    const bool deleted) {
  if (target->good()) {
    // Not a new update.
    return VERIFY_UNDECIDED;
  }
  if (deleted) {
    // Deletion of an item we've never seen can be ignored.
    return VERIFY_SKIP;
  }

  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; check here for updates that break
// consistency rules.
// static
VerifyResult SyncerUtil::VerifyUpdateConsistency(
    syncable::WriteTransaction* trans,
    const SyncEntity& update,
    syncable::MutableEntry* target,
    const bool deleted,
    const bool is_directory,
    syncable::ModelType model_type) {

  CHECK(target->good());

  // If the update is a delete, we don't really need to worry at this stage.
  if (deleted)
    return VERIFY_SUCCESS;

  if (model_type == syncable::UNSPECIFIED) {
    // This update is to an item of a datatype we don't recognize. The server
    // shouldn't have sent it to us. Throw it on the ground.
    return VERIFY_SKIP;
  }

  if (target->Get(SERVER_VERSION) > 0) {
    // Then we've had an update for this entry before.
    if (is_directory != target->Get(SERVER_IS_DIR) ||
        model_type != target->GetServerModelType()) {
      if (target->Get(IS_DEL)) {  // If we've deleted the item, we don't care.
        return VERIFY_SKIP;
      } else {
        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
        LOG(ERROR) << " Entry: " << *target;
        LOG(ERROR) << " Update: "
                   << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_FAIL;
      }
    }

    if (!deleted && (target->Get(ID) == update.id()) &&
        (target->Get(SERVER_IS_DEL) ||
         (!target->Get(IS_UNSYNCED) && target->Get(IS_DEL) &&
          target->Get(BASE_VERSION) > 0))) {
      // An undelete. The latter case in the above condition is for when the
      // server does not give us an update following the commit of a delete,
      // before undeleting.
      // Undeletion is common for items that reuse the client-unique tag.
      VerifyResult result =
          SyncerUtil::VerifyUndelete(trans, update, target);
      if (VERIFY_UNDECIDED != result)
        return result;
    }
  }
  if (target->Get(BASE_VERSION) > 0) {
    // We've committed this update in the past.
    if (is_directory != target->Get(IS_DIR) ||
        model_type != target->GetModelType()) {
      LOG(ERROR) << "Server update doesn't agree with committed item. ";
"; 758 LOG(ERROR) << " Entry: " << *target; 759 LOG(ERROR) << " Update: " 760 << SyncerProtoUtil::SyncEntityDebugString(update); 761 return VERIFY_FAIL; 762 } 763 if (target->Get(ID) == update.id()) { 764 // Checks that are only valid if we're not changing the ID. 765 if (target->Get(BASE_VERSION) == update.version() && 766 !target->Get(IS_UNSYNCED) && 767 !SyncerProtoUtil::Compare(*target, update)) { 768 // TODO(sync): This constraint needs to be relaxed. For now it's OK to 769 // fail the verification and deal with it when we ApplyUpdates. 770 LOG(ERROR) << "Server update doesn't match local data with same " 771 "version. A bug should be filed. Entry: " << *target << 772 "Update: " << SyncerProtoUtil::SyncEntityDebugString(update); 773 return VERIFY_FAIL; 774 } 775 if (target->Get(SERVER_VERSION) > update.version()) { 776 LOG(WARNING) << "We've already seen a more recent version."; 777 LOG(WARNING) << " Entry: " << *target; 778 LOG(WARNING) << " Update: " 779 << SyncerProtoUtil::SyncEntityDebugString(update); 780 return VERIFY_SKIP; 781 } 782 } 783 } 784 return VERIFY_SUCCESS; 785 } 786 787 // Assumes we have an existing entry; verify an update that seems to be 788 // expressing an 'undelete' 789 // static 790 VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans, 791 const SyncEntity& update, 792 syncable::MutableEntry* target) { 793 // TODO(nick): We hit this path for items deleted items that the server 794 // tells us to re-create; only deleted items with positive base versions 795 // will hit this path. However, it's not clear how such an undeletion 796 // would actually succeed on the server; in the protocol, a base 797 // version of 0 is required to undelete an object. This codepath 798 // should be deprecated in favor of client-tag style undeletion 799 // (where items go to version 0 when they're deleted), or else 800 // removed entirely (if this type of undeletion is indeed impossible). 801 CHECK(target->good()); 802 VLOG(1) << "Server update is attempting undelete. " << *target 803 << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update); 804 // Move the old one aside and start over. It's too tricky to get the old one 805 // back into a state that would pass CheckTreeInvariants(). 806 if (target->Get(IS_DEL)) { 807 DCHECK(target->Get(UNIQUE_CLIENT_TAG).empty()) 808 << "Doing move-aside undeletion on client-tagged item."; 809 target->Put(ID, trans->directory()->NextId()); 810 target->Put(UNIQUE_CLIENT_TAG, ""); 811 target->Put(BASE_VERSION, CHANGES_VERSION); 812 target->Put(SERVER_VERSION, 0); 813 return VERIFY_SUCCESS; 814 } 815 if (update.version() < target->Get(SERVER_VERSION)) { 816 LOG(WARNING) << "Update older than current server version for " 817 << *target << " Update:" 818 << SyncerProtoUtil::SyncEntityDebugString(update); 819 return VERIFY_SUCCESS; // Expected in new sync protocol. 820 } 821 return VERIFY_UNDECIDED; 822 } 823 824 } // namespace browser_sync 825