// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/engine/syncer_util.h"

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "base/base64.h"
#include "base/location.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
#include "sync/util/time.h"

namespace syncer {

using syncable::BASE_SERVER_SPECIFICS;
using syncable::BASE_VERSION;
using syncable::CHANGES_VERSION;
using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::CTIME;
using syncable::Directory;
using syncable::Entry;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::ID;
using syncable::IS_DEL;
using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
using syncable::PARENT_ID;
using syncable::SERVER_CTIME;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_UNIQUE_POSITION;
using syncable::SERVER_VERSION;
using syncable::SPECIFICS;
using syncable::SYNCER;
using syncable::UNIQUE_BOOKMARK_TAG;
using syncable::UNIQUE_CLIENT_TAG;
using syncable::UNIQUE_POSITION;
using syncable::UNIQUE_SERVER_TAG;
using syncable::WriteTransaction;

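// Returns the ID of the local entry that |update| should be applied to.
// Matches an existing local entry by client-defined unique tag, or by
// originator cache GUID / client item ID for commits whose responses were
// lost. Falls back to the update's own ID when no local match exists, and
// returns the null ID when the update should be dropped entirely.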
syncable::Id FindLocalIdToUpdate(
    syncable::BaseTransaction* trans,
    const sync_pb::SyncEntity& update) {
  // Expected entry points of this function:
  // SyncEntity has NOT been applied to SERVER fields.
  // SyncEntity has NOT been applied to LOCAL fields.
  // DB has not yet been modified, no entries created for this update.

  const std::string& client_id = trans->directory()->cache_guid();
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  if (update.has_client_defined_unique_tag() &&
      !update.client_defined_unique_tag().empty()) {
    // When a server sends down a client tag, the following cases can occur:
    // 1) Client has entry for tag already, ID is server style, matches.
    // 2) Client has entry for tag already, ID is server style, doesn't match.
    // 3) Client has entry for tag already, ID is local (never matches).
    // 4) Client has no entry for tag.

    // Case 1 - We don't have to do anything since the update will
    // work just fine. Update will end up in the proper entry, via ID lookup.
    // Case 2 - Happens very rarely due to lax enforcement of client tags
    // on the server, if two clients commit the same tag at the same time.
    // When this happens, we pick the lexically-least ID and ignore all other
    // items.
    // Case 3 - We need to replace the local ID with the server ID so that
    // this update gets targeted at the correct local entry; we expect conflict
    // resolution to occur.
    // Case 4 - Perfect. Same as case 1.

    syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
                                update.client_defined_unique_tag());

    // The SyncAPI equivalent of this function will return !good if IS_DEL.
    // The syncable version will return good even if IS_DEL.
    // TODO(chron): Unit test the case with IS_DEL and make sure.
    if (local_entry.good()) {
      if (local_entry.GetId().ServerKnows()) {
        if (local_entry.GetId() != update_id) {
          // Case 2.
          LOG(WARNING) << "Duplicated client tag.";
          if (local_entry.GetId() < update_id) {
            // Signal an error; drop this update on the floor. Note that
            // we don't server delete the item, because we don't allow it to
            // exist locally at all. So the item will remain orphaned on
            // the server, and we won't pay attention to it.
            return syncable::GetNullId();
          }
        }
        // Target this change to the existing local entry; later,
        // we'll change the ID of the local entry to update_id
        // if needed.
        return local_entry.GetId();
      } else {
        // Case 3: We have a local entry with the same client tag.
        // We should change the ID of the local entry to the server entry.
        // This will result in a server ID with base version == 0, but that's
        // a legal state for an item with a client tag. By changing the ID,
        // the update will now be applied to local_entry.
        DCHECK(0 == local_entry.GetBaseVersion() ||
               CHANGES_VERSION == local_entry.GetBaseVersion());
        return local_entry.GetId();
      }
    }
  } else if (update.has_originator_cache_guid() &&
             update.originator_cache_guid() == client_id) {
    // If a commit succeeds, but the response does not come back fast enough,
    // then the syncer might assume that it was never committed.
    // The server will track the client that sent up the original commit and
    // return this in a get updates response. When this matches a local
    // uncommitted item, we must mutate our local item and version to pick up
    // the committed version of the same item whose commit response was lost.
    // There is however still a race condition if the server has not
    // completed the commit by the time the syncer tries to get updates
    // again. To mitigate this, we need to have the server time out in
    // a reasonable span, and our commit batches have to be small enough
    // to process within our HTTP response "assumed alive" time.

    // We need to check if we have an entry that didn't get its server
    // id updated correctly. The server sends down a client ID
    // and a local (negative) id. If we have an entry by that
    // description, we should update the ID and version to the
    // server side ones to avoid multiple copies of the same thing.

    syncable::Id client_item_id = syncable::Id::CreateFromClientString(
        update.originator_client_item_id());
    DCHECK(!client_item_id.ServerKnows());
    syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);

    // If it exists, then our local client lost a commit response. Use
    // the local entry.
    if (local_entry.good() && !local_entry.GetIsDel()) {
      int64 old_version = local_entry.GetBaseVersion();
      int64 new_version = update.version();
      DCHECK_LE(old_version, 0);
      DCHECK_GT(new_version, 0);
      // Otherwise setting the base version could cause a consistency failure.
      // An entry should never be version 0 and SYNCED.
      DCHECK(local_entry.GetIsUnsynced());

      // Just a quick sanity check.
      DCHECK(!local_entry.GetId().ServerKnows());

      DVLOG(1) << "Reuniting lost commit response IDs. server id: "
               << update_id << " local id: " << local_entry.GetId()
               << " new version: " << new_version;

      return local_entry.GetId();
    }
  }
  // Fallback: target an entry having the server ID, creating one if needed.
  return update_id;
}

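// Attempts to apply the server data staged in |entry|'s SERVER fields to its
// local fields. Returns SUCCESS when the update is applied (or there is
// nothing to apply), and CONFLICT_ENCRYPTION, CONFLICT_HIERARCHY, or
// CONFLICT_SIMPLE when decryption, tree, or unsynced-local-change conflicts
// prevent it from being applied yet.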
UpdateAttemptResponse AttemptToUpdateEntry(
    syncable::WriteTransaction* const trans,
    syncable::MutableEntry* const entry,
    Cryptographer* cryptographer) {
  CHECK(entry->good());
  if (!entry->GetIsUnappliedUpdate())
    return SUCCESS;  // No work to do.
  syncable::Id id = entry->GetId();
  const sync_pb::EntitySpecifics& specifics = entry->GetServerSpecifics();

  // Only apply updates that we can decrypt. If we can't decrypt the update, it
  // is likely because the passphrase has not arrived yet. Because the
  // passphrase may not arrive within this GetUpdates, we can't just return
  // conflict, else we try to perform normal conflict resolution prematurely or
  // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
  // treated as an unresolvable conflict. See the description in
  // syncer_types.h. This prevents any unsynced changes from committing and
  // postpones conflict resolution until all data can be decrypted.
  if (specifics.has_encrypted() &&
      !cryptographer->CanDecrypt(specifics.encrypted())) {
    // We can't decrypt this node yet.
    DVLOG(1) << "Received an undecryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, returning conflict_encryption.";
    return CONFLICT_ENCRYPTION;
  } else if (specifics.has_password() &&
             entry->GetUniqueServerTag().empty()) {
    // Passwords use their own legacy encryption scheme.
    const sync_pb::PasswordSpecifics& password = specifics.password();
    if (!cryptographer->CanDecrypt(password.encrypted())) {
      DVLOG(1) << "Received an undecryptable password update, returning "
               << "conflict_encryption.";
      return CONFLICT_ENCRYPTION;
    }
  }

  if (!entry->GetServerIsDel()) {
    syncable::Id new_parent = entry->GetServerParentId();
    Entry parent(trans, GET_BY_ID, new_parent);
    // A note on non-directory parents:
    // We catch most unfixable tree invariant errors at update receipt time,
    // but we deal with this case here because we may receive the child first
    // and then the illegal parent. Instead of dealing with it twice in
    // different ways, we deal with it once here to reduce the amount of code
    // and potential errors.
    if (!parent.good() || parent.GetIsDel() || !parent.GetIsDir()) {
      DVLOG(1) << "Entry has bad parent, returning conflict_hierarchy.";
      return CONFLICT_HIERARCHY;
    }
    if (entry->GetParentId() != new_parent) {
      if (!entry->GetIsDel() && !IsLegalNewParent(trans, id, new_parent)) {
        DVLOG(1) << "Not updating item " << id
                 << ", illegal new parent (would cause loop).";
        return CONFLICT_HIERARCHY;
      }
    }
  } else if (entry->GetIsDir()) {
    Directory::Metahandles handles;
    trans->directory()->GetChildHandlesById(trans, id, &handles);
    if (!handles.empty()) {
      // If we have still-existing children, then we need to deal with
      // them before we can process this change.
      DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
      return CONFLICT_HIERARCHY;
    }
  }

  if (entry->GetIsUnsynced()) {
    DVLOG(1) << "Skipping update, returning conflict for: " << id
             << " ; it's unsynced.";
    return CONFLICT_SIMPLE;
  }

  if (specifics.has_encrypted()) {
    DVLOG(2) << "Received a decryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  } else {
    DVLOG(2) << "Received an unencrypted "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  }

  UpdateLocalDataFromServerData(trans, entry);

  return SUCCESS;
}

std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update) {
  if (!update.has_originator_cache_guid() ||
      !update.has_originator_client_item_id()) {
    return std::string();
  }

  return syncable::GenerateSyncableBookmarkHash(
      update.originator_cache_guid(), update.originator_client_item_id());
}

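// Derives the server position for a positioned update: prefers the
// unique_position field, falls back to the legacy int64 position_in_parent
// combined with |suffix|, and returns an invalid position for types that
// don't maintain positions or for updates that carry neither field.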
UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
                                 const std::string& suffix) {
  DCHECK(UniquePosition::IsValidSuffix(suffix));
  if (!SyncerProtoUtil::ShouldMaintainPosition(update)) {
    return UniquePosition::CreateInvalid();
  } else if (update.has_unique_position()) {
    return UniquePosition::FromProto(update.unique_position());
  } else if (update.has_position_in_parent()) {
    return UniquePosition::FromInt64(update.position_in_parent(), suffix);
  } else {
    return UniquePosition::CreateInvalid();
  }
}

namespace {

// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
                             const std::string& url,
                             const std::string& favicon_bytes,
                             syncable::ModelNeutralMutableEntry* local_entry) {
  // In the new-style protocol, the server no longer sends bookmark info for
  // the "google_chrome" folder. Mimic that here.
  if (singleton_tag == "google_chrome")
    return;
  sync_pb::EntitySpecifics pb;
  sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
  if (!url.empty())
    bookmark->set_url(url);
  if (!favicon_bytes.empty())
    bookmark->set_favicon(favicon_bytes);
  local_entry->PutServerSpecifics(pb);
}

void UpdateBookmarkPositioning(
    const sync_pb::SyncEntity& update,
    syncable::ModelNeutralMutableEntry* local_entry) {
  // Update our unique bookmark tag. In many cases this will be identical to
  // the tag we already have. However, clients that have recently upgraded to
  // versions that support unique positions will have incorrect tags. See the
  // v86 migration logic in directory_backing_store.cc for more information.
  //
  // Both the old and new values are unique to this element. Applying this
  // update will not risk the creation of conflicting unique tags.
  std::string bookmark_tag = GetUniqueBookmarkTagFromUpdate(update);
  if (UniquePosition::IsValidSuffix(bookmark_tag)) {
    local_entry->PutUniqueBookmarkTag(bookmark_tag);
  }

  // Update our position.
  UniquePosition update_pos =
      GetUpdatePosition(update, local_entry->GetUniqueBookmarkTag());
  if (update_pos.IsValid()) {
    local_entry->PutServerUniquePosition(update_pos);
  } else {
    // TODO(sync): This and other cases of unexpected input should be handled
    // better.
    NOTREACHED();
  }
}

}  // namespace

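// Copies the data in |update| into |target|'s SERVER fields. Deletions only
// touch the handful of fields needed to record the server-side delete;
// non-deletions are marked as unapplied updates only when the update's
// version is newer than the local base version.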
void UpdateServerFieldsFromUpdate(
    syncable::ModelNeutralMutableEntry* target,
    const sync_pb::SyncEntity& update,
    const std::string& name) {
  if (update.deleted()) {
    if (target->GetServerIsDel()) {
      // If we already think the item is server-deleted, we're done.
      // Skipping these cases prevents our committed deletions from coming
      // back and overriding subsequent undeletions. For non-deleted items,
      // the version number check has a similar effect.
      return;
    }
    // The server returns very lightweight replies for deletions, so we don't
    // clobber a bunch of fields on delete.
    target->PutServerIsDel(true);
    if (!target->GetUniqueClientTag().empty()) {
      // Items identified by the client unique tag are undeletable; when
      // they're deleted, they go back to version 0.
      target->PutServerVersion(0);
    } else {
      // Otherwise, fake a server version by bumping the local number.
      target->PutServerVersion(
          std::max(target->GetServerVersion(), target->GetBaseVersion()) + 1);
    }
    target->PutIsUnappliedUpdate(true);
    return;
  }

  DCHECK_EQ(target->GetId(), SyncableIdFromProto(update.id_string()))
      << "ID Changing not supported here";
  target->PutServerParentId(SyncableIdFromProto(update.parent_id_string()));
  target->PutServerNonUniqueName(name);
  target->PutServerVersion(update.version());
  target->PutServerCtime(ProtoTimeToTime(update.ctime()));
  target->PutServerMtime(ProtoTimeToTime(update.mtime()));
  target->PutServerIsDir(IsFolder(update));
  if (update.has_server_defined_unique_tag()) {
    const std::string& tag = update.server_defined_unique_tag();
    target->PutUniqueServerTag(tag);
  }
  if (update.has_client_defined_unique_tag()) {
    const std::string& tag = update.client_defined_unique_tag();
    target->PutUniqueClientTag(tag);
  }
  // Store the datatype-specific part as a protobuf.
  if (update.has_specifics()) {
    DCHECK_NE(GetModelType(update), UNSPECIFIED)
        << "Storing unrecognized datatype in sync database.";
    target->PutServerSpecifics(update.specifics());
  } else if (update.has_bookmarkdata()) {
    // Legacy protocol response for bookmark data.
    const sync_pb::SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
    UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                            bookmark.bookmark_url(),
                            bookmark.bookmark_favicon(),
                            target);
  }
  if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
    UpdateBookmarkPositioning(update, target);
  }

  target->PutServerIsDel(update.deleted());
  // We only mark the entry as unapplied if its version is greater than the
  // local data. If we're processing the update that corresponds to one of
  // our own commits, we don't apply it, as time differences may occur.
  if (update.version() > target->GetBaseVersion()) {
    target->PutIsUnappliedUpdate(true);
  }
}

// Creates a new Entry iff no Entry exists with the given id.
void CreateNewEntry(syncable::ModelNeutralWriteTransaction* trans,
                    const syncable::Id& id) {
  syncable::Entry entry(trans, GET_BY_ID, id);
  if (!entry.good()) {
    syncable::ModelNeutralMutableEntry new_entry(
        trans,
        syncable::CREATE_NEW_UPDATE_ITEM,
        id);
  }
}

// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  DCHECK(!entry->GetIsUnsynced());
  DCHECK(entry->GetIsUnappliedUpdate());

  DVLOG(2) << "Updating entry : " << *entry;
  // Start by setting the properties that determine the model_type.
  entry->PutSpecifics(entry->GetServerSpecifics());
  // Clear the previous server specifics now that we're applying successfully.
  entry->PutBaseServerSpecifics(sync_pb::EntitySpecifics());
  entry->PutIsDir(entry->GetServerIsDir());
  // This strange dance around the IS_DEL flag avoids problems when setting
  // the name.
  // TODO(chron): Is this still an issue? Unit test this codepath.
  if (entry->GetServerIsDel()) {
    entry->PutIsDel(true);
  } else {
    entry->PutNonUniqueName(entry->GetServerNonUniqueName());
    entry->PutParentId(entry->GetServerParentId());
    entry->PutUniquePosition(entry->GetServerUniquePosition());
    entry->PutIsDel(false);
  }

  entry->PutCtime(entry->GetServerCtime());
  entry->PutMtime(entry->GetServerMtime());
  entry->PutBaseVersion(entry->GetServerVersion());
  entry->PutIsDel(entry->GetServerIsDel());
  entry->PutIsUnappliedUpdate(false);
}

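// Checks an unsynced entry for conditions that make it uncommittable: the
// root (or another permanent item) becoming unsynced, or an uncommitted entry
// that was deleted before it ever reached the server.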
VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
  syncable::Id id = entry->GetId();
  if (id == entry->GetParentId()) {
    CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
    // If the root becomes unsynced it can cause us problems.
    LOG(ERROR) << "Root item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->IsRoot()) {
    LOG(ERROR) << "Permanent item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->GetIsDel() && !entry->GetId().ServerKnows()) {
    // Drop deleted uncommitted entries.
    return VERIFY_UNSYNCABLE;
  }
  return VERIFY_OK;
}

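// Walks each unsynced, locally deleted entry's chain of deleted ancestors and
// clears its IS_UNSYNCED bit when one of |deleted_folders| is found, since the
// committed deletion of that folder already covers the child's deletion.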
void MarkDeletedChildrenSynced(
    syncable::Directory* dir,
    syncable::BaseWriteTransaction* trans,
    std::set<syncable::Id>* deleted_folders) {
  // There are two options here.
  // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
  //    of the deleted folders.
  // 2. Take each folder and do a tree walk of all entries underneath it.
  // #2 has a lower big O cost, but writing code to limit the time spent inside
  // the transaction during each step is simpler with #1. Changing this
  // decision may be sensible if this code shows up in profiling.
  if (deleted_folders->empty())
    return;
  Directory::Metahandles handles;
  dir->GetUnsyncedMetaHandles(trans, &handles);
  if (handles.empty())
    return;
  Directory::Metahandles::iterator it;
  for (it = handles.begin(); it != handles.end(); ++it) {
    syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
    if (!entry.GetIsUnsynced() || !entry.GetIsDel())
      continue;
    syncable::Id id = entry.GetParentId();
    while (id != trans->root_id()) {
      if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
        entry.PutIsUnsynced(false);
        break;
      }
      Entry parent(trans, GET_BY_ID, id);
      if (!parent.good() || !parent.GetIsDel())
        break;
      id = parent.GetParentId();
    }
  }
}

VerifyResult VerifyNewEntry(
    const sync_pb::SyncEntity& update,
    syncable::Entry* target,
    const bool deleted) {
  if (target->good()) {
    // Not a new update.
    return VERIFY_UNDECIDED;
  }
  if (deleted) {
    // Deletion of an item we've never seen can be ignored.
    return VERIFY_SKIP;
  }

  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; check here for updates that break
// consistency rules.
VerifyResult VerifyUpdateConsistency(
    syncable::ModelNeutralWriteTransaction* trans,
    const sync_pb::SyncEntity& update,
    const bool deleted,
    const bool is_directory,
    ModelType model_type,
    syncable::ModelNeutralMutableEntry* target) {
  CHECK(target->good());
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  // If the update is a delete, we don't really need to worry at this stage.
  if (deleted)
    return VERIFY_SUCCESS;

  if (model_type == UNSPECIFIED) {
    // This update is to an item of a datatype we don't recognize. The server
    // shouldn't have sent it to us. Throw it on the ground.
    return VERIFY_SKIP;
  }

  if (target->GetServerVersion() > 0) {
    // Then we've had an update for this entry before.
    if (is_directory != target->GetServerIsDir() ||
        model_type != target->GetServerModelType()) {
      if (target->GetIsDel()) {  // If we've deleted the item, we don't care.
        return VERIFY_SKIP;
      } else {
        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
        LOG(ERROR) << " Entry: " << *target;
        LOG(ERROR) << " Update: "
                   << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_FAIL;
      }
    }

    if (!deleted && (target->GetId() == update_id) &&
        (target->GetServerIsDel() ||
         (!target->GetIsUnsynced() && target->GetIsDel() &&
          target->GetBaseVersion() > 0))) {
      // An undelete. The latter case in the above condition is for
      // when the server does not give us an update following the
      // commit of a delete, before undeleting.
      // Undeletion is common for items that reuse the client-unique tag.
      VerifyResult result = VerifyUndelete(trans, update, target);
      if (VERIFY_UNDECIDED != result)
        return result;
    }
  }
  if (target->GetBaseVersion() > 0) {
    // We've committed this update in the past.
    if (is_directory != target->GetIsDir() ||
        model_type != target->GetModelType()) {
      LOG(ERROR) << "Server update doesn't agree with committed item. ";
      LOG(ERROR) << " Entry: " << *target;
      LOG(ERROR) << " Update: "
                 << SyncerProtoUtil::SyncEntityDebugString(update);
      return VERIFY_FAIL;
    }
    if (target->GetId() == update_id) {
      if (target->GetServerVersion() > update.version()) {
        LOG(WARNING) << "We've already seen a more recent version.";
        LOG(WARNING) << " Entry: " << *target;
        LOG(WARNING) << " Update: "
                     << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_SKIP;
      }
    }
  }
  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'.
VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
                            const sync_pb::SyncEntity& update,
                            syncable::ModelNeutralMutableEntry* target) {
  // TODO(nick): We hit this path for deleted items that the server tells us
  // to re-create; only deleted items with positive base versions will hit
  // this path. However, it's not clear how such an undeletion would actually
  // succeed on the server; in the protocol, a base version of 0 is required
  // to undelete an object. This codepath should be deprecated in favor of
  // client-tag style undeletion (where items go to version 0 when they're
  // deleted), or else removed entirely (if this type of undeletion is indeed
  // impossible).
  CHECK(target->good());
  DVLOG(1) << "Server update is attempting undelete. " << *target
           << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
  // Move the old one aside and start over. It's too tricky to get the old one
  // back into a state that would pass CheckTreeInvariants().
  if (target->GetIsDel()) {
    if (target->GetUniqueClientTag().empty())
      LOG(WARNING) << "Doing move-aside undeletion on client-tagged item.";
    target->PutId(trans->directory()->NextId());
    target->PutUniqueClientTag(std::string());
    target->PutBaseVersion(CHANGES_VERSION);
    target->PutServerVersion(0);
    return VERIFY_SUCCESS;
  }
  if (update.version() < target->GetServerVersion()) {
    LOG(WARNING) << "Update older than current server version for "
                 << *target << " Update:"
                 << SyncerProtoUtil::SyncEntityDebugString(update);
    return VERIFY_SUCCESS;  // Expected in new sync protocol.
  }
  return VERIFY_UNDECIDED;
}

}  // namespace syncer