Home | History | Annotate | Download | only in safe_browsing
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "chrome/browser/safe_browsing/protocol_manager.h"
      6 
      7 #include "base/environment.h"
      8 #include "base/logging.h"
      9 #include "base/memory/scoped_vector.h"
     10 #include "base/metrics/histogram.h"
     11 #include "base/rand_util.h"
     12 #include "base/stl_util.h"
     13 #include "base/strings/string_util.h"
     14 #include "base/strings/stringprintf.h"
     15 #include "base/timer/timer.h"
     16 #include "chrome/browser/safe_browsing/protocol_parser.h"
     17 #include "chrome/common/chrome_version_info.h"
     18 #include "chrome/common/env_vars.h"
     19 #include "google_apis/google_api_keys.h"
     20 #include "net/base/escape.h"
     21 #include "net/base/load_flags.h"
     22 #include "net/base/net_errors.h"
     23 #include "net/url_request/url_fetcher.h"
     24 #include "net/url_request/url_request_context_getter.h"
     25 #include "net/url_request/url_request_status.h"
     26 
     27 #if defined(OS_ANDROID)
     28 #include "net/base/network_change_notifier.h"
     29 #endif
     30 
     31 using base::Time;
     32 using base::TimeDelta;
     33 
namespace {

// UpdateResult indicates what happened with the primary and/or backup update
// requests. The ordering of the values must stay the same for UMA consistency,
// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
enum UpdateResult {
  UPDATE_RESULT_FAIL,
  UPDATE_RESULT_SUCCESS,
  UPDATE_RESULT_BACKUP_CONNECT_FAIL,
  UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
  UPDATE_RESULT_BACKUP_HTTP_FAIL,
  UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
  UPDATE_RESULT_BACKUP_NETWORK_FAIL,
  UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
  // Count of real buckets; must stay after all real values for UMA.
  UPDATE_RESULT_MAX,
  // Alias marking the first backup FAIL/SUCCESS pair; UpdateFinished() uses
  // this as the base when computing a backup update's bucket.
  UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
};

// Records |result| in the SB2.UpdateResult UMA histogram.
void RecordUpdateResult(UpdateResult result) {
  DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
  UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
}

}  // namespace
     58 
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
// The first update is scheduled at a random point in [min, max] to spread
// client load on the servers.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
// Enforced by |timeout_timer_| via UpdateResponseTimeout().
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.  Caps the exponential doubling performed in
// GetNextBackOffInterval().
static const size_t kSbMaxBackOff = 8;
     70 
     71 // The default SBProtocolManagerFactory.
// The default SBProtocolManagerFactory.  Installed lazily by
// SafeBrowsingProtocolManager::Create() when no factory is already set.
class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
 public:
  SBProtocolManagerFactoryImpl() { }
  virtual ~SBProtocolManagerFactoryImpl() { }
  // Creates a new SafeBrowsingProtocolManager; the caller takes ownership
  // of the returned object.
  virtual SafeBrowsingProtocolManager* CreateProtocolManager(
      SafeBrowsingProtocolManagerDelegate* delegate,
      net::URLRequestContextGetter* request_context_getter,
      const SafeBrowsingProtocolConfig& config) OVERRIDE {
    return new SafeBrowsingProtocolManager(
        delegate, request_context_getter, config);
  }
 private:
  DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
};
     86 
     87 // SafeBrowsingProtocolManager implementation ----------------------------------
     88 
// static
SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;

// static
// Entry point for constructing a protocol manager.  Lazily installs the
// default factory on first use, then delegates creation to |factory_|
// (which an embedder/test may have replaced beforehand).
SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config) {
  if (!factory_)
    factory_ = new SBProtocolManagerFactoryImpl();
  return factory_->CreateProtocolManager(
      delegate, request_context_getter, config);
}
    102 
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config)
    : delegate_(delegate),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      // Randomize the first update time to avoid clients hitting the servers
      // in lock-step after startup.
      next_update_interval_(base::TimeDelta::FromSeconds(
          base::RandInt(kSbTimerStartIntervalSecMin,
                        kSbTimerStartIntervalSecMax))),
      update_state_(FIRST_REQUEST),
      chunk_pending_to_write_(false),
      version_(config.version),
      update_size_(0),
      client_name_(config.client_name),
      request_context_getter_(request_context_getter),
      url_prefix_(config.url_prefix),
      // BACKUP_UPDATE_REASON_MAX doubles as "no backup update in progress".
      backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
      disable_auto_update_(config.disable_auto_update),
#if defined(OS_ANDROID)
      disable_connection_check_(config.disable_connection_check),
#endif
      url_fetcher_id_(0),
      app_in_foreground_(true) {
  DCHECK(!url_prefix_.empty());

  // One backup URL prefix per failure class; an empty prefix disables the
  // backup path for that class (see IssueBackupUpdateRequest()).
  backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
      config.backup_connect_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
      config.backup_http_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
      config.backup_network_error_url_prefix;

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  if (version_.empty())
    version_ = SafeBrowsingProtocolManagerHelper::Version();
}
    144 
// static
// Records a GetHash outcome in the appropriate UMA histogram.  Download
// checks and browse checks are tracked separately; the histogram name must
// be a literal at each macro call site, hence the duplicated macro.
void SafeBrowsingProtocolManager::RecordGetHashResult(
    bool is_download, ResultType result_type) {
  if (is_download) {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
                              GET_HASH_RESULT_MAX);
  } else {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
                              GET_HASH_RESULT_MAX);
  }
}
    156 
// Returns true while an update is pending on |update_timer_| (i.e. a future
// GetNextUpdate() call has been scheduled).
bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
  return update_timer_.IsRunning();
}
    160 
SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests.  |hash_requests_| maps owned
  // URLFetcher pointers to their callback details; deleting the fetchers
  // cancels any outstanding network requests.
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();
}
    167 
// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    const std::vector<SBPrefix>& prefixes,
    FullHashCallback callback,
    bool is_download) {
  DCHECK(CalledOnValidThread());
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
    std::vector<SBFullHashResult> full_hashes;
    callback.Run(full_hashes, base::TimeDelta());
    return;
  }
  GURL gethash_url = GetHashUrl();
  // The fetcher is owned via |hash_requests_| and is deleted when its
  // response is handled in OnURLFetchComplete() (or in the destructor).
  net::URLFetcher* fetcher = net::URLFetcher::Create(
      url_fetcher_id_++, gethash_url, net::URLFetcher::POST, this);
  hash_requests_[fetcher] = FullHashDetails(callback, is_download);

  const std::string get_hash = safe_browsing::FormatGetHash(prefixes);

  fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  fetcher->SetRequestContext(request_context_getter_.get());
  fetcher->SetUploadData("text/plain", get_hash);
  fetcher->Start();
}
    197 
// Timer callback: kicks off the next update cycle, unless one is already in
// flight (or, on Android, unless we are off wifi and connection checks are
// enabled, in which case the update is simply rescheduled).
void SafeBrowsingProtocolManager::GetNextUpdate() {
  DCHECK(CalledOnValidThread());
  // Only one update/chunk request may be outstanding at a time.
  if (request_.get() || request_type_ != NO_REQUEST)
    return;

#if defined(OS_ANDROID)
  if (!disable_connection_check_) {
    net::NetworkChangeNotifier::ConnectionType type =
      net::NetworkChangeNotifier::GetConnectionType();
    if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
      // Skip this cycle without counting it as an error.
      ScheduleNextUpdate(false /* no back off */);
      return;
    }
  }
#endif

  IssueUpdateRequest();
}
    216 
// net::URLFetcherDelegate implementation ----------------------------------

// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that effects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const net::URLFetcher* source) {
  DCHECK(CalledOnValidThread());
  // Takes ownership of |source| (whichever branch below claims it) so the
  // fetcher is destroyed when this method returns.
  scoped_ptr<const net::URLFetcher> fetcher;

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    const FullHashDetails& details = it->second;
    std::vector<SBFullHashResult> full_hashes;
    base::TimeDelta cache_lifetime;
    if (source->GetStatus().is_success() &&
        (source->GetResponseCode() == 200 ||
         source->GetResponseCode() == 204)) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (source->GetResponseCode() == 200)
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
      else
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);

      // Any successful response resets the GetHash backoff state.
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      std::string data;
      source->GetResponseAsString(&data);
      if (!safe_browsing::ParseGetHash(
              data.data(), data.length(), &cache_lifetime, &full_hashes)) {
        full_hashes.clear();
        RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
        // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
        // http://crbug.com/360232.)
      }
    } else {
      // Network or HTTP failure: enter/extend GetHash backoff.
      HandleGetHashError(Time::Now());
      if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
        RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
        VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                << " failed with error: " << source->GetStatus().error();
      } else {
        RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
        VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                << " failed with error: " << source->GetResponseCode();
      }
    }

    // Invoke the callback with full_hashes, even if there was a parse error or
    // an error response code (in which case full_hashes will be empty). The
    // caller can't be blocked indefinitely.
    details.callback.Run(full_hashes, cache_lifetime);

    hash_requests_.erase(it);
  } else {
    // Update or chunk response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST ||
        request_type_ == BACKUP_UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      timeout_timer_.Stop();
    }

    net::URLRequestStatus status = source->GetStatus();
    if (status.is_success() && source->GetResponseCode() == 200) {
      // We have data from the SafeBrowsing service.
      std::string data;
      source->GetResponseAsString(&data);

      // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
      // removed or omitted.
      const bool parsed_ok = HandleServiceResponse(
          source->GetURL(), data.data(), data.length());
      if (!parsed_ok) {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed parse.";
        chunk_request_urls_.clear();
        // A failed parse of a primary update response gets one shot at the
        // HTTP-error backup URL before the cycle is declared failed.
        if (request_type_ == UPDATE_REQUEST &&
            IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
          return;
        }
        UpdateFinished(false);
      }

      // Note: UpdateFinished() above resets request_type_ to NO_REQUEST,
      // which is why the switch below has an explicit NO_REQUEST case.
      switch (request_type_) {
        case CHUNK_REQUEST:
          if (parsed_ok) {
            chunk_request_urls_.pop_front();
            if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
              UpdateFinished(true);
          }
          break;
        case UPDATE_REQUEST:
        case BACKUP_UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        case NO_REQUEST:
          // This can happen if HandleServiceResponse fails above.
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      if (status.status() == net::URLRequestStatus::FAILED) {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed with error: " << source->GetStatus().error();
      } else {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed with error: " << source->GetResponseCode();
      }
      if (request_type_ == CHUNK_REQUEST) {
        // The SafeBrowsing service error, or very bad response code: back off.
        chunk_request_urls_.clear();
      } else if (request_type_ == UPDATE_REQUEST) {
        // Classify the failure so the matching backup URL can be tried:
        // HTTP error (request completed, bad code), network change/disconnect,
        // or any other connection-level failure.
        BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
        if (status.is_success()) {
          backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
        } else {
          switch (status.error()) {
            case net::ERR_INTERNET_DISCONNECTED:
            case net::ERR_NETWORK_CHANGED:
              backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
              break;
            default:
              backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
              break;
          }
        }
        if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
            IssueBackupUpdateRequest(backup_update_reason)) {
          return;
        }
      }
      UpdateFinished(false);
    }
  }

  // Get the next chunk if available.
  IssueChunkRequest();
}
    377 
// Parses a successful (HTTP 200) update or chunk response body and applies
// it: schedules the next update, queues chunk downloads, and forwards chunk
// adds/deletes (and database resets) to the delegate.  Returns false on a
// parse failure or if called with an unexpected |request_type_|.
bool SafeBrowsingProtocolManager::HandleServiceResponse(
    const GURL& url, const char* data, size_t length) {
  DCHECK(CalledOnValidThread());

  switch (request_type_) {
    case UPDATE_REQUEST:
    case BACKUP_UPDATE_REQUEST: {
      size_t next_update_sec = 0;
      bool reset = false;
      scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
          new std::vector<SBChunkDelete>);
      std::vector<ChunkUrl> chunk_urls;
      if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
                                      chunk_deletes.get(), &chunk_urls)) {
        return false;
      }

      base::TimeDelta next_update_interval =
          base::TimeDelta::FromSeconds(next_update_sec);
      last_update_ = Time::Now();

      // Advance the FIRST -> SECOND -> NORMAL request state machine.
      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.  If the server supplied no interval,
      // only the second request gets a short (15-45s) randomized follow-up.
      if (next_update_interval > base::TimeDelta()) {
        next_update_interval_ = next_update_interval;
      } else if (update_state_ == SECOND_REQUEST) {
        next_update_interval_ = base::TimeDelta::FromSeconds(
            base::RandInt(15, 45));
      }

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case were the SafeBrowsing service tells us to dump our
      // database.  Deletes and queued chunk URLs are intentionally skipped.
      if (reset) {
        delegate_->ResetDatabase();
        return true;
      }

      // Chunks to delete from our storage.
      if (!chunk_deletes->empty())
        delegate_->DeleteChunks(chunk_deletes.Pass());

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      scoped_ptr<ScopedVector<SBChunkData> >
          chunks(new ScopedVector<SBChunkData>);
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!safe_browsing::ParseChunk(data, length, chunks.get()))
        return false;

      // Chunks to add to storage.  Pass ownership of |chunks|.  The write is
      // asynchronous; |chunk_pending_to_write_| blocks further chunk requests
      // until OnAddChunksComplete() runs.
      if (!chunks->empty()) {
        chunk_pending_to_write_ = true;
        delegate_->AddChunks(
            chunk_url.list_name, chunks.Pass(),
            base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
                       base::Unretained(this)));
      }

      break;
    }

    default:
      return false;
  }

  return true;
}
    462 
    463 void SafeBrowsingProtocolManager::Initialize() {
    464   DCHECK(CalledOnValidThread());
    465   // Don't want to hit the safe browsing servers on build/chrome bots.
    466   scoped_ptr<base::Environment> env(base::Environment::Create());
    467   if (env->HasVar(env_vars::kHeadless))
    468     return;
    469   ScheduleNextUpdate(false /* no back off */);
    470 }
    471 
    472 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
    473   DCHECK(CalledOnValidThread());
    474   if (disable_auto_update_) {
    475     // Unschedule any current timer.
    476     update_timer_.Stop();
    477     return;
    478   }
    479   // Reschedule with the new update.
    480   base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
    481   ForceScheduleNextUpdate(next_update_interval);
    482 }
    483 
// Unconditionally (re)schedules GetNextUpdate() to fire after |interval|,
// replacing any previously scheduled update.
void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    base::TimeDelta interval) {
  DCHECK(CalledOnValidThread());
  DCHECK(interval >= base::TimeDelta());
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(FROM_HERE, interval, this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}
    493 
    494 // According to section 5 of the SafeBrowsing protocol specification, we must
    495 // back off after a certain number of errors. We only change |next_update_sec_|
    496 // when we receive a response from the SafeBrowsing service.
    497 base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
    498     bool back_off) {
    499   DCHECK(CalledOnValidThread());
    500   DCHECK(next_update_interval_ > base::TimeDelta());
    501   base::TimeDelta next = next_update_interval_;
    502   if (back_off) {
    503     next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
    504   } else {
    505     // Successful response means error reset.
    506     update_error_count_ = 0;
    507     update_back_off_mult_ = 1;
    508   }
    509   return next;
    510 }
    511 
    512 base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
    513     size_t* error_count, size_t* multiplier) const {
    514   DCHECK(CalledOnValidThread());
    515   DCHECK(multiplier && error_count);
    516   (*error_count)++;
    517   if (*error_count > 1 && *error_count < 6) {
    518     base::TimeDelta next = base::TimeDelta::FromMinutes(
    519         *multiplier * (1 + back_off_fuzz_) * 30);
    520     *multiplier *= 2;
    521     if (*multiplier > kSbMaxBackOff)
    522       *multiplier = kSbMaxBackOff;
    523     return next;
    524   }
    525   if (*error_count >= 6)
    526     return base::TimeDelta::FromHours(8);
    527   return base::TimeDelta::FromMinutes(1);
    528 }
    529 
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              otherhand, this request will only occur ~20-30 minutes so there
//              isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  DCHECK(CalledOnValidThread());
  request_type_ = UPDATE_REQUEST;
  // Notify the delegate first, then ask it for the chunk ranges; the network
  // request itself is built in OnGetChunksComplete().
  delegate_->UpdateStarted();
  delegate_->GetChunks(
      base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
                 base::Unretained(this)));
}
    545 
// The backup request can run immediately since the chunks have already been
// retrieved from the DB.
// Returns false (leaving state untouched) when no backup URL prefix is
// configured for |backup_update_reason|; otherwise reuses the already-built
// |update_list_data_| against the backup server and returns true.
bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
    BackupUpdateReason backup_update_reason) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  if (backup_url_prefixes_[backup_update_reason].empty())
    return false;
  request_type_ = BACKUP_UPDATE_REQUEST;
  backup_update_reason_ = backup_update_reason;

  GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);

  return true;
}
    574 
// Issues a GET for the chunk at the front of |chunk_request_urls_|, if any.
// No-op while another request is outstanding or while a previous chunk is
// still being written to disk.
void SafeBrowsingProtocolManager::IssueChunkRequest() {
  DCHECK(CalledOnValidThread());
  // We are only allowed to have one request outstanding at any time.  Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  GURL chunk_url = NextChunkUrl(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, chunk_url, net::URLFetcher::GET, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  // Timestamp for the SB2.ChunkRequest latency histogram.
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}
    594 
// Delegate callback with the stored chunk ranges for each list.  Formats the
// update request body into |update_list_data_| and sends it, or aborts the
// cycle (without backoff) on a database error.
void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(update_list_data_.empty());
  if (database_error) {
    // The update was not successful, but don't back off.
    UpdateFinished(false, false);
    return;
  }

  // Format our stored chunks:
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    update_list_data_.append(safe_browsing::FormatList(lists[i]));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  // TODO(shess): These cases never happen because the database fills in the
  // lists in GetChunks().  Refactor the unit tests so that this code can be
  // removed.
  if (!found_phishing) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList)));
  }
  if (!found_malware) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList)));
  }

  // Large requests are (probably) a sign of database corruption.
  // Record stats to inform decisions about whether to automate
  // deletion of such databases.  http://crbug.com/120219
  UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());

  GURL update_url = UpdateUrl();
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}
    650 
// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(CalledOnValidThread());
  DCHECK(request_type_ == UPDATE_REQUEST ||
         request_type_ == BACKUP_UPDATE_REQUEST);
  // Cancel the in-flight fetch; a late response is ignored in
  // OnURLFetchComplete() because |request_| is now NULL.
  request_.reset();
  // A timed-out primary update gets one shot at the connect-error backup URL.
  if (request_type_ == UPDATE_REQUEST &&
      IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
    return;
  }
  UpdateFinished(false);
}
    664 
    665 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
    666   DCHECK(CalledOnValidThread());
    667   chunk_pending_to_write_ = false;
    668 
    669   if (chunk_request_urls_.empty()) {
    670     UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
    671     UpdateFinished(true);
    672   } else {
    673     IssueChunkRequest();
    674   }
    675 }
    676 
// Records a GetHash failure and advances |next_gethash_time_|; GetFullHash()
// short-circuits (returns empty results) until that time passes.
void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  DCHECK(CalledOnValidThread());
  base::TimeDelta next = GetNextBackOffInterval(
      &gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + next;
}
    683 
// Convenience overload: a failed update backs off, a successful one does not.
void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UpdateFinished(success, !success);
}
    687 
// Ends the current update cycle: records size/result histograms, resets the
// per-cycle state, notifies the delegate, and schedules the next update
// (with backoff iff |back_off|).
void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
  DCHECK(CalledOnValidThread());
#if defined(OS_ANDROID)
  if (app_in_foreground_)
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
  else
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
#endif
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  // A chunk request that reaches here without |success| still counts as a
  // successful update for UMA purposes.
  bool update_success = success || request_type_ == CHUNK_REQUEST;
  if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
    RecordUpdateResult(
        update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
  } else {
    // Backup buckets are laid out as (FAIL, SUCCESS) pairs per reason,
    // starting at UPDATE_RESULT_BACKUP_START; see the UpdateResult enum.
    UpdateResult update_result = static_cast<UpdateResult>(
          UPDATE_RESULT_BACKUP_START +
          (static_cast<int>(backup_update_reason_) * 2) +
          update_success);
    RecordUpdateResult(update_result);
  }
  backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
  request_type_ = NO_REQUEST;
  update_list_data_.clear();
  delegate_->UpdateFinished(success);
  ScheduleNextUpdate(back_off);
}
    715 
// Builds the primary "downloads" (update) request URL from |url_prefix_|.
GURL SafeBrowsingProtocolManager::UpdateUrl() const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "downloads", client_name_, version_, additional_query_);
  return GURL(url);
}
    721 
// Builds the "downloads" request URL against the backup prefix configured
// for |backup_update_reason|.  Must only be called when that prefix is set
// (IssueBackupUpdateRequest() checks first).
GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
    BackupUpdateReason backup_update_reason) const {
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
      version_, additional_query_);
  return GURL(url);
}
    732 
// Builds the "gethash" request URL from |url_prefix_|.
GURL SafeBrowsingProtocolManager::GetHashUrl() const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "gethash", client_name_, version_, additional_query_);
  return GURL(url);
}
    738 
    739 GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
    740   DCHECK(CalledOnValidThread());
    741   std::string next_url;
    742   if (!StartsWithASCII(url, "http://", false) &&
    743       !StartsWithASCII(url, "https://", false)) {
    744     // Use https if we updated via https, otherwise http (useful for testing).
    745     if (StartsWithASCII(url_prefix_, "https://", false))
    746       next_url.append("https://");
    747     else
    748       next_url.append("http://");
    749     next_url.append(url);
    750   } else {
    751     next_url = url;
    752   }
    753   if (!additional_query_.empty()) {
    754     if (next_url.find("?") != std::string::npos) {
    755       next_url.append("&");
    756     } else {
    757       next_url.append("?");
    758     }
    759     next_url.append(additional_query_);
    760   }
    761   return GURL(next_url);
    762 }
    763 
// Default-constructed details: null callback, non-download request.
SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
    : callback(),
      is_download(false) {
}

// Records the callback to invoke and whether this GetHash request was made
// on behalf of a download check.
SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
    FullHashCallback callback, bool is_download)
    : callback(callback),
      is_download(is_download) {
}

SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
}

SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {
}
    780