// (Code-search navigation header removed; file lives under content/browser/speech/.)
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
#include "content/browser/speech/google_streaming_remote_engine.h"

#include <algorithm>
#include <string>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "content/browser/speech/audio_buffer.h"
#include "content/browser/speech/proto/google_streaming_api.pb.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/speech_recognition_error.h"
#include "content/public/common/speech_recognition_result.h"
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"
     28 
     29 using net::URLFetcher;
     30 
     31 namespace content {
     32 namespace {
     33 
// Endpoints of the Google full-duplex speech recognition webservice: audio is
// POSTed to the "up" URL while results are streamed back from the "down" URL.
const char kWebServiceBaseUrl[] =
    "https://www.google.com/speech-api/full-duplex/v1";
const char kDownstreamUrl[] = "/down?";
const char kUpstreamUrl[] = "/up?";
// Audio sent upstream is FLAC-encoded by default.
const AudioEncoder::Codec kDefaultAudioCodec = AudioEncoder::CODEC_FLAC;

// This matches the maximum maxAlternatives value supported by the server.
const uint32 kMaxMaxAlternatives = 30;
     42 
     43 // TODO(hans): Remove this and other logging when we don't need it anymore.
     44 void DumpResponse(const std::string& response) {
     45   DVLOG(1) << "------------";
     46   proto::SpeechRecognitionEvent event;
     47   if (!event.ParseFromString(response)) {
     48     DVLOG(1) << "Parse failed!";
     49     return;
     50   }
     51   if (event.has_status())
     52     DVLOG(1) << "STATUS\t" << event.status();
     53   for (int i = 0; i < event.result_size(); ++i) {
     54     DVLOG(1) << "RESULT #" << i << ":";
     55     const proto::SpeechRecognitionResult& res = event.result(i);
     56     if (res.has_final())
     57       DVLOG(1) << "  FINAL:\t" << res.final();
     58     if (res.has_stability())
     59       DVLOG(1) << "  STABILITY:\t" << res.stability();
     60     for (int j = 0; j < res.alternative_size(); ++j) {
     61       const proto::SpeechRecognitionAlternative& alt =
     62           res.alternative(j);
     63       if (alt.has_confidence())
     64         DVLOG(1) << "    CONFIDENCE:\t" << alt.confidence();
     65       if (alt.has_transcript())
     66         DVLOG(1) << "    TRANSCRIPT:\t" << alt.transcript();
     67     }
     68   }
     69 }
     70 
     71 std::string GetAPIKey() {
     72   const CommandLine& command_line = *CommandLine::ForCurrentProcess();
     73   if (command_line.HasSwitch(switches::kSpeechRecognitionWebserviceKey)) {
     74     DVLOG(1) << "GetAPIKey() used key from command-line.";
     75     return command_line.GetSwitchValueASCII(
     76         switches::kSpeechRecognitionWebserviceKey);
     77   }
     78 
     79   std::string api_key = google_apis::GetAPIKey();
     80   if (api_key.empty())
     81     DVLOG(1) << "GetAPIKey() returned empty string!";
     82 
     83   return api_key;
     84 }
     85 
     86 }  // namespace
     87 
// Duration, in milliseconds, of the audio chunks callers are expected to
// deliver (see GetDesiredAudioChunkDurationMs()).
const int GoogleStreamingRemoteEngine::kAudioPacketIntervalMs = 100;
// URLFetcher IDs, used by tests to tell the two fetchers apart.
const int GoogleStreamingRemoteEngine::kUpstreamUrlFetcherIdForTesting = 0;
const int GoogleStreamingRemoteEngine::kDownstreamUrlFetcherIdForTesting = 1;
// Status codes reported by the webservice in SpeechRecognitionEvent.status.
const int GoogleStreamingRemoteEngine::kWebserviceStatusNoError = 0;
const int GoogleStreamingRemoteEngine::kWebserviceStatusErrorNoMatch = 5;
     93 
// The engine starts idle; the encoder and both URL fetchers are created
// lazily when recognition starts. |context| supplies the request context
// shared by the upstream and downstream fetchers.
GoogleStreamingRemoteEngine::GoogleStreamingRemoteEngine(
    net::URLRequestContextGetter* context)
    : url_context_(context),
      previous_response_length_(0),
      got_last_definitive_result_(false),
      is_dispatching_event_(false),
      state_(STATE_IDLE) {}
    101 
// Scoped members (fetchers, encoder) release their resources automatically.
GoogleStreamingRemoteEngine::~GoogleStreamingRemoteEngine() {}
    103 
// Stores the recognition parameters used by the next StartRecognition().
void GoogleStreamingRemoteEngine::SetConfig(
    const SpeechRecognitionEngineConfig& config) {
  config_ = config;
}
    108 
    109 void GoogleStreamingRemoteEngine::StartRecognition() {
    110   FSMEventArgs event_args(EVENT_START_RECOGNITION);
    111   DispatchEvent(event_args);
    112 }
    113 
    114 void GoogleStreamingRemoteEngine::EndRecognition() {
    115   FSMEventArgs event_args(EVENT_END_RECOGNITION);
    116   DispatchEvent(event_args);
    117 }
    118 
    119 void GoogleStreamingRemoteEngine::TakeAudioChunk(const AudioChunk& data) {
    120   FSMEventArgs event_args(EVENT_AUDIO_CHUNK);
    121   event_args.audio_data = &data;
    122   DispatchEvent(event_args);
    123 }
    124 
    125 void GoogleStreamingRemoteEngine::AudioChunksEnded() {
    126   FSMEventArgs event_args(EVENT_AUDIO_CHUNKS_ENDED);
    127   DispatchEvent(event_args);
    128 }
    129 
    130 void GoogleStreamingRemoteEngine::OnURLFetchComplete(const URLFetcher* source) {
    131   const bool kResponseComplete = true;
    132   DispatchHTTPResponse(source, kResponseComplete);
    133 }
    134 
    135 void GoogleStreamingRemoteEngine::OnURLFetchDownloadProgress(
    136     const URLFetcher* source, int64 current, int64 total) {
    137   const bool kPartialResponse = false;
    138   DispatchHTTPResponse(source, kPartialResponse);
    139 }
    140 
    141 void GoogleStreamingRemoteEngine::DispatchHTTPResponse(const URLFetcher* source,
    142                                                        bool end_of_response) {
    143   DCHECK(CalledOnValidThread());
    144   DCHECK(source);
    145   const bool response_is_good = source->GetStatus().is_success() &&
    146                                 source->GetResponseCode() == 200;
    147   std::string response;
    148   if (response_is_good)
    149     source->GetResponseAsString(&response);
    150   const size_t current_response_length = response.size();
    151 
    152   DVLOG(1) << (source == downstream_fetcher_.get() ? "Downstream" : "Upstream")
    153            << "HTTP, code: " << source->GetResponseCode()
    154            << "      length: " << current_response_length
    155            << "      eor: " << end_of_response;
    156 
    157   // URLFetcher provides always the entire response buffer, but we are only
    158   // interested in the fresh data introduced by the last chunk. Therefore, we
    159   // drop the previous content we have already processed.
    160   if (current_response_length != 0) {
    161     DCHECK_GE(current_response_length, previous_response_length_);
    162     response.erase(0, previous_response_length_);
    163     previous_response_length_ = current_response_length;
    164   }
    165 
    166   if (!response_is_good && source == downstream_fetcher_.get()) {
    167     DVLOG(1) << "Downstream error " << source->GetResponseCode();
    168     FSMEventArgs event_args(EVENT_DOWNSTREAM_ERROR);
    169     DispatchEvent(event_args);
    170     return;
    171   }
    172   if (!response_is_good && source == upstream_fetcher_.get()) {
    173     DVLOG(1) << "Upstream error " << source->GetResponseCode()
    174              << " EOR " << end_of_response;
    175     FSMEventArgs event_args(EVENT_UPSTREAM_ERROR);
    176     DispatchEvent(event_args);
    177     return;
    178   }
    179 
    180   // Ignore incoming data on the upstream connection.
    181   if (source == upstream_fetcher_.get())
    182     return;
    183 
    184   DCHECK(response_is_good && source == downstream_fetcher_.get());
    185 
    186   // The downstream response is organized in chunks, whose size is determined
    187   // by a 4 bytes prefix, transparently handled by the ChunkedByteBuffer class.
    188   // Such chunks are sent by the speech recognition webservice over the HTTP
    189   // downstream channel using HTTP chunked transfer (unrelated to our chunks).
    190   // This function is called every time an HTTP chunk is received by the
    191   // url fetcher. However there isn't any particular matching beween our
    192   // protocol chunks and HTTP chunks, in the sense that a single HTTP chunk can
    193   // contain a portion of one chunk or even more chunks together.
    194   chunked_byte_buffer_.Append(response);
    195 
    196   // A single HTTP chunk can contain more than one data chunk, thus the while.
    197   while (chunked_byte_buffer_.HasChunks()) {
    198     FSMEventArgs event_args(EVENT_DOWNSTREAM_RESPONSE);
    199     event_args.response = chunked_byte_buffer_.PopChunk();
    200     DCHECK(event_args.response.get());
    201     DumpResponse(std::string(event_args.response->begin(),
    202                              event_args.response->end()));
    203     DispatchEvent(event_args);
    204   }
    205   if (end_of_response) {
    206     FSMEventArgs event_args(EVENT_DOWNSTREAM_CLOSED);
    207     DispatchEvent(event_args);
    208   }
    209 }
    210 
    211 bool GoogleStreamingRemoteEngine::IsRecognitionPending() const {
    212   DCHECK(CalledOnValidThread());
    213   return state_ != STATE_IDLE;
    214 }
    215 
// Callers should deliver audio in chunks of this duration; see
// kAudioPacketIntervalMs.
int GoogleStreamingRemoteEngine::GetDesiredAudioChunkDurationMs() const {
  return kAudioPacketIntervalMs;
}
    219 
    220 // -----------------------  Core FSM implementation ---------------------------
    221 
// Runs a single FSM transition for |event_args| and stores the resulting
// state. The is_dispatching_event_ flag guards against reentrancy.
void GoogleStreamingRemoteEngine::DispatchEvent(
    const FSMEventArgs& event_args) {
  DCHECK(CalledOnValidThread());
  DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
  DCHECK_LE(state_, STATE_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the rules
  // and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  state_ = ExecuteTransitionAndGetNextState(event_args);

  is_dispatching_event_ = false;
}
    237 
// The FSM transition table: given the current |state_| and the incoming
// event, invokes the matching action function and returns the state the FSM
// should move to. (State, event) pairs that should be impossible are routed
// to NotFeasible(), which hits a NOTREACHED().
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ExecuteTransitionAndGetNextState(
    const FSMEventArgs& event_args) {
  const FSMEvent event = event_args.event;
  switch (state_) {
    case STATE_IDLE:
      switch (event) {
        case EVENT_START_RECOGNITION:
          return ConnectBothStreams(event_args);
        case EVENT_END_RECOGNITION:
        // Note AUDIO_CHUNK and AUDIO_END events can remain enqueued in case of
        // abort, so we just silently drop them here.
        case EVENT_AUDIO_CHUNK:
        case EVENT_AUDIO_CHUNKS_ENDED:
        // DOWNSTREAM_CLOSED can be received if we end up here due to an error.
        case EVENT_DOWNSTREAM_CLOSED:
          return DoNothing(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
        case EVENT_DOWNSTREAM_RESPONSE:
          return NotFeasible(event_args);
      }
      break;
    case STATE_BOTH_STREAMS_CONNECTED:
      switch (event) {
        case EVENT_AUDIO_CHUNK:
          return TransmitAudioUpstream(event_args);
        case EVENT_DOWNSTREAM_RESPONSE:
          return ProcessDownstreamResponse(event_args);
        case EVENT_AUDIO_CHUNKS_ENDED:
          return CloseUpstreamAndWaitForResults(event_args);
        case EVENT_END_RECOGNITION:
          return AbortSilently(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
        case EVENT_DOWNSTREAM_CLOSED:
          return AbortWithError(event_args);
        case EVENT_START_RECOGNITION:
          return NotFeasible(event_args);
      }
      break;
    case STATE_WAITING_DOWNSTREAM_RESULTS:
      switch (event) {
        case EVENT_DOWNSTREAM_RESPONSE:
          return ProcessDownstreamResponse(event_args);
        case EVENT_DOWNSTREAM_CLOSED:
          return RaiseNoMatchErrorIfGotNoResults(event_args);
        case EVENT_END_RECOGNITION:
          return AbortSilently(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
          return AbortWithError(event_args);
        case EVENT_START_RECOGNITION:
        case EVENT_AUDIO_CHUNK:
        case EVENT_AUDIO_CHUNKS_ENDED:
          return NotFeasible(event_args);
      }
      break;
  }
  // Unreachable: every (state, event) pair is handled above.
  return NotFeasible(event_args);
}
    299 
    300 // ----------- Contract for all the FSM evolution functions below -------------
    301 //  - Are guaranteed to be executed in the same thread (IO, except for tests);
    302 //  - Are guaranteed to be not reentrant (themselves and each other);
    303 //  - event_args members are guaranteed to be stable during the call;
    304 
// Opens the two HTTP connections to the webservice: a downstream GET from
// which results will be streamed back, and an upstream chunked POST over
// which encoded audio is sent. The server ties the two requests together via
// the shared "pair" request key. Also (re)creates the audio encoder.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ConnectBothStreams(const FSMEventArgs&) {
  DCHECK(!upstream_fetcher_.get());
  DCHECK(!downstream_fetcher_.get());

  encoder_.reset(AudioEncoder::Create(kDefaultAudioCodec,
                                      config_.audio_sample_rate,
                                      config_.audio_num_bits_per_sample));
  DCHECK(encoder_.get());
  // Key used by the server to pair the upstream and downstream requests.
  const std::string request_key = GenerateRequestKey();

  // Setup downstream fetcher.
  std::vector<std::string> downstream_args;
  downstream_args.push_back(
      "key=" + net::EscapeQueryParamValue(GetAPIKey(), true));
  downstream_args.push_back("pair=" + request_key);
  downstream_args.push_back("output=pb");
  GURL downstream_url(std::string(kWebServiceBaseUrl) +
                      std::string(kDownstreamUrl) +
                      JoinString(downstream_args, '&'));

  downstream_fetcher_.reset(URLFetcher::Create(
      kDownstreamUrlFetcherIdForTesting, downstream_url, URLFetcher::GET,
      this));
  downstream_fetcher_->SetRequestContext(url_context_.get());
  // Never send or store user credentials with these background requests.
  downstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                                    net::LOAD_DO_NOT_SEND_COOKIES |
                                    net::LOAD_DO_NOT_SEND_AUTH_DATA);
  downstream_fetcher_->Start();

  // Setup upstream fetcher.
  // TODO(hans): Support for user-selected grammars.
  std::vector<std::string> upstream_args;
  upstream_args.push_back("key=" +
      net::EscapeQueryParamValue(GetAPIKey(), true));
  upstream_args.push_back("pair=" + request_key);
  upstream_args.push_back("output=pb");
  upstream_args.push_back(
      "lang=" + net::EscapeQueryParamValue(GetAcceptedLanguages(), true));
  upstream_args.push_back(
      config_.filter_profanities ? "pFilter=2" : "pFilter=0");
  if (config_.max_hypotheses > 0U) {
    // The server caps maxAlternatives at kMaxMaxAlternatives.
    int max_alternatives = std::min(kMaxMaxAlternatives,
                                    config_.max_hypotheses);
    upstream_args.push_back("maxAlternatives=" +
                            base::UintToString(max_alternatives));
  }
  upstream_args.push_back("client=chromium");
  if (!config_.hardware_info.empty()) {
    upstream_args.push_back(
        "xhw=" + net::EscapeQueryParamValue(config_.hardware_info, true));
  }
  if (config_.continuous)
    upstream_args.push_back("continuous");
  if (config_.interim_results)
    upstream_args.push_back("interim");

  GURL upstream_url(std::string(kWebServiceBaseUrl) +
                    std::string(kUpstreamUrl) +
                    JoinString(upstream_args, '&'));

  upstream_fetcher_.reset(URLFetcher::Create(
      kUpstreamUrlFetcherIdForTesting, upstream_url, URLFetcher::POST, this));
  // Audio is streamed as it arrives, using HTTP chunked transfer encoding.
  upstream_fetcher_->SetChunkedUpload(encoder_->mime_type());
  upstream_fetcher_->SetRequestContext(url_context_.get());
  upstream_fetcher_->SetReferrer(config_.origin_url);
  upstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                                  net::LOAD_DO_NOT_SEND_COOKIES |
                                  net::LOAD_DO_NOT_SEND_AUTH_DATA);
  upstream_fetcher_->Start();
  // Reset the tracker used to strip already-processed bytes from the
  // cumulative downstream response buffer.
  previous_response_length_ = 0;
  return STATE_BOTH_STREAMS_CONNECTED;
}
    378 
    379 GoogleStreamingRemoteEngine::FSMState
    380 GoogleStreamingRemoteEngine::TransmitAudioUpstream(
    381     const FSMEventArgs& event_args) {
    382   DCHECK(upstream_fetcher_.get());
    383   DCHECK(event_args.audio_data.get());
    384   const AudioChunk& audio = *(event_args.audio_data.get());
    385 
    386   DCHECK_EQ(audio.bytes_per_sample(), config_.audio_num_bits_per_sample / 8);
    387   encoder_->Encode(audio);
    388   scoped_refptr<AudioChunk> encoded_data(encoder_->GetEncodedDataAndClear());
    389   upstream_fetcher_->AppendChunkToUpload(encoded_data->AsString(), false);
    390   return state_;
    391 }
    392 
// Parses one protocol chunk from the downstream connection and acts on it:
// server-reported error statuses abort the recognition, while result lists
// are converted into SpeechRecognitionResults and forwarded to the delegate.
// Keeps the current state on success.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ProcessDownstreamResponse(
    const FSMEventArgs& event_args) {
  DCHECK(event_args.response.get());

  proto::SpeechRecognitionEvent ws_event;
  if (!ws_event.ParseFromString(std::string(event_args.response->begin(),
                                            event_args.response->end())))
    return AbortWithError(event_args);

  // An empty (default) event is used to notify us that the upstream has
  // been connected. Ignore.
  if (!ws_event.result_size() && (!ws_event.has_status() ||
      ws_event.status() == proto::SpeechRecognitionEvent::STATUS_SUCCESS)) {
    DVLOG(1) << "Received empty response";
    return state_;
  }

  if (ws_event.has_status()) {
    // Map webservice status codes onto SpeechRecognitionErrorCode values and
    // abort on anything that is not a success.
    switch (ws_event.status()) {
      case proto::SpeechRecognitionEvent::STATUS_SUCCESS:
        break;
      case proto::SpeechRecognitionEvent::STATUS_NO_SPEECH:
        return Abort(SPEECH_RECOGNITION_ERROR_NO_SPEECH);
      case proto::SpeechRecognitionEvent::STATUS_ABORTED:
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_AUDIO_CAPTURE:
        return Abort(SPEECH_RECOGNITION_ERROR_AUDIO);
      case proto::SpeechRecognitionEvent::STATUS_NETWORK:
        return Abort(SPEECH_RECOGNITION_ERROR_NETWORK);
      case proto::SpeechRecognitionEvent::STATUS_NOT_ALLOWED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_SERVICE_NOT_ALLOWED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_BAD_GRAMMAR:
        return Abort(SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR);
      case proto::SpeechRecognitionEvent::STATUS_LANGUAGE_NOT_SUPPORTED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
    }
  }

  // Convert the protobuf results into SpeechRecognitionResults for the
  // delegate.
  SpeechRecognitionResults results;
  for (int i = 0; i < ws_event.result_size(); ++i) {
    const proto::SpeechRecognitionResult& ws_result = ws_event.result(i);
    results.push_back(SpeechRecognitionResult());
    SpeechRecognitionResult& result = results.back();
    // A result without final == true is provisional (interim) and may still
    // change in later events.
    result.is_provisional = !(ws_result.has_final() && ws_result.final());

    if (!result.is_provisional)
      got_last_definitive_result_ = true;

    for (int j = 0; j < ws_result.alternative_size(); ++j) {
      const proto::SpeechRecognitionAlternative& ws_alternative =
          ws_result.alternative(j);
      SpeechRecognitionHypothesis hypothesis;
      if (ws_alternative.has_confidence())
        hypothesis.confidence = ws_alternative.confidence();
      else if (ws_result.has_stability())
        // Fall back on the result-level stability when the alternative
        // carries no confidence of its own.
        hypothesis.confidence = ws_result.stability();
      DCHECK(ws_alternative.has_transcript());
      // TODO(hans): Perhaps the transcript should be required in the proto?
      if (ws_alternative.has_transcript())
        hypothesis.utterance = UTF8ToUTF16(ws_alternative.transcript());

      result.hypotheses.push_back(hypothesis);
    }
  }

  delegate()->OnSpeechRecognitionEngineResults(results);

  return state_;
}
    468 
    469 GoogleStreamingRemoteEngine::FSMState
    470 GoogleStreamingRemoteEngine::RaiseNoMatchErrorIfGotNoResults(
    471     const FSMEventArgs& event_args) {
    472   if (!got_last_definitive_result_) {
    473     // Provide an empty result to notify that recognition is ended with no
    474     // errors, yet neither any further results.
    475     delegate()->OnSpeechRecognitionEngineResults(SpeechRecognitionResults());
    476   }
    477   return AbortSilently(event_args);
    478 }
    479 
// Finalizes the upstream transmission once audio capture has ended: encodes
// one packet of silence (the encoder requires a non-empty final buffer),
// flushes the encoder, and sends the result as the last upload chunk. The
// FSM then waits for the remaining downstream results.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::CloseUpstreamAndWaitForResults(
    const FSMEventArgs&) {
  DCHECK(upstream_fetcher_.get());
  DCHECK(encoder_.get());

  DVLOG(1) <<  "Closing upstream.";

  // The encoder requires a non-empty final buffer. So we encode a packet
  // of silence in case encoder had no data already.
  std::vector<short> samples(
      config_.audio_sample_rate * kAudioPacketIntervalMs / 1000);
  scoped_refptr<AudioChunk> dummy_chunk =
      new AudioChunk(reinterpret_cast<uint8*>(&samples[0]),
                     samples.size() * sizeof(short),
                     encoder_->bits_per_sample() / 8);
  encoder_->Encode(*dummy_chunk.get());
  encoder_->Flush();
  scoped_refptr<AudioChunk> encoded_dummy_data =
      encoder_->GetEncodedDataAndClear();
  DCHECK(!encoded_dummy_data->IsEmpty());
  // The encoder is no longer needed once the final chunk has been produced.
  encoder_.reset();

  // |true| marks this as the last chunk of the chunked upload.
  upstream_fetcher_->AppendChunkToUpload(encoded_dummy_data->AsString(), true);
  // Reset so the downstream-closed handler can tell whether any definitive
  // result arrives from here on.
  got_last_definitive_result_ = false;
  return STATE_WAITING_DOWNSTREAM_RESULTS;
}
    507 
    508 GoogleStreamingRemoteEngine::FSMState
    509 GoogleStreamingRemoteEngine::CloseDownstream(const FSMEventArgs&) {
    510   DCHECK(!upstream_fetcher_.get());
    511   DCHECK(downstream_fetcher_.get());
    512 
    513   DVLOG(1) <<  "Closing downstream.";
    514   downstream_fetcher_.reset();
    515   return STATE_IDLE;
    516 }
    517 
// Tears everything down without reporting an error to the delegate (used for
// expected terminations, e.g. EVENT_END_RECOGNITION).
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::AbortSilently(const FSMEventArgs&) {
  return Abort(SPEECH_RECOGNITION_ERROR_NONE);
}
    522 
// Tears everything down reporting a network error to the delegate (used when
// either HTTP connection fails or closes unexpectedly).
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::AbortWithError(const FSMEventArgs&) {
  return Abort(SPEECH_RECOGNITION_ERROR_NETWORK);
}
    527 
    528 GoogleStreamingRemoteEngine::FSMState GoogleStreamingRemoteEngine::Abort(
    529     SpeechRecognitionErrorCode error_code) {
    530   DVLOG(1) << "Aborting with error " << error_code;
    531 
    532   if (error_code != SPEECH_RECOGNITION_ERROR_NONE) {
    533     delegate()->OnSpeechRecognitionEngineError(
    534         SpeechRecognitionError(error_code));
    535   }
    536   downstream_fetcher_.reset();
    537   upstream_fetcher_.reset();
    538   encoder_.reset();
    539   return STATE_IDLE;
    540 }
    541 
// No-op transition: the event is deliberately ignored and the state is kept.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::DoNothing(const FSMEventArgs&) {
  return state_;
}
    546 
// Transition for (state, event) pairs that should never occur; trips
// NOTREACHED() and keeps the current state in release builds.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::NotFeasible(const FSMEventArgs& event_args) {
  NOTREACHED() << "Unfeasible event " << event_args.event
               << " in state " << state_;
  return state_;
}
    553 
    554 std::string GoogleStreamingRemoteEngine::GetAcceptedLanguages() const {
    555   std::string langs = config_.language;
    556   if (langs.empty() && url_context_.get()) {
    557     // If no language is provided then we use the first from the accepted
    558     // language list. If this list is empty then it defaults to "en-US".
    559     // Example of the contents of this list: "es,en-GB;q=0.8", ""
    560     net::URLRequestContext* request_context =
    561         url_context_->GetURLRequestContext();
    562     DCHECK(request_context);
    563     // TODO(pauljensen): GoogleStreamingRemoteEngine should be constructed with
    564     // a reference to the HttpUserAgentSettings rather than accessing the
    565     // accept language through the URLRequestContext.
    566     std::string accepted_language_list = request_context->GetAcceptLanguage();
    567     size_t separator = accepted_language_list.find_first_of(",;");
    568     if (separator != std::string::npos)
    569       langs = accepted_language_list.substr(0, separator);
    570   }
    571   if (langs.empty())
    572     langs = "en-US";
    573   return langs;
    574 }
    575 
    576 // TODO(primiano): Is there any utility in the codebase that already does this?
    577 std::string GoogleStreamingRemoteEngine::GenerateRequestKey() const {
    578   const int64 kKeepLowBytes = GG_LONGLONG(0x00000000FFFFFFFF);
    579   const int64 kKeepHighBytes = GG_LONGLONG(0xFFFFFFFF00000000);
    580 
    581   // Just keep the least significant bits of timestamp, in order to reduce
    582   // probability of collisions.
    583   int64 key = (base::Time::Now().ToInternalValue() & kKeepLowBytes) |
    584               (base::RandUint64() & kKeepHighBytes);
    585   return base::HexEncode(reinterpret_cast<void*>(&key), sizeof(key));
    586 }
    587 
// Wraps the FSM event value; the optional payload members (audio data,
// downstream response) are filled in by callers as needed.
GoogleStreamingRemoteEngine::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value) {
}

GoogleStreamingRemoteEngine::FSMEventArgs::~FSMEventArgs() {
}
    594 
    595 }  // namespace content
    596