Home | History | Annotate | Download | only in speech
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "content/browser/speech/google_streaming_remote_engine.h"
      6 
      7 #include <vector>
      8 
      9 #include "base/bind.h"
     10 #include "base/command_line.h"
     11 #include "base/rand_util.h"
     12 #include "base/strings/string_number_conversions.h"
     13 #include "base/strings/string_util.h"
     14 #include "base/strings/utf_string_conversions.h"
     15 #include "base/time/time.h"
     16 #include "content/browser/speech/audio_buffer.h"
     17 #include "content/browser/speech/proto/google_streaming_api.pb.h"
     18 #include "content/public/common/content_switches.h"
     19 #include "content/public/common/speech_recognition_error.h"
     20 #include "content/public/common/speech_recognition_result.h"
     21 #include "google_apis/google_api_keys.h"
     22 #include "net/base/escape.h"
     23 #include "net/base/load_flags.h"
     24 #include "net/url_request/url_fetcher.h"
     25 #include "net/url_request/url_request_context.h"
     26 #include "net/url_request/url_request_context_getter.h"
     27 #include "net/url_request/url_request_status.h"
     28 
     29 using net::URLFetcher;
     30 
     31 namespace content {
     32 namespace {
     33 
// Endpoints of the full-duplex speech recognition webservice. The downstream
// (results) and upstream (audio) connections are paired server-side via the
// "pair=" URL argument (see GenerateRequestKey()).
const char kWebServiceBaseUrl[] =
    "https://www.google.com/speech-api/full-duplex/v1";
const char kDownstreamUrl[] = "/down?";
const char kUpstreamUrl[] = "/up?";
// Duration of audio fed to the encoder per TakeAudioChunk() call (see
// GetDesiredAudioChunkDurationMs()).
const int kAudioPacketIntervalMs = 100;
const AudioEncoder::Codec kDefaultAudioCodec = AudioEncoder::CODEC_FLAC;

// This matches the maximum maxAlternatives value supported by the server.
const uint32 kMaxMaxAlternatives = 30;
     43 
     44 // TODO(hans): Remove this and other logging when we don't need it anymore.
     45 void DumpResponse(const std::string& response) {
     46   DVLOG(1) << "------------";
     47   proto::SpeechRecognitionEvent event;
     48   if (!event.ParseFromString(response)) {
     49     DVLOG(1) << "Parse failed!";
     50     return;
     51   }
     52   if (event.has_status())
     53     DVLOG(1) << "STATUS\t" << event.status();
     54   for (int i = 0; i < event.result_size(); ++i) {
     55     DVLOG(1) << "RESULT #" << i << ":";
     56     const proto::SpeechRecognitionResult& res = event.result(i);
     57     if (res.has_final())
     58       DVLOG(1) << "  FINAL:\t" << res.final();
     59     if (res.has_stability())
     60       DVLOG(1) << "  STABILITY:\t" << res.stability();
     61     for (int j = 0; j < res.alternative_size(); ++j) {
     62       const proto::SpeechRecognitionAlternative& alt =
     63           res.alternative(j);
     64       if (alt.has_confidence())
     65         DVLOG(1) << "    CONFIDENCE:\t" << alt.confidence();
     66       if (alt.has_transcript())
     67         DVLOG(1) << "    TRANSCRIPT:\t" << alt.transcript();
     68     }
     69   }
     70 }
     71 
     72 std::string GetAPIKey() {
     73   const CommandLine& command_line = *CommandLine::ForCurrentProcess();
     74   if (command_line.HasSwitch(switches::kSpeechRecognitionWebserviceKey)) {
     75     DVLOG(1) << "GetAPIKey() used key from command-line.";
     76     return command_line.GetSwitchValueASCII(
     77         switches::kSpeechRecognitionWebserviceKey);
     78   }
     79 
     80   std::string api_key = google_apis::GetAPIKey();
     81   if (api_key.empty())
     82     DVLOG(1) << "GetAPIKey() returned empty string!";
     83 
     84   return api_key;
     85 }
     86 
     87 }  // namespace
     88 
// URLFetcher ids, exposed so tests can tell the two fetchers apart.
const int GoogleStreamingRemoteEngine::kUpstreamUrlFetcherIdForTests = 0;
const int GoogleStreamingRemoteEngine::kDownstreamUrlFetcherIdForTests = 1;
// Webservice status values (named after the protocol; see the status handling
// in ProcessDownstreamResponse()).
const int GoogleStreamingRemoteEngine::kWebserviceStatusNoError = 0;
const int GoogleStreamingRemoteEngine::kWebserviceStatusErrorNoMatch = 5;
     93 
// The engine starts idle; the encoder and the two URLFetchers are created
// lazily in ConnectBothStreams() when recognition starts.
GoogleStreamingRemoteEngine::GoogleStreamingRemoteEngine(
    net::URLRequestContextGetter* context)
    : url_context_(context),
      previous_response_length_(0),
      got_last_definitive_result_(false),
      is_dispatching_event_(false),
      state_(STATE_IDLE) {}

GoogleStreamingRemoteEngine::~GoogleStreamingRemoteEngine() {}
    103 
// Stores the recognition configuration; it is read later when the streams
// are connected (sample rate, language, profanity filter, etc.).
void GoogleStreamingRemoteEngine::SetConfig(
    const SpeechRecognitionEngineConfig& config) {
  config_ = config;
}

// The four public entry points below simply convert the call into the
// corresponding FSM event and dispatch it synchronously.
void GoogleStreamingRemoteEngine::StartRecognition() {
  FSMEventArgs event_args(EVENT_START_RECOGNITION);
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::EndRecognition() {
  FSMEventArgs event_args(EVENT_END_RECOGNITION);
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::TakeAudioChunk(const AudioChunk& data) {
  FSMEventArgs event_args(EVENT_AUDIO_CHUNK);
  // |data| stays alive for the duration of the dispatch (see the contract of
  // the FSM functions: event_args members are stable during the call).
  event_args.audio_data = &data;
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::AudioChunksEnded() {
  FSMEventArgs event_args(EVENT_AUDIO_CHUNKS_ENDED);
  DispatchEvent(event_args);
}
    129 
// net::URLFetcherDelegate callback: the fetch (upstream or downstream)
// finished; forward as a complete response.
void GoogleStreamingRemoteEngine::OnURLFetchComplete(const URLFetcher* source) {
  const bool kResponseComplete = true;
  DispatchHTTPResponse(source, kResponseComplete);
}

// net::URLFetcherDelegate callback: more downstream bytes arrived; forward
// as a partial response.
void GoogleStreamingRemoteEngine::OnURLFetchDownloadProgress(
    const URLFetcher* source, int64 current, int64 total) {
  const bool kPartialResponse = false;
  DispatchHTTPResponse(source, kPartialResponse);
}
    140 
    141 void GoogleStreamingRemoteEngine::DispatchHTTPResponse(const URLFetcher* source,
    142                                                        bool end_of_response) {
    143   DCHECK(CalledOnValidThread());
    144   DCHECK(source);
    145   const bool response_is_good = source->GetStatus().is_success() &&
    146                                 source->GetResponseCode() == 200;
    147   std::string response;
    148   if (response_is_good)
    149     source->GetResponseAsString(&response);
    150   const size_t current_response_length = response.size();
    151 
    152   DVLOG(1) << (source == downstream_fetcher_.get() ? "Downstream" : "Upstream")
    153            << "HTTP, code: " << source->GetResponseCode()
    154            << "      length: " << current_response_length
    155            << "      eor: " << end_of_response;
    156 
    157   // URLFetcher provides always the entire response buffer, but we are only
    158   // interested in the fresh data introduced by the last chunk. Therefore, we
    159   // drop the previous content we have already processed.
    160   if (current_response_length != 0) {
    161     DCHECK_GE(current_response_length, previous_response_length_);
    162     response.erase(0, previous_response_length_);
    163     previous_response_length_ = current_response_length;
    164   }
    165 
    166   if (!response_is_good && source == downstream_fetcher_.get()) {
    167     DVLOG(1) << "Downstream error " << source->GetResponseCode();
    168     FSMEventArgs event_args(EVENT_DOWNSTREAM_ERROR);
    169     DispatchEvent(event_args);
    170     return;
    171   }
    172   if (!response_is_good && source == upstream_fetcher_.get()) {
    173     DVLOG(1) << "Upstream error " << source->GetResponseCode()
    174              << " EOR " << end_of_response;
    175     FSMEventArgs event_args(EVENT_UPSTREAM_ERROR);
    176     DispatchEvent(event_args);
    177     return;
    178   }
    179 
    180   // Ignore incoming data on the upstream connection.
    181   if (source == upstream_fetcher_.get())
    182     return;
    183 
    184   DCHECK(response_is_good && source == downstream_fetcher_.get());
    185 
    186   // The downstream response is organized in chunks, whose size is determined
    187   // by a 4 bytes prefix, transparently handled by the ChunkedByteBuffer class.
    188   // Such chunks are sent by the speech recognition webservice over the HTTP
    189   // downstream channel using HTTP chunked transfer (unrelated to our chunks).
    190   // This function is called every time an HTTP chunk is received by the
    191   // url fetcher. However there isn't any particular matching beween our
    192   // protocol chunks and HTTP chunks, in the sense that a single HTTP chunk can
    193   // contain a portion of one chunk or even more chunks together.
    194   chunked_byte_buffer_.Append(response);
    195 
    196   // A single HTTP chunk can contain more than one data chunk, thus the while.
    197   while (chunked_byte_buffer_.HasChunks()) {
    198     FSMEventArgs event_args(EVENT_DOWNSTREAM_RESPONSE);
    199     event_args.response = chunked_byte_buffer_.PopChunk();
    200     DCHECK(event_args.response.get());
    201     DumpResponse(std::string(event_args.response->begin(),
    202                              event_args.response->end()));
    203     DispatchEvent(event_args);
    204   }
    205   if (end_of_response) {
    206     FSMEventArgs event_args(EVENT_DOWNSTREAM_CLOSED);
    207     DispatchEvent(event_args);
    208   }
    209 }
    210 
// A recognition is pending whenever the FSM has left STATE_IDLE (i.e. from
// ConnectBothStreams() until the streams are torn down by Abort()/Close*).
bool GoogleStreamingRemoteEngine::IsRecognitionPending() const {
  DCHECK(CalledOnValidThread());
  return state_ != STATE_IDLE;
}

// Callers should feed audio in packets of this duration (see
// kAudioPacketIntervalMs).
int GoogleStreamingRemoteEngine::GetDesiredAudioChunkDurationMs() const {
  return kAudioPacketIntervalMs;
}
    219 
    220 // -----------------------  Core FSM implementation ---------------------------
    221 
// Single entry point of the FSM: validates the event, enforces non-reentrancy
// and performs the state transition synchronously.
void GoogleStreamingRemoteEngine::DispatchEvent(
    const FSMEventArgs& event_args) {
  DCHECK(CalledOnValidThread());
  DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
  DCHECK_LE(state_, STATE_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the rules
  // and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  state_ = ExecuteTransitionAndGetNextState(event_args);

  is_dispatching_event_ = false;
}
    237 
// The FSM transition table: for the current |state_| and the incoming event,
// runs the matching evolution function and returns the next state. Every
// (state, event) pair is handled explicitly; impossible combinations fall
// through to NotFeasible().
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ExecuteTransitionAndGetNextState(
    const FSMEventArgs& event_args) {
  const FSMEvent event = event_args.event;
  switch (state_) {
    case STATE_IDLE:
      switch (event) {
        case EVENT_START_RECOGNITION:
          return ConnectBothStreams(event_args);
        case EVENT_END_RECOGNITION:
        // Note AUDIO_CHUNK and AUDIO_END events can remain enqueued in case of
        // abort, so we just silently drop them here.
        case EVENT_AUDIO_CHUNK:
        case EVENT_AUDIO_CHUNKS_ENDED:
        // DOWNSTREAM_CLOSED can be received if we end up here due to an error.
        case EVENT_DOWNSTREAM_CLOSED:
          return DoNothing(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
        case EVENT_DOWNSTREAM_RESPONSE:
          return NotFeasible(event_args);
      }
      break;
    case STATE_BOTH_STREAMS_CONNECTED:
      switch (event) {
        case EVENT_AUDIO_CHUNK:
          return TransmitAudioUpstream(event_args);
        case EVENT_DOWNSTREAM_RESPONSE:
          return ProcessDownstreamResponse(event_args);
        case EVENT_AUDIO_CHUNKS_ENDED:
          return CloseUpstreamAndWaitForResults(event_args);
        case EVENT_END_RECOGNITION:
          return AbortSilently(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
        case EVENT_DOWNSTREAM_CLOSED:
          return AbortWithError(event_args);
        case EVENT_START_RECOGNITION:
          return NotFeasible(event_args);
      }
      break;
    case STATE_WAITING_DOWNSTREAM_RESULTS:
      switch (event) {
        case EVENT_DOWNSTREAM_RESPONSE:
          return ProcessDownstreamResponse(event_args);
        case EVENT_DOWNSTREAM_CLOSED:
          return RaiseNoMatchErrorIfGotNoResults(event_args);
        case EVENT_END_RECOGNITION:
          return AbortSilently(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
          return AbortWithError(event_args);
        case EVENT_START_RECOGNITION:
        case EVENT_AUDIO_CHUNK:
        case EVENT_AUDIO_CHUNKS_ENDED:
          return NotFeasible(event_args);
      }
      break;
  }
  return NotFeasible(event_args);
}
    299 
    300 // ----------- Contract for all the FSM evolution functions below -------------
    301 //  - Are guaranteed to be executed in the same thread (IO, except for tests);
    302 //  - Are guaranteed to be not reentrant (themselves and each other);
    303 //  - event_args members are guaranteed to be stable during the call;
    304 
// Creates the audio encoder and opens the paired downstream (GET, results)
// and upstream (chunked POST, audio) connections, linked by the random
// "pair=" key. Returns STATE_BOTH_STREAMS_CONNECTED.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ConnectBothStreams(const FSMEventArgs&) {
  DCHECK(!upstream_fetcher_.get());
  DCHECK(!downstream_fetcher_.get());

  encoder_.reset(AudioEncoder::Create(kDefaultAudioCodec,
                                      config_.audio_sample_rate,
                                      config_.audio_num_bits_per_sample));
  DCHECK(encoder_.get());
  const std::string request_key = GenerateRequestKey();

  // Setup downstream fetcher.
  std::vector<std::string> downstream_args;
  downstream_args.push_back(
      "key=" + net::EscapeQueryParamValue(GetAPIKey(), true));
  downstream_args.push_back("pair=" + request_key);
  downstream_args.push_back("output=pb");
  GURL downstream_url(std::string(kWebServiceBaseUrl) +
                      std::string(kDownstreamUrl) +
                      JoinString(downstream_args, '&'));

  downstream_fetcher_.reset(URLFetcher::Create(
      kDownstreamUrlFetcherIdForTests, downstream_url, URLFetcher::GET, this));
  downstream_fetcher_->SetRequestContext(url_context_.get());
  // Never attach cookies/auth to the recognition requests.
  downstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                                    net::LOAD_DO_NOT_SEND_COOKIES |
                                    net::LOAD_DO_NOT_SEND_AUTH_DATA);
  downstream_fetcher_->Start();

  // Setup upstream fetcher.
  // TODO(hans): Support for user-selected grammars.
  std::vector<std::string> upstream_args;
  upstream_args.push_back("key=" +
      net::EscapeQueryParamValue(GetAPIKey(), true));
  upstream_args.push_back("pair=" + request_key);
  upstream_args.push_back("output=pb");
  upstream_args.push_back(
      "lang=" + net::EscapeQueryParamValue(GetAcceptedLanguages(), true));
  upstream_args.push_back(
      config_.filter_profanities ? "pFilter=2" : "pFilter=0");
  if (config_.max_hypotheses > 0U) {
    // The server caps maxAlternatives at kMaxMaxAlternatives.
    int max_alternatives = std::min(kMaxMaxAlternatives,
                                    config_.max_hypotheses);
    upstream_args.push_back("maxAlternatives=" +
                            base::UintToString(max_alternatives));
  }
  upstream_args.push_back("client=chromium");
  if (!config_.hardware_info.empty()) {
    upstream_args.push_back(
        "xhw=" + net::EscapeQueryParamValue(config_.hardware_info, true));
  }
  if (config_.continuous)
    upstream_args.push_back("continuous");
  if (config_.interim_results)
    upstream_args.push_back("interim");

  GURL upstream_url(std::string(kWebServiceBaseUrl) +
                    std::string(kUpstreamUrl) +
                    JoinString(upstream_args, '&'));

  upstream_fetcher_.reset(URLFetcher::Create(
      kUpstreamUrlFetcherIdForTests, upstream_url, URLFetcher::POST, this));
  // Audio is streamed as it is captured, via chunked transfer encoding.
  upstream_fetcher_->SetChunkedUpload(encoder_->mime_type());
  upstream_fetcher_->SetRequestContext(url_context_.get());
  upstream_fetcher_->SetReferrer(config_.origin_url);
  upstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                                  net::LOAD_DO_NOT_SEND_COOKIES |
                                  net::LOAD_DO_NOT_SEND_AUTH_DATA);
  upstream_fetcher_->Start();
  // Reset the bookkeeping used by DispatchHTTPResponse() to strip already
  // processed downstream bytes.
  previous_response_length_ = 0;
  return STATE_BOTH_STREAMS_CONNECTED;
}
    377 
// Encodes the incoming audio chunk and appends the encoded bytes to the
// chunked upstream upload. Keeps the current state.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::TransmitAudioUpstream(
    const FSMEventArgs& event_args) {
  DCHECK(upstream_fetcher_.get());
  DCHECK(event_args.audio_data.get());
  const AudioChunk& audio = *(event_args.audio_data.get());

  DCHECK_EQ(audio.bytes_per_sample(), config_.audio_num_bits_per_sample / 8);
  encoder_->Encode(audio);
  scoped_refptr<AudioChunk> encoded_data(encoder_->GetEncodedDataAndClear());
  // |false| = this is not the last chunk of the upload.
  upstream_fetcher_->AppendChunkToUpload(encoded_data->AsString(), false);
  return state_;
}
    391 
// Parses one protocol chunk as a proto::SpeechRecognitionEvent and either:
// ignores it (empty keep-alive event), aborts with the mapped error code
// (non-success status), or converts its results/alternatives into
// SpeechRecognitionResults and forwards them to the delegate.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ProcessDownstreamResponse(
    const FSMEventArgs& event_args) {
  DCHECK(event_args.response.get());

  proto::SpeechRecognitionEvent ws_event;
  if (!ws_event.ParseFromString(std::string(event_args.response->begin(),
                                            event_args.response->end())))
    return AbortWithError(event_args);

  // An empty (default) event is used to notify us that the upstream has
  // been connected. Ignore.
  if (!ws_event.result_size() && (!ws_event.has_status() ||
      ws_event.status() == proto::SpeechRecognitionEvent::STATUS_SUCCESS)) {
    DVLOG(1) << "Received empty response";
    return state_;
  }

  // Map every non-success webservice status to a SpeechRecognitionErrorCode
  // and abort the recognition.
  if (ws_event.has_status()) {
    switch (ws_event.status()) {
      case proto::SpeechRecognitionEvent::STATUS_SUCCESS:
        break;
      case proto::SpeechRecognitionEvent::STATUS_NO_SPEECH:
        return Abort(SPEECH_RECOGNITION_ERROR_NO_SPEECH);
      case proto::SpeechRecognitionEvent::STATUS_ABORTED:
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_AUDIO_CAPTURE:
        return Abort(SPEECH_RECOGNITION_ERROR_AUDIO);
      case proto::SpeechRecognitionEvent::STATUS_NETWORK:
        return Abort(SPEECH_RECOGNITION_ERROR_NETWORK);
      case proto::SpeechRecognitionEvent::STATUS_NOT_ALLOWED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_SERVICE_NOT_ALLOWED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_BAD_GRAMMAR:
        return Abort(SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR);
      case proto::SpeechRecognitionEvent::STATUS_LANGUAGE_NOT_SUPPORTED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
    }
  }

  // Convert the proto results into the content-layer representation.
  SpeechRecognitionResults results;
  for (int i = 0; i < ws_event.result_size(); ++i) {
    const proto::SpeechRecognitionResult& ws_result = ws_event.result(i);
    results.push_back(SpeechRecognitionResult());
    SpeechRecognitionResult& result = results.back();
    // A result lacking final=true is provisional (interim).
    result.is_provisional = !(ws_result.has_final() && ws_result.final());

    if (!result.is_provisional)
      got_last_definitive_result_ = true;

    for (int j = 0; j < ws_result.alternative_size(); ++j) {
      const proto::SpeechRecognitionAlternative& ws_alternative =
          ws_result.alternative(j);
      SpeechRecognitionHypothesis hypothesis;
      // Fall back to the result-level stability when the alternative has no
      // confidence of its own.
      if (ws_alternative.has_confidence())
        hypothesis.confidence = ws_alternative.confidence();
      else if (ws_result.has_stability())
        hypothesis.confidence = ws_result.stability();
      DCHECK(ws_alternative.has_transcript());
      // TODO(hans): Perhaps the transcript should be required in the proto?
      if (ws_alternative.has_transcript())
        hypothesis.utterance = UTF8ToUTF16(ws_alternative.transcript());

      result.hypotheses.push_back(hypothesis);
    }
  }

  delegate()->OnSpeechRecognitionEngineResults(results);

  return state_;
}
    467 
// Called when the downstream closes cleanly: if no definitive result was ever
// delivered, emit an empty result set so the client knows recognition ended
// without a match, then tear down silently.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::RaiseNoMatchErrorIfGotNoResults(
    const FSMEventArgs& event_args) {
  if (!got_last_definitive_result_) {
    // Provide an empty result to notify that recognition is ended with no
    // errors, yet neither any further results.
    delegate()->OnSpeechRecognitionEngineResults(SpeechRecognitionResults());
  }
  return AbortSilently(event_args);
}
    478 
// Finalizes the upstream upload (flushing the encoder with a trailing packet
// of silence) and moves to STATE_WAITING_DOWNSTREAM_RESULTS, where only
// downstream events are expected.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::CloseUpstreamAndWaitForResults(
    const FSMEventArgs&) {
  DCHECK(upstream_fetcher_.get());
  DCHECK(encoder_.get());

  DVLOG(1) <<  "Closing upstream.";

  // The encoder requires a non-empty final buffer. So we encode a packet
  // of silence in case encoder had no data already.
  std::vector<short> samples(
      config_.audio_sample_rate * kAudioPacketIntervalMs / 1000);
  scoped_refptr<AudioChunk> dummy_chunk =
      new AudioChunk(reinterpret_cast<uint8*>(&samples[0]),
                     samples.size() * sizeof(short),
                     encoder_->bits_per_sample() / 8);
  encoder_->Encode(*dummy_chunk.get());
  encoder_->Flush();
  scoped_refptr<AudioChunk> encoded_dummy_data =
      encoder_->GetEncodedDataAndClear();
  DCHECK(!encoded_dummy_data->IsEmpty());
  // No more audio will be encoded from here on.
  encoder_.reset();

  // |true| marks this as the last chunk, terminating the chunked upload.
  upstream_fetcher_->AppendChunkToUpload(encoded_dummy_data->AsString(), true);
  got_last_definitive_result_ = false;
  return STATE_WAITING_DOWNSTREAM_RESULTS;
}
    506 
// Releases the downstream fetcher (the upstream must already be gone) and
// returns the FSM to idle.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::CloseDownstream(const FSMEventArgs&) {
  DCHECK(!upstream_fetcher_.get());
  DCHECK(downstream_fetcher_.get());

  DVLOG(1) <<  "Closing downstream.";
  downstream_fetcher_.reset();
  return STATE_IDLE;
}
    516 
// Tears everything down without reporting an error to the delegate.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::AbortSilently(const FSMEventArgs&) {
  return Abort(SPEECH_RECOGNITION_ERROR_NONE);
}

// Tears everything down reporting a network error to the delegate.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::AbortWithError(const FSMEventArgs&) {
  return Abort(SPEECH_RECOGNITION_ERROR_NETWORK);
}
    526 
// Common teardown path: notifies the delegate (unless |error_code| is
// SPEECH_RECOGNITION_ERROR_NONE), destroys both fetchers and the encoder,
// and returns the FSM to idle.
GoogleStreamingRemoteEngine::FSMState GoogleStreamingRemoteEngine::Abort(
    SpeechRecognitionErrorCode error_code) {
  DVLOG(1) << "Aborting with error " << error_code;

  if (error_code != SPEECH_RECOGNITION_ERROR_NONE) {
    delegate()->OnSpeechRecognitionEngineError(
        SpeechRecognitionError(error_code));
  }
  downstream_fetcher_.reset();
  upstream_fetcher_.reset();
  encoder_.reset();
  return STATE_IDLE;
}
    540 
// No-op transition: the event is deliberately ignored in the current state.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::DoNothing(const FSMEventArgs&) {
  return state_;
}

// Transition for (state, event) pairs that should be impossible; trips a
// NOTREACHED in debug builds and keeps the current state otherwise.
GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::NotFeasible(const FSMEventArgs& event_args) {
  NOTREACHED() << "Unfeasible event " << event_args.event
               << " in state " << state_;
  return state_;
}
    552 
    553 std::string GoogleStreamingRemoteEngine::GetAcceptedLanguages() const {
    554   std::string langs = config_.language;
    555   if (langs.empty() && url_context_.get()) {
    556     // If no language is provided then we use the first from the accepted
    557     // language list. If this list is empty then it defaults to "en-US".
    558     // Example of the contents of this list: "es,en-GB;q=0.8", ""
    559     net::URLRequestContext* request_context =
    560         url_context_->GetURLRequestContext();
    561     DCHECK(request_context);
    562     // TODO(pauljensen): GoogleStreamingRemoteEngine should be constructed with
    563     // a reference to the HttpUserAgentSettings rather than accessing the
    564     // accept language through the URLRequestContext.
    565     std::string accepted_language_list = request_context->GetAcceptLanguage();
    566     size_t separator = accepted_language_list.find_first_of(",;");
    567     if (separator != std::string::npos)
    568       langs = accepted_language_list.substr(0, separator);
    569   }
    570   if (langs.empty())
    571     langs = "en-US";
    572   return langs;
    573 }
    574 
    575 // TODO(primiano): Is there any utility in the codebase that already does this?
    576 std::string GoogleStreamingRemoteEngine::GenerateRequestKey() const {
    577   const int64 kKeepLowBytes = GG_LONGLONG(0x00000000FFFFFFFF);
    578   const int64 kKeepHighBytes = GG_LONGLONG(0xFFFFFFFF00000000);
    579 
    580   // Just keep the least significant bits of timestamp, in order to reduce
    581   // probability of collisions.
    582   int64 key = (base::Time::Now().ToInternalValue() & kKeepLowBytes) |
    583               (base::RandUint64() & kKeepHighBytes);
    584   return base::HexEncode(reinterpret_cast<void*>(&key), sizeof(key));
    585 }
    586 
// FSMEventArgs carries an event plus its optional payloads (audio_data,
// response), which stay valid for the duration of the dispatch.
GoogleStreamingRemoteEngine::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value) {
}

GoogleStreamingRemoteEngine::FSMEventArgs::~FSMEventArgs() {
}
    593 
    594 }  // namespace content
    595