/*
 * Copyright (C) 2009 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.android.inputmethod.voice;

import com.android.inputmethod.latin.EditingUtil;
import com.android.inputmethod.latin.R;

import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.Message;
import android.os.Parcelable;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.inputmethod.InputConnection;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;

/**
 * Speech recognition input, including both user interface and a background
 * process to stream audio to the network recognizer. This class supplies a
 * View (getView()), which it updates as recognition occurs. The user of this
 * class is responsible for making the view visible to the user, as well as
 * handling various events returned through UiListener.
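 *
 * <p>A minimal usage sketch, for illustration only; the host IME, the
 * {@code imeContext} and {@code fieldContext} variables, and the listener
 * bodies are assumptions, not part of this class:
 * <pre>{@code
 * VoiceInput voiceInput = new VoiceInput(imeContext, new VoiceInput.UiListener() {
 *     public void onVoiceResults(List<String> results,
 *             Map<String, List<CharSequence>> alternatives) {
 *         // Commit results.get(0) to the editor; keep alternatives around
 *         // for a correction UI.
 *     }
 *     public void onCancelVoice() {
 *         // Switch back to the keyboard view.
 *     }
 * });
 * // Make voiceInput.getView() visible to the user, then:
 * voiceInput.startListening(fieldContext, false);  // false: not swipe-initiated
 * }</pre>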
 */
public class VoiceInput implements OnClickListener {
    private static final String TAG = "VoiceInput";
    private static final String EXTRA_RECOGNITION_CONTEXT =
            "android.speech.extras.RECOGNITION_CONTEXT";
    private static final String EXTRA_CALLING_PACKAGE = "calling_package";
    private static final String EXTRA_ALTERNATES = "android.speech.extra.ALTERNATES";
    private static final int MAX_ALT_LIST_LENGTH = 6;

    private static final String DEFAULT_RECOMMENDED_PACKAGES =
            "com.android.mms " +
            "com.google.android.gm " +
            "com.google.android.talk " +
            "com.google.android.apps.googlevoice " +
            "com.android.email " +
            "com.android.browser ";

    // WARNING! Before enabling this, fix the problem with calling getExtractedText() in
    // landscape view. It causes extracted-text updates to be rejected due to a token mismatch.
    public static boolean ENABLE_WORD_CORRECTIONS = true;

    // Dummy word suggestion that means "delete current word"
    public static final String DELETE_SYMBOL = " \u00D7 ";  // multiplication sign

    private Whitelist mRecommendedList;
    private Whitelist mBlacklist;

    private VoiceInputLogger mLogger;

    // Names of a few extras defined in VoiceSearch's RecognitionController.
    // Note, the version of VoiceSearch that shipped in Froyo returns the raw
    // RecognitionClientAlternates protocol buffer under the key "alternates",
    // so a VoiceSearch Market update must be installed on Froyo devices in
    // order to see alternatives.
    private static final String ALTERNATES_BUNDLE = "alternates_bundle";

    // This is copied from the VoiceSearch app.
    private static final class AlternatesBundleKeys {
        public static final String ALTERNATES = "alternates";
        public static final String CONFIDENCE = "confidence";
        public static final String LENGTH = "length";
        public static final String MAX_SPAN_LENGTH = "max_span_length";
        public static final String SPANS = "spans";
        public static final String SPAN_KEY_DELIMITER = ":";
        public static final String START = "start";
        public static final String TEXT = "text";
    }
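
    // As consumed by onResults() below, the alternates bundle is expected to
    // be shaped roughly like this (inferred from the parsing code in this
    // file, not from VoiceSearch documentation):
    //
    //   ALTERNATES_BUNDLE
    //     SPANS: Bundle keyed by span (keys delimited by SPAN_KEY_DELIMITER),
    //       each entry containing:
    //         START, LENGTH: int offsets, in words, into the top result
    //         ALTERNATES: Parcelable[] of Bundles, each carrying TEXT: String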

    // Names of a few intent extras defined in VoiceSearch's RecognitionService.
    // These let us tweak the endpointer parameters.
    private static final String EXTRA_SPEECH_MINIMUM_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_MINIMUM_LENGTH_MILLIS";
    private static final String EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS";
    private static final String EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS";

    // The usual endpointer default value for input complete silence length is 0.5 seconds,
    // but that's used for things like voice search. For dictation-like voice input like this,
    // we go with a more liberal value of 1 second. This value will only be used if a value
    // is not provided from Gservices.
    private static final String INPUT_COMPLETE_SILENCE_LENGTH_DEFAULT_VALUE_MILLIS = "1000";

    // States of the recognition flow (kept in mState); recorded for logging purposes.
    public static final int DEFAULT = 0;
    public static final int LISTENING = 1;
    public static final int WORKING = 2;
    public static final int ERROR = 3;

    private int mAfterVoiceInputDeleteCount = 0;
    private int mAfterVoiceInputInsertCount = 0;
    private int mAfterVoiceInputInsertPunctuationCount = 0;
    private int mAfterVoiceInputCursorPos = 0;
    private int mAfterVoiceInputSelectionSpan = 0;

    private int mState = DEFAULT;

    private static final int MSG_CLOSE_ERROR_DIALOG = 1;

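    // Handler used to auto-dismiss the error dialog: MSG_CLOSE_ERROR_DIALOG
    // resets the UI to the default state and notifies the client that voice
    // input was cancelled.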
    private final Handler mHandler = new Handler() {
        @Override
        public void handleMessage(Message msg) {
            if (msg.what == MSG_CLOSE_ERROR_DIALOG) {
                mState = DEFAULT;
                mRecognitionView.finish();
                mUiListener.onCancelVoice();
            }
        }
    };

    /**
     * Events relating to the recognition UI. You must implement these.
     */
    public interface UiListener {

        /**
         * @param recognitionResults a list of transcripts of what the user
         *   spoke, sorted by decreasing likelihood.
         */
        public void onVoiceResults(
            List<String> recognitionResults,
            Map<String, List<CharSequence>> alternatives);

        /**
         * Called when the user cancels speech recognition.
         */
        public void onCancelVoice();
    }

    private SpeechRecognizer mSpeechRecognizer;
    private RecognitionListener mRecognitionListener;
    private RecognitionView mRecognitionView;
    private UiListener mUiListener;
    private Context mContext;

    /**
     * @param context the service or activity in which we're running.
     * @param uiHandler object to receive events from VoiceInput.
     */
    public VoiceInput(Context context, UiListener uiHandler) {
        mLogger = VoiceInputLogger.getLogger(context);
        mRecognitionListener = new ImeRecognitionListener();
        mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(context);
        mSpeechRecognizer.setRecognitionListener(mRecognitionListener);
        mUiListener = uiHandler;
        mContext = context;
        newView();

        String recommendedPackages = SettingsUtil.getSettingsString(
                context.getContentResolver(),
                SettingsUtil.LATIN_IME_VOICE_INPUT_RECOMMENDED_PACKAGES,
                DEFAULT_RECOMMENDED_PACKAGES);

        mRecommendedList = new Whitelist();
        for (String recommendedPackage : recommendedPackages.split("\\s+")) {
            mRecommendedList.addApp(recommendedPackage);
        }

        mBlacklist = new Whitelist();
        mBlacklist.addApp("com.android.setupwizard");
    }

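    // The cursor position and selection span captured after a voice input
    // session; used by the text-modification counters below to attribute
    // subsequent edits for logging.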
    public void setCursorPos(int pos) {
        mAfterVoiceInputCursorPos = pos;
    }

    public int getCursorPos() {
        return mAfterVoiceInputCursorPos;
    }

    public void setSelectionSpan(int span) {
        mAfterVoiceInputSelectionSpan = span;
    }

    public int getSelectionSpan() {
        return mAfterVoiceInputSelectionSpan;
    }

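    // Each increment*Count() method below accumulates edits of its own kind
    // and flushes the counters of the other kinds to the logger, so that runs
    // of same-kind edits are aggregated into a single logged event.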
    public void incrementTextModificationDeleteCount(int count) {
        mAfterVoiceInputDeleteCount += count;
        // Send up intents for other text modification types
        if (mAfterVoiceInputInsertCount > 0) {
            logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
            mAfterVoiceInputInsertCount = 0;
        }
        if (mAfterVoiceInputInsertPunctuationCount > 0) {
            logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
            mAfterVoiceInputInsertPunctuationCount = 0;
        }
    }

    public void incrementTextModificationInsertCount(int count) {
        mAfterVoiceInputInsertCount += count;
        if (mAfterVoiceInputSelectionSpan > 0) {
            // If text was highlighted before inserting the char, count this as
            // a delete.
            mAfterVoiceInputDeleteCount += mAfterVoiceInputSelectionSpan;
        }
        // Send up intents for other text modification types
        if (mAfterVoiceInputDeleteCount > 0) {
            logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
            mAfterVoiceInputDeleteCount = 0;
        }
        if (mAfterVoiceInputInsertPunctuationCount > 0) {
            logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
            mAfterVoiceInputInsertPunctuationCount = 0;
        }
    }

    public void incrementTextModificationInsertPunctuationCount(int count) {
        mAfterVoiceInputInsertPunctuationCount += count;
        if (mAfterVoiceInputSelectionSpan > 0) {
            // If text was highlighted before inserting the char, count this as
            // a delete.
            mAfterVoiceInputDeleteCount += mAfterVoiceInputSelectionSpan;
        }
        // Send up intents for aggregated non-punctuation insertions
        if (mAfterVoiceInputDeleteCount > 0) {
            logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
            mAfterVoiceInputDeleteCount = 0;
        }
        if (mAfterVoiceInputInsertCount > 0) {
            logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
            mAfterVoiceInputInsertCount = 0;
        }
    }

    public void flushAllTextModificationCounters() {
        if (mAfterVoiceInputInsertCount > 0) {
            logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
            mAfterVoiceInputInsertCount = 0;
        }
        if (mAfterVoiceInputDeleteCount > 0) {
            logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
            mAfterVoiceInputDeleteCount = 0;
        }
        if (mAfterVoiceInputInsertPunctuationCount > 0) {
            logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
            mAfterVoiceInputInsertPunctuationCount = 0;
        }
    }

    /**
     * The configuration of the IME changed and may have caused the views to be laid out
     * again. Restore the state of the recognition view.
     */
    public void onConfigurationChanged() {
        mRecognitionView.restoreState();
    }

    /**
     * @return true if the field is blacklisted for voice input
     */
    public boolean isBlacklistedField(FieldContext context) {
        return mBlacklist.matches(context);
    }

    /**
     * Used to decide whether to show voice input hints for this field, etc.
     *
     * @return true if the field is recommended for voice input
     */
    public boolean isRecommendedField(FieldContext context) {
        return mRecommendedList.matches(context);
    }

    /**
     * Start listening for speech from the user. This will grab the microphone
     * and start updating the view provided by getView(). It is the caller's
     * responsibility to ensure that the view is visible to the user at this stage.
     *
     * @param context the same FieldContext supplied to voiceIsEnabled()
     * @param swipe whether this voice input was started by swipe, for logging purposes
     */
    public void startListening(FieldContext context, boolean swipe) {
        mState = DEFAULT;

        Locale locale = Locale.getDefault();
        String localeString = locale.getLanguage() + "-" + locale.getCountry();

        mLogger.start(localeString, swipe);

        mState = LISTENING;

        mRecognitionView.showInitializing();
        startListeningAfterInitialization(context);
    }

    /**
     * Called only after the recognition manager's initialization has completed.
     *
     * @param context context with which {@link #startListening(FieldContext, boolean)} was executed
     */
    private void startListeningAfterInitialization(FieldContext context) {
        Intent intent = makeIntent();
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, "");
        intent.putExtra(EXTRA_RECOGNITION_CONTEXT, context.getBundle());
        intent.putExtra(EXTRA_CALLING_PACKAGE, "VoiceIME");
        intent.putExtra(EXTRA_ALTERNATES, true);
        intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS,
                SettingsUtil.getSettingsInt(
                        mContext.getContentResolver(),
                        SettingsUtil.LATIN_IME_MAX_VOICE_RESULTS,
                        1));
        // Get endpointer params from Gservices.
        // TODO: Consider caching these values for improved performance on slower devices.
        final ContentResolver cr = mContext.getContentResolver();
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.LATIN_IME_SPEECH_MINIMUM_LENGTH_MILLIS,
                EXTRA_SPEECH_MINIMUM_LENGTH_MILLIS,
                null  /* rely on endpointer default */);
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.LATIN_IME_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
                EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
                INPUT_COMPLETE_SILENCE_LENGTH_DEFAULT_VALUE_MILLIS
                /* our default value is different from the endpointer's */);
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.
                        LATIN_IME_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
                EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
                null  /* rely on endpointer default */);

        mSpeechRecognizer.startListening(intent);
    }

    /**
     * Gets the value of the provided Gservices key, attempts to parse it into a long,
     * and if successful, puts the long value as an extra in the provided intent.
     */
    private void putEndpointerExtra(ContentResolver cr, Intent i,
            String gservicesKey, String intentExtraKey, String defaultValue) {
        long l = -1;
        String s = SettingsUtil.getSettingsString(cr, gservicesKey, defaultValue);
        if (s != null) {
            try {
                l = Long.valueOf(s);
            } catch (NumberFormatException e) {
                Log.e(TAG, "could not parse value for " + gservicesKey + ": " + s);
            }
        }

        if (l != -1) i.putExtra(intentExtraKey, l);
    }

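    /**
     * Releases the underlying {@link SpeechRecognizer}. No further recognition
     * can be started from this instance afterwards.
     */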
    public void destroy() {
        mSpeechRecognizer.destroy();
    }

    /**
     * Creates a new instance of the view that is returned by {@link #getView()}.
     * Clients should use this when a previously returned view is stuck in a
     * layout that is being thrown away and a new one is needed to show to the
     * user.
     */
    public void newView() {
        mRecognitionView = new RecognitionView(mContext, this);
    }

    /**
     * @return a view that shows the recognition flow--e.g., "Speak now" and
     * "working" dialogs.
     */
    public View getView() {
        return mRecognitionView.getView();
    }

    /**
     * Handle the cancel button.
     */
    public void onClick(View view) {
        switch (view.getId()) {
            case R.id.button:
                cancel();
                break;
        }
    }

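    // Thin wrappers that forward UI and editing events to the VoiceInputLogger.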
    public void logTextModifiedByTypingInsertion(int length) {
        mLogger.textModifiedByTypingInsertion(length);
    }

    public void logTextModifiedByTypingInsertionPunctuation(int length) {
        mLogger.textModifiedByTypingInsertionPunctuation(length);
    }

    public void logTextModifiedByTypingDeletion(int length) {
        mLogger.textModifiedByTypingDeletion(length);
    }

    public void logTextModifiedByChooseSuggestion(String suggestion, int index,
                                                  String wordSeparators, InputConnection ic) {
        EditingUtil.Range range = new EditingUtil.Range();
        String wordToBeReplaced = EditingUtil.getWordAtCursor(ic, wordSeparators, range);
        // If we enable phrase-based alternatives, only send up the first word
        // in suggestion and wordToBeReplaced.
        mLogger.textModifiedByChooseSuggestion(suggestion.length(), wordToBeReplaced.length(),
                                               index, wordToBeReplaced, suggestion);
    }

    public void logKeyboardWarningDialogShown() {
        mLogger.keyboardWarningDialogShown();
    }

    public void logKeyboardWarningDialogDismissed() {
        mLogger.keyboardWarningDialogDismissed();
    }

    public void logKeyboardWarningDialogOk() {
        mLogger.keyboardWarningDialogOk();
    }

    public void logKeyboardWarningDialogCancel() {
        mLogger.keyboardWarningDialogCancel();
    }

    public void logSwipeHintDisplayed() {
        mLogger.swipeHintDisplayed();
    }

    public void logPunctuationHintDisplayed() {
        mLogger.punctuationHintDisplayed();
    }

    public void logVoiceInputDelivered(int length) {
        mLogger.voiceInputDelivered(length);
    }

    public void logInputEnded() {
        mLogger.inputEnded();
    }

    public void flushLogs() {
        mLogger.flush();
    }

    private static Intent makeIntent() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

        // On Cupcake, use VoiceIMEHelper, since VoiceSearch does not provide a
        // recognition service there. On Donut, always use VoiceSearch, since
        // VoiceIMEHelper and VoiceSearch may conflict.
        if (Build.VERSION.RELEASE.equals("1.5")) {
            intent = intent.setClassName(
              "com.google.android.voiceservice",
              "com.google.android.voiceservice.IMERecognitionService");
        } else {
            intent = intent.setClassName(
              "com.google.android.voicesearch",
              "com.google.android.voicesearch.RecognitionService");
        }

        return intent;
    }

    /**
     * Cancel in-progress speech recognition.
     */
    public void cancel() {
        switch (mState) {
            case LISTENING:
                mLogger.cancelDuringListening();
                break;
            case WORKING:
                mLogger.cancelDuringWorking();
                break;
            case ERROR:
                mLogger.cancelDuringError();
                break;
        }
        mState = DEFAULT;

        // Remove all pending tasks (e.g., timers to cancel voice input)
        mHandler.removeMessages(MSG_CLOSE_ERROR_DIALOG);

        mSpeechRecognizer.cancel();
        mUiListener.onCancelVoice();
        mRecognitionView.finish();
    }

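    // Maps a SpeechRecognizer.ERROR_* code to a user-visible message. A
    // network timeout that occurs before the endpointer has fired is treated
    // as the user having spoken for too long rather than as a network problem.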
    private int getErrorStringId(int errorType, boolean endpointed) {
        switch (errorType) {
            // We use CLIENT_ERROR to signify that voice search is not available on the device.
            case SpeechRecognizer.ERROR_CLIENT:
                return R.string.voice_not_installed;
            case SpeechRecognizer.ERROR_NETWORK:
                return R.string.voice_network_error;
            case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
                return endpointed ?
                        R.string.voice_network_error : R.string.voice_too_much_speech;
            case SpeechRecognizer.ERROR_AUDIO:
                return R.string.voice_audio_error;
            case SpeechRecognizer.ERROR_SERVER:
                return R.string.voice_server_error;
            case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
                return R.string.voice_speech_timeout;
            case SpeechRecognizer.ERROR_NO_MATCH:
                return R.string.voice_no_match;
            default: return R.string.voice_error;
        }
    }

    private void onError(int errorType, boolean endpointed) {
        Log.i(TAG, "error " + errorType);
        mLogger.error(errorType);
        onError(mContext.getString(getErrorStringId(errorType, endpointed)));
    }

    private void onError(String error) {
        mState = ERROR;
        mRecognitionView.showError(error);
        // Wait a couple of seconds and then automatically dismiss the message.
        mHandler.sendMessageDelayed(Message.obtain(mHandler, MSG_CLOSE_ERROR_DIALOG), 2000);
    }

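    // Receives SpeechRecognizer callbacks: buffers raw audio so the
    // RecognitionView can render a waveform, tracks whether the endpointer
    // fired, and unpacks the results and per-word alternates for the client.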
    private class ImeRecognitionListener implements RecognitionListener {
        // Waveform data
        final ByteArrayOutputStream mWaveBuffer = new ByteArrayOutputStream();
        int mSpeechStart;
        private boolean mEndpointed = false;

        public void onReadyForSpeech(Bundle noiseParams) {
            mRecognitionView.showListening();
        }

        public void onBeginningOfSpeech() {
            mEndpointed = false;
            mSpeechStart = mWaveBuffer.size();
        }

        public void onRmsChanged(float rmsdB) {
            mRecognitionView.updateVoiceMeter(rmsdB);
        }

        public void onBufferReceived(byte[] buf) {
            try {
                mWaveBuffer.write(buf);
            } catch (IOException e) {
                // Ignore: the buffered audio only feeds the waveform display.
            }
        }

        public void onEndOfSpeech() {
            mEndpointed = true;
            mState = WORKING;
            mRecognitionView.showWorking(mWaveBuffer, mSpeechStart, mWaveBuffer.size());
        }

        public void onError(int errorType) {
            mState = ERROR;
            VoiceInput.this.onError(errorType, mEndpointed);
        }

        public void onResults(Bundle resultsBundle) {
            List<String> results = resultsBundle
                    .getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            // A VoiceSearch Market update is needed for Froyo IME clients to access
            // the alternates bundle. TODO: verify this.
            Bundle alternatesBundle = resultsBundle.getBundle(ALTERNATES_BUNDLE);
            mState = DEFAULT;

            final Map<String, List<CharSequence>> alternatives =
                new HashMap<String, List<CharSequence>>();

            if (ENABLE_WORD_CORRECTIONS && alternatesBundle != null && results.size() > 0) {
                // Use the top recognition result to map each alternative's start:length to a word.
                String[] words = results.get(0).split(" ");
                Bundle spansBundle = alternatesBundle.getBundle(AlternatesBundleKeys.SPANS);
                for (String key : spansBundle.keySet()) {
                    // Get the word to which these alternates correspond.
                    Bundle spanBundle = spansBundle.getBundle(key);
                    int start = spanBundle.getInt(AlternatesBundleKeys.START);
                    int length = spanBundle.getInt(AlternatesBundleKeys.LENGTH);
                    // Only keep alternatives that span a single word.
                    if (length == 1 && start < words.length) {
                        // Get the alternatives associated with the span.
                        // If a word appears twice in a recognition result,
                        // concatenate the alternatives for the word.
                        List<CharSequence> altList = alternatives.get(words[start]);
                        if (altList == null) {
                            altList = new ArrayList<CharSequence>();
                            alternatives.put(words[start], altList);
                        }
                        Parcelable[] alternatesArr = spanBundle
                            .getParcelableArray(AlternatesBundleKeys.ALTERNATES);
                        for (int j = 0; j < alternatesArr.length &&
                                 altList.size() < MAX_ALT_LIST_LENGTH; j++) {
                            Bundle alternateBundle = (Bundle) alternatesArr[j];
                            String alternate = alternateBundle.getString(AlternatesBundleKeys.TEXT);
                            // Don't allow duplicates in the alternates list.
                            if (!altList.contains(alternate)) {
                                altList.add(alternate);
                            }
                        }
                    }
                }
            }

            // Pass at most five results up to the client.
            if (results.size() > 5) {
                results = results.subList(0, 5);
            }
            mUiListener.onVoiceResults(results, alternatives);
            mRecognitionView.finish();
        }

        public void onPartialResults(final Bundle partialResults) {
            // Currently unused; do nothing.
        }

        public void onEvent(int eventType, Bundle params) {
            // Do nothing; reserved for events that might be added in the future.
        }
    }
}