Home | History | Annotate | Download | only in voice
      1 /*
      2  * Copyright (C) 2009 Google Inc.
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License"); you may not
      5  * use this file except in compliance with the License. You may obtain a copy of
      6  * the License at
      7  *
      8  * http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
     12  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
     13  * License for the specific language governing permissions and limitations under
     14  * the License.
     15  */
     16 
     17 package com.android.inputmethod.voice;
     18 
     19 import com.android.inputmethod.latin.R;
     20 
     21 import android.content.ContentResolver;
     22 import android.content.Context;
     23 import android.content.Intent;
     24 import android.os.Build;
     25 import android.os.Bundle;
     26 import android.os.Handler;
     27 import android.os.Message;
     28 import android.os.Parcelable;
     29 import android.speech.RecognitionListener;
     30 import android.speech.SpeechRecognizer;
     31 import android.speech.RecognizerIntent;
     32 import android.util.Log;
     33 import android.view.View;
     34 import android.view.View.OnClickListener;
     35 
     36 import java.io.ByteArrayOutputStream;
     37 import java.io.IOException;
     38 import java.util.ArrayList;
     39 import java.util.HashMap;
     40 import java.util.List;
     41 import java.util.Locale;
     42 import java.util.Map;
     43 
     44 /**
     45  * Speech recognition input, including both user interface and a background
     46  * process to stream audio to the network recognizer. This class supplies a
     47  * View (getView()), which it updates as recognition occurs. The user of this
     48  * class is responsible for making the view visible to the user, as well as
     49  * handling various events returned through UiListener.
     50  */
public class VoiceInput implements OnClickListener {
    private static final String TAG = "VoiceInput";

    // Hidden extras understood by the Google recognition service; these are not
    // part of the public android.speech API, hence the local string constants.
    private static final String EXTRA_RECOGNITION_CONTEXT =
            "android.speech.extras.RECOGNITION_CONTEXT";
    private static final String EXTRA_CALLING_PACKAGE = "calling_package";
    private static final String EXTRA_ALTERNATES = "android.speech.extra.ALTERNATES";

    // Maximum number of alternate transcriptions kept per recognized word.
    private static final int MAX_ALT_LIST_LENGTH = 6;

    // Space-separated package names for which voice input is recommended by
    // default; can be overridden via Gservices (see the constructor).
    private static final String DEFAULT_RECOMMENDED_PACKAGES =
            "com.android.mms " +
            "com.google.android.gm " +
            "com.google.android.talk " +
            "com.google.android.apps.googlevoice " +
            "com.android.email " +
            "com.android.browser ";

    // WARNING! Before enabling this, fix the problem with calling getExtractedText() in
    // landscape view. It causes Extracted text updates to be rejected due to a token mismatch
    public static boolean ENABLE_WORD_CORRECTIONS = true;

    // Dummy word suggestion which means "delete current word"
    public static final String DELETE_SYMBOL = " \u00D7 ";  // times symbol

    private Whitelist mRecommendedList;  // fields for which voice input is recommended
    private Whitelist mBlacklist;        // fields for which voice input is disabled

    private VoiceInputLogger mLogger;    // aggregates usage/metrics events

    // Names of a few extras defined in VoiceSearch's RecognitionController
    // Note, the version of voicesearch that shipped in Froyo returns the raw
    // RecognitionClientAlternates protocol buffer under the key "alternates",
    // so a VS market update must be installed on Froyo devices in order to see
    // alternatives.
    private static final String ALTERNATES_BUNDLE = "alternates_bundle";

    //  This is copied from the VoiceSearch app.
    private static final class AlternatesBundleKeys {
        public static final String ALTERNATES = "alternates";
        public static final String CONFIDENCE = "confidence";
        public static final String LENGTH = "length";
        public static final String MAX_SPAN_LENGTH = "max_span_length";
        public static final String SPANS = "spans";
        public static final String SPAN_KEY_DELIMITER = ":";
        public static final String START = "start";
        public static final String TEXT = "text";
    }

    // Names of a few intent extras defined in VoiceSearch's RecognitionService.
    // These let us tweak the endpointer parameters.
    private static final String EXTRA_SPEECH_MINIMUM_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_MINIMUM_LENGTH_MILLIS";
    private static final String EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS";
    private static final String EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS =
            "android.speech.extras.SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS";

    // The usual endpointer default value for input complete silence length is 0.5 seconds,
    // but that's used for things like voice search. For dictation-like voice input like this,
    // we go with a more liberal value of 1 second. This value will only be used if a value
    // is not provided from Gservices.
    private static final String INPUT_COMPLETE_SILENCE_LENGTH_DEFAULT_VALUE_MILLIS = "1000";

    // Used to record part of that state for logging purposes.
    public static final int DEFAULT = 0;    // idle, no recognition in progress
    public static final int LISTENING = 1;  // microphone open, capturing audio
    public static final int WORKING = 2;    // audio captured, waiting for results
    public static final int ERROR = 3;      // error dialog currently displayed

    // Counters for text edits made after a voice input session; flushed to the
    // logger in batches by the incrementTextModification*Count methods below.
    private int mAfterVoiceInputDeleteCount = 0;
    private int mAfterVoiceInputInsertCount = 0;
    private int mAfterVoiceInputInsertPunctuationCount = 0;
    private int mAfterVoiceInputCursorPos = 0;
    private int mAfterVoiceInputSelectionSpan = 0;

    // Current recognition state: one of DEFAULT/LISTENING/WORKING/ERROR.
    private int mState = DEFAULT;

    // Handler message: dismiss the error dialog and cancel voice input.
    private final static int MSG_CLOSE_ERROR_DIALOG = 1;
    128 
    // Drives the delayed auto-dismissal of the error dialog scheduled by
    // onError(String): resets state, hides the view, and notifies the client.
    private final Handler mHandler = new Handler() {
        @Override
        public void handleMessage(Message msg) {
            if (msg.what == MSG_CLOSE_ERROR_DIALOG) {
                mState = DEFAULT;
                mRecognitionView.finish();
                mUiListener.onCancelVoice();
            }
        }
    };
    139 
    /**
     * Events relating to the recognition UI. You must implement these.
     */
    public interface UiListener {

        /**
         * Called when recognition completes with results.
         *
         * @param recognitionResults a set of transcripts for what the user
         *   spoke, sorted by likelihood.
         * @param alternatives maps words of the top transcript to their
         *   alternate recognitions, if any were returned by the service.
         */
        public void onVoiceResults(
            List<String> recognitionResults,
            Map<String, List<CharSequence>> alternatives);

        /**
         * Called when the user cancels speech recognition.
         */
        public void onCancelVoice();
    }
    158 
    private SpeechRecognizer mSpeechRecognizer;        // framework recognizer doing the work
    private RecognitionListener mRecognitionListener;  // receives recognizer callbacks
    private RecognitionView mRecognitionView;          // UI shown during recognition
    private UiListener mUiListener;                    // client receiving results/cancel events
    private Context mContext;                          // hosting service or activity
    164 
    165     /**
    166      * @param context the service or activity in which we're running.
    167      * @param uiHandler object to receive events from VoiceInput.
    168      */
    169     public VoiceInput(Context context, UiListener uiHandler) {
    170         mLogger = VoiceInputLogger.getLogger(context);
    171         mRecognitionListener = new ImeRecognitionListener();
    172         mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(context);
    173         mSpeechRecognizer.setRecognitionListener(mRecognitionListener);
    174         mUiListener = uiHandler;
    175         mContext = context;
    176         newView();
    177 
    178         String recommendedPackages = SettingsUtil.getSettingsString(
    179                 context.getContentResolver(),
    180                 SettingsUtil.LATIN_IME_VOICE_INPUT_RECOMMENDED_PACKAGES,
    181                 DEFAULT_RECOMMENDED_PACKAGES);
    182 
    183         mRecommendedList = new Whitelist();
    184         for (String recommendedPackage : recommendedPackages.split("\\s+")) {
    185             mRecommendedList.addApp(recommendedPackage);
    186         }
    187 
    188         mBlacklist = new Whitelist();
    189         mBlacklist.addApp("com.android.setupwizard");
    190     }
    191 
    /** Records the cursor position after a voice input session (for logging). */
    public void setCursorPos(int pos) {
        mAfterVoiceInputCursorPos = pos;
    }

    /** @return the cursor position recorded by {@link #setCursorPos(int)} */
    public int getCursorPos() {
        return mAfterVoiceInputCursorPos;
    }

    /** Records the selection span after a voice input session (for logging). */
    public void setSelectionSpan(int span) {
        mAfterVoiceInputSelectionSpan = span;
    }

    /** @return the selection span recorded by {@link #setSelectionSpan(int)} */
    public int getSelectionSpan() {
        return mAfterVoiceInputSelectionSpan;
    }
    207 
    208     public void incrementTextModificationDeleteCount(int count){
    209         mAfterVoiceInputDeleteCount += count;
    210         // Send up intents for other text modification types
    211         if (mAfterVoiceInputInsertCount > 0) {
    212             logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
    213             mAfterVoiceInputInsertCount = 0;
    214         }
    215         if (mAfterVoiceInputInsertPunctuationCount > 0) {
    216             logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
    217             mAfterVoiceInputInsertPunctuationCount = 0;
    218         }
    219 
    220     }
    221 
    222     public void incrementTextModificationInsertCount(int count){
    223         mAfterVoiceInputInsertCount += count;
    224         if (mAfterVoiceInputSelectionSpan > 0) {
    225             // If text was highlighted before inserting the char, count this as
    226             // a delete.
    227             mAfterVoiceInputDeleteCount += mAfterVoiceInputSelectionSpan;
    228         }
    229         // Send up intents for other text modification types
    230         if (mAfterVoiceInputDeleteCount > 0) {
    231             logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
    232             mAfterVoiceInputDeleteCount = 0;
    233         }
    234         if (mAfterVoiceInputInsertPunctuationCount > 0) {
    235             logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
    236             mAfterVoiceInputInsertPunctuationCount = 0;
    237         }
    238     }
    239 
    240     public void incrementTextModificationInsertPunctuationCount(int count){
    241         mAfterVoiceInputInsertPunctuationCount += 1;
    242         if (mAfterVoiceInputSelectionSpan > 0) {
    243             // If text was highlighted before inserting the char, count this as
    244             // a delete.
    245             mAfterVoiceInputDeleteCount += mAfterVoiceInputSelectionSpan;
    246         }
    247         // Send up intents for aggregated non-punctuation insertions
    248         if (mAfterVoiceInputDeleteCount > 0) {
    249             logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
    250             mAfterVoiceInputDeleteCount = 0;
    251         }
    252         if (mAfterVoiceInputInsertCount > 0) {
    253             logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
    254             mAfterVoiceInputInsertCount = 0;
    255         }
    256     }
    257 
    258     public void flushAllTextModificationCounters() {
    259         if (mAfterVoiceInputInsertCount > 0) {
    260             logTextModifiedByTypingInsertion(mAfterVoiceInputInsertCount);
    261             mAfterVoiceInputInsertCount = 0;
    262         }
    263         if (mAfterVoiceInputDeleteCount > 0) {
    264             logTextModifiedByTypingDeletion(mAfterVoiceInputDeleteCount);
    265             mAfterVoiceInputDeleteCount = 0;
    266         }
    267         if (mAfterVoiceInputInsertPunctuationCount > 0) {
    268             logTextModifiedByTypingInsertionPunctuation(mAfterVoiceInputInsertPunctuationCount);
    269             mAfterVoiceInputInsertPunctuationCount = 0;
    270         }
    271     }
    272 
    /**
     * The configuration of the IME changed and may have caused the views to be layed out
     * again. Restore the state of the recognition view.
     */
    public void onConfigurationChanged() {
        mRecognitionView.restoreState();
    }

    /**
     * @param context the field being edited
     * @return true if field is blacklisted for voice
     */
    public boolean isBlacklistedField(FieldContext context) {
        return mBlacklist.matches(context);
    }

    /**
     * Used to decide whether to show voice input hints for this field, etc.
     *
     * @param context the field being edited
     * @return true if field is recommended for voice
     */
    public boolean isRecommendedField(FieldContext context) {
        return mRecommendedList.matches(context);
    }
    296 
    297     /**
    298      * Start listening for speech from the user. This will grab the microphone
    299      * and start updating the view provided by getView(). It is the caller's
    300      * responsibility to ensure that the view is visible to the user at this stage.
    301      *
    302      * @param context the same FieldContext supplied to voiceIsEnabled()
    303      * @param swipe whether this voice input was started by swipe, for logging purposes
    304      */
    305     public void startListening(FieldContext context, boolean swipe) {
    306         mState = DEFAULT;
    307 
    308         Locale locale = Locale.getDefault();
    309         String localeString = locale.getLanguage() + "-" + locale.getCountry();
    310 
    311         mLogger.start(localeString, swipe);
    312 
    313         mState = LISTENING;
    314 
    315         mRecognitionView.showInitializing();
    316         startListeningAfterInitialization(context);
    317     }
    318 
    /**
     * Called only when the recognition manager's initialization completed
     *
     * @param context context with which {@link #startListening(FieldContext, boolean)} was executed
     */
    private void startListeningAfterInitialization(FieldContext context) {
        Intent intent = makeIntent();
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, "");
        // Pass the editor field information so the service can adapt recognition.
        intent.putExtra(EXTRA_RECOGNITION_CONTEXT, context.getBundle());
        intent.putExtra(EXTRA_CALLING_PACKAGE, "VoiceIME");
        // Request per-word alternate transcriptions (see ALTERNATES_BUNDLE).
        intent.putExtra(EXTRA_ALTERNATES, true);
        intent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS,
                SettingsUtil.getSettingsInt(
                        mContext.getContentResolver(),
                        SettingsUtil.LATIN_IME_MAX_VOICE_RESULTS,
                        1));
        // Get endpointer params from Gservices.
        // TODO: Consider caching these values for improved performance on slower devices.
        final ContentResolver cr = mContext.getContentResolver();
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.LATIN_IME_SPEECH_MINIMUM_LENGTH_MILLIS,
                EXTRA_SPEECH_MINIMUM_LENGTH_MILLIS,
                null  /* rely on endpointer default */);
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.LATIN_IME_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
                EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS,
                INPUT_COMPLETE_SILENCE_LENGTH_DEFAULT_VALUE_MILLIS
                /* our default value is different from the endpointer's */);
        putEndpointerExtra(
                cr,
                intent,
                SettingsUtil.
                        LATIN_IME_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
                EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS,
                null  /* rely on endpointer default */);

        mSpeechRecognizer.startListening(intent);
    }
    361 
    362     /**
    363      * Gets the value of the provided Gservices key, attempts to parse it into a long,
    364      * and if successful, puts the long value as an extra in the provided intent.
    365      */
    366     private void putEndpointerExtra(ContentResolver cr, Intent i,
    367             String gservicesKey, String intentExtraKey, String defaultValue) {
    368         long l = -1;
    369         String s = SettingsUtil.getSettingsString(cr, gservicesKey, defaultValue);
    370         if (s != null) {
    371             try {
    372                 l = Long.valueOf(s);
    373             } catch (NumberFormatException e) {
    374                 Log.e(TAG, "could not parse value for " + gservicesKey + ": " + s);
    375             }
    376         }
    377 
    378         if (l != -1) i.putExtra(intentExtraKey, l);
    379     }
    380 
    /** Releases the underlying {@link SpeechRecognizer}. Call when shutting down. */
    public void destroy() {
        mSpeechRecognizer.destroy();
    }

    /**
     * Creates a new instance of the view that is returned by {@link #getView()}
     * Clients should use this when a previously returned view is stuck in a
     * layout that is being thrown away and a new one is need to show to the
     * user.
     */
    public void newView() {
        mRecognitionView = new RecognitionView(mContext, this);
    }

    /**
     * @return a view that shows the recognition flow--e.g., "Speak now" and
     * "working" dialogs.
     */
    public View getView() {
        return mRecognitionView.getView();
    }
    402 
    403     /**
    404      * Handle the cancel button.
    405      */
    406     public void onClick(View view) {
    407         switch(view.getId()) {
    408             case R.id.button:
    409                 cancel();
    410                 break;
    411         }
    412     }
    413 
    // ------------------------------------------------------------------
    // Thin public wrappers around VoiceInputLogger, so clients can emit
    // logging events without holding a reference to the logger itself.
    // ------------------------------------------------------------------

    public void logTextModifiedByTypingInsertion(int length) {
        mLogger.textModifiedByTypingInsertion(length);
    }

    public void logTextModifiedByTypingInsertionPunctuation(int length) {
        mLogger.textModifiedByTypingInsertionPunctuation(length);
    }

    public void logTextModifiedByTypingDeletion(int length) {
        mLogger.textModifiedByTypingDeletion(length);
    }

    public void logTextModifiedByChooseSuggestion(int length) {
        mLogger.textModifiedByChooseSuggestion(length);
    }

    public void logKeyboardWarningDialogShown() {
        mLogger.keyboardWarningDialogShown();
    }

    public void logKeyboardWarningDialogDismissed() {
        mLogger.keyboardWarningDialogDismissed();
    }

    public void logKeyboardWarningDialogOk() {
        mLogger.keyboardWarningDialogOk();
    }

    public void logKeyboardWarningDialogCancel() {
        mLogger.keyboardWarningDialogCancel();
    }

    public void logSwipeHintDisplayed() {
        mLogger.swipeHintDisplayed();
    }

    public void logPunctuationHintDisplayed() {
        mLogger.punctuationHintDisplayed();
    }

    public void logVoiceInputDelivered(int length) {
        mLogger.voiceInputDelivered(length);
    }

    public void logNBestChoose(int index) {
        mLogger.nBestChoose(index);
    }

    public void logInputEnded() {
        mLogger.inputEnded();
    }

    /** Forces any buffered log events to be sent immediately. */
    public void flushLogs() {
        mLogger.flush();
    }
    469 
    470     private static Intent makeIntent() {
    471         Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    472 
    473         // On Cupcake, use VoiceIMEHelper since VoiceSearch doesn't support.
    474         // On Donut, always use VoiceSearch, since VoiceIMEHelper and
    475         // VoiceSearch may conflict.
    476         if (Build.VERSION.RELEASE.equals("1.5")) {
    477             intent = intent.setClassName(
    478               "com.google.android.voiceservice",
    479               "com.google.android.voiceservice.IMERecognitionService");
    480         } else {
    481             intent = intent.setClassName(
    482               "com.google.android.voicesearch",
    483               "com.google.android.voicesearch.RecognitionService");
    484         }
    485 
    486         return intent;
    487     }
    488 
    489     /**
    490      * Cancel in-progress speech recognition.
    491      */
    492     public void cancel() {
    493         switch (mState) {
    494         case LISTENING:
    495             mLogger.cancelDuringListening();
    496             break;
    497         case WORKING:
    498             mLogger.cancelDuringWorking();
    499             break;
    500         case ERROR:
    501             mLogger.cancelDuringError();
    502             break;
    503         }
    504         mState = DEFAULT;
    505 
    506         // Remove all pending tasks (e.g., timers to cancel voice input)
    507         mHandler.removeMessages(MSG_CLOSE_ERROR_DIALOG);
    508 
    509         mSpeechRecognizer.cancel();
    510         mUiListener.onCancelVoice();
    511         mRecognitionView.finish();
    512     }
    513 
    514     private int getErrorStringId(int errorType, boolean endpointed) {
    515         switch (errorType) {
    516             // We use CLIENT_ERROR to signify that voice search is not available on the device.
    517             case SpeechRecognizer.ERROR_CLIENT:
    518                 return R.string.voice_not_installed;
    519             case SpeechRecognizer.ERROR_NETWORK:
    520                 return R.string.voice_network_error;
    521             case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
    522                 return endpointed ?
    523                         R.string.voice_network_error : R.string.voice_too_much_speech;
    524             case SpeechRecognizer.ERROR_AUDIO:
    525                 return R.string.voice_audio_error;
    526             case SpeechRecognizer.ERROR_SERVER:
    527                 return R.string.voice_server_error;
    528             case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
    529                 return R.string.voice_speech_timeout;
    530             case SpeechRecognizer.ERROR_NO_MATCH:
    531                 return R.string.voice_no_match;
    532             default: return R.string.voice_error;
    533         }
    534     }
    535 
    // Logs the framework error code, then shows its localized message in the view.
    private void onError(int errorType, boolean endpointed) {
        Log.i(TAG, "error " + errorType);
        mLogger.error(errorType);
        onError(mContext.getString(getErrorStringId(errorType, endpointed)));
    }
    541 
    542     private void onError(String error) {
    543         mState = ERROR;
    544         mRecognitionView.showError(error);
    545         // Wait a couple seconds and then automatically dismiss message.
    546         mHandler.sendMessageDelayed(Message.obtain(mHandler, MSG_CLOSE_ERROR_DIALOG), 2000);
    547     }
    548 
    /**
     * Bridges {@link SpeechRecognizer} callbacks to the recognition view and
     * the client's {@link UiListener}, buffering raw audio so the "working"
     * screen can display a waveform.
     */
    private class ImeRecognitionListener implements RecognitionListener {
        // Waveform data
        final ByteArrayOutputStream mWaveBuffer = new ByteArrayOutputStream();
        int mSpeechStart;  // offset into mWaveBuffer where speech began
        private boolean mEndpointed = false;  // true once end-of-speech was detected

        public void onReadyForSpeech(Bundle noiseParams) {
            mRecognitionView.showListening();
        }

        public void onBeginningOfSpeech() {
            mEndpointed = false;
            mSpeechStart = mWaveBuffer.size();
        }

        public void onRmsChanged(float rmsdB) {
            mRecognitionView.updateVoiceMeter(rmsdB);
        }

        public void onBufferReceived(byte[] buf) {
            try {
                mWaveBuffer.write(buf);
            } catch (IOException e) {}  // in-memory stream: write cannot actually fail
        }

        public void onEndOfSpeech() {
            mEndpointed = true;
            mState = WORKING;
            mRecognitionView.showWorking(mWaveBuffer, mSpeechStart, mWaveBuffer.size());
        }

        public void onError(int errorType) {
            mState = ERROR;
            VoiceInput.this.onError(errorType, mEndpointed);
        }

        public void onResults(Bundle resultsBundle) {
            List<String> results = resultsBundle
                    .getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
            // VS Market update is needed for IME froyo clients to access the alternatesBundle
            // TODO: verify this.
            Bundle alternatesBundle = resultsBundle.getBundle(ALTERNATES_BUNDLE);
            mState = DEFAULT;

            final Map<String, List<CharSequence>> alternatives =
                new HashMap<String, List<CharSequence>>();

            if (ENABLE_WORD_CORRECTIONS && alternatesBundle != null && results.size() > 0) {
                // Use the top recognition result to map each alternative's start:length to a word.
                String[] words = results.get(0).split(" ");
                Bundle spansBundle = alternatesBundle.getBundle(AlternatesBundleKeys.SPANS);
                for (String key : spansBundle.keySet()) {
                    // Get the word for which these alternates correspond to.
                    Bundle spanBundle = spansBundle.getBundle(key);
                    int start = spanBundle.getInt(AlternatesBundleKeys.START);
                    int length = spanBundle.getInt(AlternatesBundleKeys.LENGTH);
                    // Only keep single-word based alternatives.
                    if (length == 1 && start < words.length) {
                        // Get the alternatives associated with the span.
                        // If a word appears twice in a recognition result,
                        // concatenate the alternatives for the word.
                        List<CharSequence> altList = alternatives.get(words[start]);
                        if (altList == null) {
                            altList = new ArrayList<CharSequence>();
                            alternatives.put(words[start], altList);
                        }
                        Parcelable[] alternatesArr = spanBundle
                            .getParcelableArray(AlternatesBundleKeys.ALTERNATES);
                        // Cap the list at MAX_ALT_LIST_LENGTH entries per word.
                        for (int j = 0; j < alternatesArr.length &&
                                 altList.size() < MAX_ALT_LIST_LENGTH; j++) {
                            Bundle alternateBundle = (Bundle) alternatesArr[j];
                            String alternate = alternateBundle.getString(AlternatesBundleKeys.TEXT);
                            // Don't allow duplicates in the alternates list.
                            if (!altList.contains(alternate)) {
                                altList.add(alternate);
                            }
                        }
                    }
                }
            }

            // Only pass the top five transcripts on to the client.
            if (results.size() > 5) {
                results = results.subList(0, 5);
            }
            mUiListener.onVoiceResults(results, alternatives);
            mRecognitionView.finish();
        }

        public void onPartialResults(final Bundle partialResults) {
            // currently - do nothing
        }

        public void onEvent(int eventType, Bundle params) {
            // do nothing - reserved for events that might be added in the future
        }
    }
    645 }
    646