// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Implements a custom word iterator used for our spellchecker.

#include "chrome/renderer/spellchecker/spellcheck_worditerator.h"

#include <map>
#include <string>

#include "base/basictypes.h"
#include "base/i18n/break_iterator.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "chrome/renderer/spellchecker/spellcheck.h"
#include "third_party/icu/source/common/unicode/normlzr.h"
#include "third_party/icu/source/common/unicode/schriter.h"
#include "third_party/icu/source/common/unicode/uscript.h"
#include "third_party/icu/source/i18n/unicode/ulocdata.h"

// SpellcheckCharAttribute implementation:

SpellcheckCharAttribute::SpellcheckCharAttribute()
    : script_code_(USCRIPT_LATIN) {
}

SpellcheckCharAttribute::~SpellcheckCharAttribute() {
}

void SpellcheckCharAttribute::SetDefaultLanguage(const std::string& language) {
  CreateRuleSets(language);
}

base::string16 SpellcheckCharAttribute::GetRuleSet(
    bool allow_contraction) const {
  return allow_contraction ?
      ruleset_allow_contraction_ : ruleset_disallow_contraction_;
}

void SpellcheckCharAttribute::CreateRuleSets(const std::string& language) {
  // The template for our custom rule sets, based on the word-break rules of
  // ICU 4.0:
  // <http://source.icu-project.org/repos/icu/icu/tags/release-4-0/source/data/brkitr/word.txt>.
  // The major differences from the original rules are listed below:
  // * It discards comments in the original rules.
  // * It discards characters not needed by our spellchecker (e.g. numbers,
  //   punctuation characters, Hiragana, Katakana, CJK ideographs, and so on).
  // * It allows customization of the $ALetter value (i.e. word characters).
  // * It allows customization of the $ALetterPlus value (i.e. whether or not
  //   to use the dictionary data).
  // * It allows choosing whether or not to split a text at contraction
  //   characters.
  // This template only changes the forward-iteration rules, so calling
  // ubrk_prev() returns the same results as the original rules.
  static const char kRuleTemplate[] =
      "!!chain;"
      "$CR = [\\p{Word_Break = CR}];"
      "$LF = [\\p{Word_Break = LF}];"
      "$Newline = [\\p{Word_Break = Newline}];"
      "$Extend = [\\p{Word_Break = Extend}];"
      "$Format = [\\p{Word_Break = Format}];"
      "$Katakana = [\\p{Word_Break = Katakana}];"
      // Not all the characters in a given script are ALetter. For instance,
      // U+05F4 is MidLetter. So, the following rule may be better, but it
      // leads to an empty-set error in Thai.
      // "$ALetter = [[\\p{script=%s}] & [\\p{Word_Break = ALetter}]];"
      "$ALetter = [\\p{script=%s}%s];"
      // U+0027 (single quote/apostrophe) is no longer in MidNumLet as of
      // UAX #29 revision 21. For our purposes, U+0027 has to be treated as
      // MidNumLet. (http://crbug.com/364072)
      "$MidNumLet = [\\p{Word_Break = MidNumLet} \\u0027];"
      "$MidLetter = [\\p{Word_Break = MidLetter}%s];"
      "$MidNum = [\\p{Word_Break = MidNum}];"
      "$Numeric = [\\p{Word_Break = Numeric}];"
      "$ExtendNumLet = [\\p{Word_Break = ExtendNumLet}];"

      "$Control = [\\p{Grapheme_Cluster_Break = Control}]; "
      "%s"  // ALetterPlus

      "$KatakanaEx = $Katakana ($Extend | $Format)*;"
      "$ALetterEx = $ALetterPlus ($Extend | $Format)*;"
      "$MidNumLetEx = $MidNumLet ($Extend | $Format)*;"
      "$MidLetterEx = $MidLetter ($Extend | $Format)*;"
      "$MidNumEx = $MidNum ($Extend | $Format)*;"
      "$NumericEx = $Numeric ($Extend | $Format)*;"
      "$ExtendNumLetEx = $ExtendNumLet ($Extend | $Format)*;"

      "$Hiragana = [\\p{script=Hiragana}];"
      "$Ideographic = [\\p{Ideographic}];"
      "$HiraganaEx = $Hiragana ($Extend | $Format)*;"
      "$IdeographicEx = $Ideographic ($Extend | $Format)*;"

      "!!forward;"
      "$CR $LF;"
      "[^$CR $LF $Newline]? ($Extend | $Format)+;"
      "$ALetterEx {200};"
      "$ALetterEx $ALetterEx {200};"
      "%s"  // (Allow|Disallow) Contraction

      "!!reverse;"
      "$BackALetterEx = ($Format | $Extend)* $ALetterPlus;"
      "$BackMidNumLetEx = ($Format | $Extend)* $MidNumLet;"
      "$BackNumericEx = ($Format | $Extend)* $Numeric;"
      "$BackMidNumEx = ($Format | $Extend)* $MidNum;"
      "$BackMidLetterEx = ($Format | $Extend)* $MidLetter;"
      "$BackKatakanaEx = ($Format | $Extend)* $Katakana;"
      "$BackExtendNumLetEx= ($Format | $Extend)* $ExtendNumLet;"
      "$LF $CR;"
      "($Format | $Extend)* [^$CR $LF $Newline]?;"
      "$BackALetterEx $BackALetterEx;"
      "$BackALetterEx ($BackMidLetterEx | $BackMidNumLetEx) $BackALetterEx;"
      "$BackNumericEx $BackNumericEx;"
      "$BackNumericEx $BackALetterEx;"
      "$BackALetterEx $BackNumericEx;"
      "$BackNumericEx ($BackMidNumEx | $BackMidNumLetEx) $BackNumericEx;"
      "$BackKatakanaEx $BackKatakanaEx;"
      "$BackExtendNumLetEx ($BackALetterEx | $BackNumericEx |"
      " $BackKatakanaEx | $BackExtendNumLetEx);"
      "($BackALetterEx | $BackNumericEx | $BackKatakanaEx)"
      " $BackExtendNumLetEx;"

      "!!safe_reverse;"
      "($Extend | $Format)+ .?;"
      "($MidLetter | $MidNumLet) $BackALetterEx;"
      "($MidNum | $MidNumLet) $BackNumericEx;"

      "!!safe_forward;"
      "($Extend | $Format)+ .?;"
      "($MidLetterEx | $MidNumLetEx) $ALetterEx;"
      "($MidNumEx | $MidNumLetEx) $NumericEx;";

  // Retrieve the script codes used by the given language from ICU. When the
  // given language consists of two or more scripts, we just use the first
  // script. The number of script codes returned is always less than 8, so an
  // array of size 8 can hold all of them without a buffer-overflow error.
  UErrorCode error = U_ZERO_ERROR;
  UScriptCode script_code[8];
  int scripts = uscript_getCode(language.c_str(), script_code,
                                arraysize(script_code), &error);
  if (U_SUCCESS(error) && scripts >= 1)
    script_code_ = script_code[0];

  // Retrieve the values for $ALetter and $ALetterPlus. We use the dictionary
  // only for the languages that need it (i.e. Korean and Thai) to prevent ICU
  // from returning dictionary words (i.e. Korean or Thai words) for languages
  // which don't need them.
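  // For example (illustrative expansion only, derived from the code below):
  // for "en-US" the script is Latin, so $ALetter becomes
  // [\p{script=Latin} [0123456789]] and $ALetterPlus is simply $ALetter; for
  // "th" the script is Thai, so $ALetterPlus additionally pulls in ICU's
  // dictionary characters ([:LineBreak = Complex_Context:]).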
  const char* aletter = uscript_getName(script_code_);
  if (!aletter)
    aletter = "Latin";

  const char kWithDictionary[] =
      "$dictionary = [:LineBreak = Complex_Context:];"
      "$ALetterPlus = [$ALetter [$dictionary-$Extend-$Control]];";
  const char kWithoutDictionary[] = "$ALetterPlus = $ALetter;";
  const char* aletter_plus = kWithoutDictionary;
  if (script_code_ == USCRIPT_HANGUL || script_code_ == USCRIPT_THAI ||
      script_code_ == USCRIPT_LAO || script_code_ == USCRIPT_KHMER)
    aletter_plus = kWithDictionary;

  // Treat numbers as word characters except for Arabic and Hebrew.
  const char* aletter_extra = " [0123456789]";
  if (script_code_ == USCRIPT_HEBREW || script_code_ == USCRIPT_ARABIC)
    aletter_extra = "";

  const char kMidLetterExtra[] = "";
  // For Hebrew, treat single/double quotation marks as MidLetter.
  const char kMidLetterExtraHebrew[] = "\"'";
  const char* midletter_extra = kMidLetterExtra;
  if (script_code_ == USCRIPT_HEBREW)
    midletter_extra = kMidLetterExtraHebrew;

  // Create two custom rule sets: one that allows contraction and one that
  // does not. We save these strings in UTF-16 so we can use them without
  // conversions. (ICU needs UTF-16 strings.)
  const char kAllowContraction[] =
      "$ALetterEx ($MidLetterEx | $MidNumLetEx) $ALetterEx {200};";
  const char kDisallowContraction[] = "";

  ruleset_allow_contraction_ = base::ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kAllowContraction));
  ruleset_disallow_contraction_ = base::ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kDisallowContraction));
}

bool SpellcheckCharAttribute::OutputChar(UChar c,
                                         base::string16* output) const {
  // Call the language-specific function if necessary.
  // Otherwise, we call the default one.
  switch (script_code_) {
    case USCRIPT_ARABIC:
      return OutputArabic(c, output);

    case USCRIPT_HANGUL:
      return OutputHangul(c, output);

    case USCRIPT_HEBREW:
      return OutputHebrew(c, output);

    default:
      return OutputDefault(c, output);
  }
}

bool SpellcheckCharAttribute::OutputArabic(UChar c,
                                           base::string16* output) const {
  // Discard characters that are not Arabic letters. We also discard the
  // Arabic vowel marks (Damma, Fatha, Kasra, etc.) to prevent our Arabic
  // dictionary from marking an Arabic word that includes vowel marks as
  // misspelled. (We need to check these vowel marks manually and filter them
  // out since their script codes are USCRIPT_ARABIC.)
  if (0x0621 <= c && c <= 0x064D)
    output->push_back(c);
  return true;
}

bool SpellcheckCharAttribute::OutputHangul(UChar c,
                                           base::string16* output) const {
  // Decompose a Hangul character into the Hangul vowel and consonants used by
  // our spellchecker. A Hangul character in Unicode is a ligature consisting
  // of a Hangul vowel and consonants, e.g. U+AC01 "Gag" consists of U+1100
  // "G", U+1161 "a", and U+11A8 "g". That is, we can treat each Hangul
  // character as a point in a three-dimensional space of (first consonant,
  // vowel, last consonant). Therefore, we can compose a Hangul character from
  // a vowel and two consonants with linear composition:
  //   character = 0xAC00 +
  //               (first consonant - 0x1100) * 28 * 21 +
  //               (vowel - 0x1161) * 28 +
  //               (last consonant - 0x11A7);
  // We can also decompose a Hangul character with linear decomposition:
  //   first consonant = (character - 0xAC00) / 28 / 21;
  //   vowel = (character - 0xAC00) / 28 % 21;
  //   last consonant = (character - 0xAC00) % 28;
  // This code is copied from Unicode Standard Annex #15
  // <http://unicode.org/reports/tr15>, with some comments added.
  const int kSBase = 0xAC00;  // U+AC00: the top of Hangul characters.
  const int kLBase = 0x1100;  // U+1100: the top of Hangul first consonants.
  const int kVBase = 0x1161;  // U+1161: the top of Hangul vowels.
  const int kTBase = 0x11A7;  // U+11A7: the top of Hangul last consonants.
  const int kLCount = 19;     // The number of Hangul first consonants.
  const int kVCount = 21;     // The number of Hangul vowels.
  const int kTCount = 28;     // The number of Hangul last consonants.
  const int kNCount = kVCount * kTCount;
  const int kSCount = kLCount * kNCount;

  int index = c - kSBase;
  if (index < 0 || index >= kSCount) {
    // This is not a precomposed Hangul syllable. Call the default output
    // function so the character is still emitted when it is a Hangul jamo
    // (alphabet) letter.
    return OutputDefault(c, output);
  }

  // This is a Hangul syllable. Decompose this character into Hangul vowels
  // and consonants.
  int l = kLBase + index / kNCount;
  int v = kVBase + (index % kNCount) / kTCount;
  int t = kTBase + index % kTCount;
  output->push_back(l);
  output->push_back(v);
  if (t != kTBase)
    output->push_back(t);
  return true;
}

bool SpellcheckCharAttribute::OutputHebrew(UChar c,
                                           base::string16* output) const {
  // Discard characters that are not Hebrew letters. We also discard Hebrew
  // niqquds to prevent our Hebrew dictionary from marking a Hebrew word that
  // includes niqquds as misspelled. (As with the Arabic vowel marks, we need
  // to check niqquds manually and filter them out since their script codes
  // are USCRIPT_HEBREW.)
  // Pass through ASCII single/double quotation marks and Hebrew Geresh and
  // Gershayim.
  if ((0x05D0 <= c && c <= 0x05EA) || c == 0x22 || c == 0x27 ||
      c == 0x05F4 || c == 0x05F3)
    output->push_back(c);
  return true;
}

bool SpellcheckCharAttribute::OutputDefault(UChar c,
                                            base::string16* output) const {
  // Check the script code of this character and output it only when it
  // matches the script used by the spellchecker language.
  UErrorCode status = U_ZERO_ERROR;
  UScriptCode script_code = uscript_getScript(c, &status);
  if (script_code == script_code_ || script_code == USCRIPT_COMMON)
    output->push_back(c);
  return true;
}

// SpellcheckWordIterator implementation:

SpellcheckWordIterator::SpellcheckWordIterator()
    : text_(NULL),
      attribute_(NULL),
      iterator_() {
}

SpellcheckWordIterator::~SpellcheckWordIterator() {
  Reset();
}

bool SpellcheckWordIterator::Initialize(
    const SpellcheckCharAttribute* attribute,
    bool allow_contraction) {
  // Create the custom ICU break iterator used by this object, initially with
  // empty text. (We allow setting the text later so we can reuse this
  // iterator.)
  DCHECK(attribute);
  const base::string16 rule(attribute->GetRuleSet(allow_contraction));

  // If there is no rule set, the attributes were invalid.
  if (rule.empty())
    return false;

  scoped_ptr<base::i18n::BreakIterator> iterator(
      new base::i18n::BreakIterator(base::string16(), rule));
  if (!iterator->Init()) {
    // Since we're not passing in any text, the only reason this could fail
    // is if we fail to parse the rules. Since the rules are hardcoded,
    // that would be a bug in this class.
    NOTREACHED() << "failed to open iterator (broken rules)";
    return false;
  }
  iterator_ = iterator.Pass();

  // Set the character attributes so we can normalize the words extracted by
  // this iterator.
  attribute_ = attribute;
  return true;
}

bool SpellcheckWordIterator::IsInitialized() const {
  // Return true iff we have an iterator.
  return !!iterator_;
}

bool SpellcheckWordIterator::SetText(const base::char16* text, size_t length) {
  DCHECK(!!iterator_);

  // Set the text to be split by this iterator.
  if (!iterator_->SetText(text, length)) {
    LOG(ERROR) << "failed to set text";
    return false;
  }

  text_ = text;
  return true;
}

bool SpellcheckWordIterator::GetNextWord(base::string16* word_string,
                                         int* word_start,
                                         int* word_length) {
  DCHECK(!!text_);

  word_string->clear();
  *word_start = 0;
  *word_length = 0;

  if (!text_) {
    return false;
  }

  // Find a word that can be checked for spelling. Our rule sets filter out
  // invalid words (e.g. numbers and characters not supported by the
  // spellchecker language), so the ubrk_getRuleStatus() call returns
  // UBRK_WORD_NONE when this iterator finds an invalid word. We skip such
  // words until we find a valid word or reach the end of the input string.
  while (iterator_->Advance()) {
    const size_t start = iterator_->prev();
    const size_t length = iterator_->pos() - start;
    if (iterator_->IsWord()) {
      if (Normalize(start, length, word_string)) {
        *word_start = start;
        *word_length = length;
        return true;
      }
    }
  }

  // There aren't any more words in the given text.
  return false;
}

void SpellcheckWordIterator::Reset() {
  iterator_.reset();
}

bool SpellcheckWordIterator::Normalize(int input_start,
                                       int input_length,
                                       base::string16* output_string) const {
  // We use NFKC (Normalization Form: Compatibility Decomposition followed by
  // Canonical Composition), defined in Unicode Standard Annex #15, to
  // normalize this token because it is the most suitable normalization
  // algorithm for our spellchecker. Nevertheless, it is not a perfect
  // algorithm for our spellchecker, and we need manual normalization as well.
  // The normalized text does not have to be NUL-terminated since its
  // characters are copied to a string16, which adds a NUL character when
  // needed.
  icu::UnicodeString input(FALSE, &text_[input_start], input_length);
  UErrorCode status = U_ZERO_ERROR;
  icu::UnicodeString output;
  icu::Normalizer::normalize(input, UNORM_NFKC, 0, output, status);
  if (status != U_ZERO_ERROR && status != U_STRING_NOT_TERMINATED_WARNING)
    return false;

  // Copy the normalized text to the output.
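  // (Illustrative example: NFKC maps a compatibility character such as U+FF21
  // FULLWIDTH LATIN CAPITAL LETTER A to U+0041 'A'; OutputChar() below then
  // keeps, rewrites, or drops each normalized character according to the
  // spellchecker script, e.g. decomposing Hangul syllables into jamo.)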
  icu::StringCharacterIterator it(output);
  for (UChar c = it.first(); c != icu::CharacterIterator::DONE; c = it.next())
    attribute_->OutputChar(c, output_string);

  return !output_string->empty();
}
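
// Usage sketch (illustrative only; the real call sites live elsewhere, e.g.
// in SpellCheck, and may differ in detail):
//   SpellcheckCharAttribute attribute;
//   attribute.SetDefaultLanguage("en-US");
//   SpellcheckWordIterator iterator;
//   if (iterator.Initialize(&attribute, true /* allow_contraction */)) {
//     base::string16 text = base::ASCIIToUTF16("this is a pen");
//     iterator.SetText(text.c_str(), text.length());
//     base::string16 word;
//     int start = 0;
//     int length = 0;
//     while (iterator.GetNextWord(&word, &start, &length)) {
//       // |word| holds a normalized word; |start| and |length| locate the
//       // original word in |text|.
//     }
//   }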