Home | History | Annotate | Download | only in common
      1 /**
      2  *******************************************************************************
      3  * Copyright (C) 2006-2012, International Business Machines Corporation
      4  * and others. All Rights Reserved.
      5  *******************************************************************************
      6  */
      7 
      8 #include "unicode/utypes.h"
      9 
     10 #if !UCONFIG_NO_BREAK_ITERATION
     11 
     12 #include "brkeng.h"
     13 #include "dictbe.h"
     14 #include "unicode/uniset.h"
     15 #include "unicode/chariter.h"
     16 #include "unicode/ubrk.h"
     17 #include "uvector.h"
     18 #include "uassert.h"
     19 #include "unicode/normlzr.h"
     20 #include "cmemory.h"
     21 #include "dictionarydata.h"
     22 
     23 U_NAMESPACE_BEGIN
     24 
     25 /*
     26  ******************************************************************
     27  */
     28 
     29 DictionaryBreakEngine::DictionaryBreakEngine(uint32_t breakTypes) {
     30     fTypes = breakTypes;
     31 }
     32 
// Destructor. The base engine owns nothing beyond its UnicodeSet member,
// which cleans up by itself.
DictionaryBreakEngine::~DictionaryBreakEngine() {
}
     35 
     36 UBool
     37 DictionaryBreakEngine::handles(UChar32 c, int32_t breakType) const {
     38     return (breakType >= 0 && breakType < 32 && (((uint32_t)1 << breakType) & fTypes)
     39             && fSet.contains(c));
     40 }
     41 
/**
 * Find breaks within a run of characters belonging to this engine's set.
 *
 * Starting at the text iterator's current position, scans forward (or
 * backward when 'reverse') until a character outside fSet or the
 * startPos/endPos bound is reached, then hands the resulting
 * [rangeStart, rangeEnd) span to divideUpDictionaryRange() — but only if
 * 'breakType' is one of the types this engine was constructed to handle.
 *
 * @param text        Text iterator; its current position is the scan origin.
 * @param startPos    Lower bound (inclusive) for a reverse scan.
 * @param endPos      Upper bound (exclusive) for a forward scan.
 * @param reverse     TRUE to scan backward from the current position.
 * @param breakType   Requested break type (word, line, ...).
 * @param foundBreaks Output stack receiving break positions.
 * @return Number of breaks found (0 if the break type is not handled).
 */
int32_t
DictionaryBreakEngine::findBreaks( UText *text,
                                 int32_t startPos,
                                 int32_t endPos,
                                 UBool reverse,
                                 int32_t breakType,
                                 UStack &foundBreaks ) const {
    int32_t result = 0;

    // Find the span of characters included in the set.
    int32_t start = (int32_t)utext_getNativeIndex(text);
    int32_t current;
    int32_t rangeStart;
    int32_t rangeEnd;
    UChar32 c = utext_current32(text);
    if (reverse) {
        // Walk backward while characters remain in the dictionary set.
        UBool   isDict = fSet.contains(c);
        while((current = (int32_t)utext_getNativeIndex(text)) > startPos && isDict) {
            c = utext_previous32(text);
            isDict = fSet.contains(c);
        }
        // If the scan stopped on a non-dictionary character, the dictionary
        // range starts one position past it.
        rangeStart = (current < startPos) ? startPos : current+(isDict ? 0 : 1);
        rangeEnd = start + 1;
    }
    else {
        // Walk forward while characters remain in the dictionary set.
        while((current = (int32_t)utext_getNativeIndex(text)) < endPos && fSet.contains(c)) {
            utext_next32(text);         // TODO:  recast loop for postincrement
            c = utext_current32(text);
        }
        rangeStart = start;
        rangeEnd = current;
    }
    // Only divide the range up if this engine handles the requested break type.
    if (breakType >= 0 && breakType < 32 && (((uint32_t)1 << breakType) & fTypes)) {
        result = divideUpDictionaryRange(text, rangeStart, rangeEnd, foundBreaks);
        utext_setNativeIndex(text, current);
    }

    return result;
}
     81 
// Replace the set of characters this engine handles. Called by subclass
// constructors once their script-specific set has been built.
void
DictionaryBreakEngine::setCharacters( const UnicodeSet &set ) {
    fSet = set;
    // Compact for caching
    fSet.compact();
}
     88 
     89 /*
     90  ******************************************************************
     91  */
     92 
     93 
// Helper class for improving readability of the Thai word break
// algorithm. The implementation is completely inline.

// List size, limited by the maximum number of words in the dictionary
// that form a nested sequence.
#define POSSIBLE_WORD_LIST_MAX 20

// Caches the dictionary-word candidates found at a single text offset and
// lets the break algorithm walk through them from longest to shortest.
class PossibleWord {
private:
    // list of word candidate lengths, in increasing length order
    int32_t   lengths[POSSIBLE_WORD_LIST_MAX];
    int32_t   count;      // Count of candidates
    int32_t   prefix;     // The longest match with a dictionary word
    int32_t   offset;     // Offset in the text of these candidates
    int       mark;       // Index (into lengths) of the preferred candidate
    int       current;    // Index (into lengths) of the candidate we're currently looking at

public:
    PossibleWord();
    ~PossibleWord();

    // Fill the list of candidates if needed, select the longest, and return the number found
    int       candidates( UText *text, DictionaryMatcher *dict, int32_t rangeEnd );

    // Select the currently marked candidate, point after it in the text, and invalidate self
    int32_t   acceptMarked( UText *text );

    // Back up from the current candidate to the next shorter one; return TRUE if that exists
    // and point the text after it
    UBool     backUp( UText *text );

    // Return the longest prefix this candidate location shares with a dictionary word
    int32_t   longestPrefix();

    // Mark the current candidate as the one we like
    void      markCurrent();
};
    131 
    132 inline
    133 PossibleWord::PossibleWord() {
    134     offset = -1;
    135 }
    136 
// Destructor; nothing to release — candidate lengths live in a fixed array.
inline
PossibleWord::~PossibleWord() {
}
    140 
// Fill the candidate list for the text's current position (unless the list
// is already cached for that offset), leave the iterator positioned after the
// longest candidate, and return the number of candidates found. Also resets
// 'current' and 'mark' to the longest candidate.
inline int
PossibleWord::candidates( UText *text, DictionaryMatcher *dict, int32_t rangeEnd ) {
    // TODO: If getIndex is too slow, use offset < 0 and add discardAll()
    int32_t start = (int32_t)utext_getNativeIndex(text);
    if (start != offset) {
        // New position: query the dictionary and cache results in
        // lengths/count/prefix.
        offset = start;
        prefix = dict->matches(text, rangeEnd-start, lengths, count, sizeof(lengths)/sizeof(lengths[0]));
        // Dictionary leaves text after longest prefix, not longest word. Back up.
        if (count <= 0) {
            utext_setNativeIndex(text, start);
        }
    }
    if (count > 0) {
        // Leave the iterator after the longest candidate word.
        utext_setNativeIndex(text, start+lengths[count-1]);
    }
    current = count-1;
    mark = current;
    return count;
}
    160 
    161 inline int32_t
    162 PossibleWord::acceptMarked( UText *text ) {
    163     utext_setNativeIndex(text, offset + lengths[mark]);
    164     return lengths[mark];
    165 }
    166 
    167 inline UBool
    168 PossibleWord::backUp( UText *text ) {
    169     if (current > 0) {
    170         utext_setNativeIndex(text, offset + lengths[--current]);
    171         return TRUE;
    172     }
    173     return FALSE;
    174 }
    175 
// Return the length of the longest dictionary prefix found at this offset
// (set by the most recent dictionary query in candidates()).
inline int32_t
PossibleWord::longestPrefix() {
    return prefix;
}
    180 
// Remember the current candidate as the preferred one; a later call to
// acceptMarked() will select it.
inline void
PossibleWord::markCurrent() {
    mark = current;
}
    185 
// How many words in a row are "good enough"?
#define THAI_LOOKAHEAD 3

// Will not combine a non-word with a preceding dictionary word longer than this
#define THAI_ROOT_COMBINE_THRESHOLD 3

// Will not combine a non-word that shares at least this much prefix with a
// dictionary word, with a preceding word
#define THAI_PREFIX_COMBINE_THRESHOLD 3

// Elision character (THAI CHARACTER PAIYANNOI)
#define THAI_PAIYANNOI 0x0E2F

// Repeat character (THAI CHARACTER MAIYAMOK)
#define THAI_MAIYAMOK 0x0E46

// Minimum word size, in code points
#define THAI_MIN_WORD 2

// Minimum number of characters for two words
#define THAI_MIN_WORD_SPAN (THAI_MIN_WORD * 2)
    207 
/**
 * Construct a Thai break engine over the given dictionary. The dictionary is
 * adopted and deleted in the destructor. Handles both word and line breaks.
 * On exit, 'status' reflects any failure while building the Unicode sets.
 */
ThaiBreakEngine::ThaiBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status)
    : DictionaryBreakEngine((1<<UBRK_WORD) | (1<<UBRK_LINE)),
      fDictionary(adoptDictionary)
{
    fThaiWordSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Thai:]&[:LineBreak=SA:]]"), status);
    if (U_SUCCESS(status)) {
        setCharacters(fThaiWordSet);
    }
    // NOTE(review): set construction continues even when 'status' already
    // holds a failure; presumably each applyPattern is a no-op on error per
    // the ICU error convention, leaving the sets empty — confirm.
    fMarkSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Thai:]&[:LineBreak=SA:]&[:M:]]"), status);
    fMarkSet.add(0x0020);                   // Treat SPACE like a mark: never break before it.
    fEndWordSet = fThaiWordSet;
    fEndWordSet.remove(0x0E31);             // MAI HAN-AKAT
    fEndWordSet.remove(0x0E40, 0x0E44);     // SARA E through SARA AI MAIMALAI
    fBeginWordSet.add(0x0E01, 0x0E2E);      // KO KAI through HO NOKHUK
    fBeginWordSet.add(0x0E40, 0x0E44);      // SARA E through SARA AI MAIMALAI
    fSuffixSet.add(THAI_PAIYANNOI);
    fSuffixSet.add(THAI_MAIYAMOK);

    // Compact for caching.
    fMarkSet.compact();
    fEndWordSet.compact();
    fBeginWordSet.compact();
    fSuffixSet.compact();
}
    232 
// Destructor: releases the adopted dictionary.
ThaiBreakEngine::~ThaiBreakEngine() {
    delete fDictionary;
}
    236 
/**
 * Divide up a range of known dictionary characters using the Thai dictionary.
 *
 * Greedy longest-match with up to THAI_LOOKAHEAD words of lookahead to break
 * ties, plus heuristics for non-dictionary runs, combining marks, and the
 * PAIYANNOI / MAIYAMOK suffix characters. Break positions are pushed onto
 * 'foundBreaks'; the return value is the number of words found.
 *
 * @param text        Text iterator; repositioned freely within the range.
 * @param rangeStart  Start of the dictionary character range.
 * @param rangeEnd    End (exclusive) of the dictionary character range.
 * @param foundBreaks Output stack of break positions (native indices).
 * @return Number of words found.
 */
int32_t
ThaiBreakEngine::divideUpDictionaryRange( UText *text,
                                                int32_t rangeStart,
                                                int32_t rangeEnd,
                                                UStack &foundBreaks ) const {
    if ((rangeEnd - rangeStart) < THAI_MIN_WORD_SPAN) {
        return 0;       // Not enough characters for two words
    }

    uint32_t wordsFound = 0;
    int32_t wordLength;
    int32_t current;
    UErrorCode status = U_ZERO_ERROR;
    // Ring buffer of candidate caches, indexed by wordsFound % THAI_LOOKAHEAD.
    PossibleWord words[THAI_LOOKAHEAD];
    UChar32 uc;

    utext_setNativeIndex(text, rangeStart);

    while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) {
        wordLength = 0;

        // Look for candidate words at the current position
        int candidates = words[wordsFound%THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd);

        // If we found exactly one, use that
        if (candidates == 1) {
            wordLength = words[wordsFound % THAI_LOOKAHEAD].acceptMarked(text);
            wordsFound += 1;
        }
        // If there was more than one, see which one can take us forward the most words
        else if (candidates > 1) {
            // If we're already at the end of the range, we're done
            if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) {
                goto foundBest;
            }
            do {
                int wordsMatched = 1;
                if (words[(wordsFound + 1) % THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) > 0) {
                    if (wordsMatched < 2) {
                        // Followed by another dictionary word; mark first word as a good candidate
                        words[wordsFound%THAI_LOOKAHEAD].markCurrent();
                        wordsMatched = 2;
                    }

                    // If we're already at the end of the range, we're done
                    if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) {
                        goto foundBest;
                    }

                    // See if any of the possible second words is followed by a third word
                    do {
                        // If we find a third word, stop right away
                        if (words[(wordsFound + 2) % THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd)) {
                            words[wordsFound % THAI_LOOKAHEAD].markCurrent();
                            goto foundBest;
                        }
                    }
                    while (words[(wordsFound + 1) % THAI_LOOKAHEAD].backUp(text));
                }
            }
            while (words[wordsFound % THAI_LOOKAHEAD].backUp(text));
foundBest:
            wordLength = words[wordsFound % THAI_LOOKAHEAD].acceptMarked(text);
            wordsFound += 1;
        }

        // We come here after having either found a word or not. We look ahead to the
        // next word. If it's not a dictionary word, we will combine it with the word we
        // just found (if there is one), but only if the preceding word does not exceed
        // the threshold.
        // The text iterator should now be positioned at the end of the word we found.
        if ((int32_t)utext_getNativeIndex(text) < rangeEnd && wordLength < THAI_ROOT_COMBINE_THRESHOLD) {
            // if it is a dictionary word, do nothing. If it isn't, then if there is
            // no preceding word, or the non-word shares less than the minimum threshold
            // of characters with a dictionary word, then scan to resynchronize
            if (words[wordsFound % THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0
                  && (wordLength == 0
                      || words[wordsFound%THAI_LOOKAHEAD].longestPrefix() < THAI_PREFIX_COMBINE_THRESHOLD)) {
                // Look for a plausible word boundary
                //TODO: This section will need a rework for UText.
                int32_t remaining = rangeEnd - (current+wordLength);
                UChar32 pc = utext_current32(text);
                int32_t chars = 0;
                for (;;) {
                    utext_next32(text);
                    uc = utext_current32(text);
                    // TODO: Here we're counting on the fact that the SA languages are all
                    // in the BMP. This should get fixed with the UText rework.
                    chars += 1;
                    if (--remaining <= 0) {
                        break;
                    }
                    // A plausible boundary: previous char can end a word and
                    // this char can begin one.
                    if (fEndWordSet.contains(pc) && fBeginWordSet.contains(uc)) {
                        // Maybe. See if it's in the dictionary.
                        // NOTE: In the original Apple code, checked that the next
                        // two characters after uc were not 0x0E4C THANTHAKHAT before
                        // checking the dictionary. That is just a performance filter,
                        // but it's not clear it's faster than checking the trie.
                        int candidates = words[(wordsFound + 1) % THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd);
                        utext_setNativeIndex(text, current + wordLength + chars);
                        if (candidates > 0) {
                            break;
                        }
                    }
                    pc = uc;
                }

                // Bump the word count if there wasn't already one
                if (wordLength <= 0) {
                    wordsFound += 1;
                }

                // Update the length with the passed-over characters
                wordLength += chars;
            }
            else {
                // Back up to where we were for next iteration
                utext_setNativeIndex(text, current+wordLength);
            }
        }

        // Never stop before a combining mark.
        int32_t currPos;
        while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) {
            utext_next32(text);
            wordLength += (int32_t)utext_getNativeIndex(text) - currPos;
        }

        // Look ahead for possible suffixes if a dictionary word does not follow.
        // We do this in code rather than using a rule so that the heuristic
        // resynch continues to function. For example, one of the suffix characters
        // could be a typo in the middle of a word.
        if ((int32_t)utext_getNativeIndex(text) < rangeEnd && wordLength > 0) {
            if (words[wordsFound%THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0
                && fSuffixSet.contains(uc = utext_current32(text))) {
                if (uc == THAI_PAIYANNOI) {
                    if (!fSuffixSet.contains(utext_previous32(text))) {
                        // Skip over previous end and PAIYANNOI
                        utext_next32(text);
                        utext_next32(text);
                        wordLength += 1;            // Add PAIYANNOI to word
                        uc = utext_current32(text);     // Fetch next character
                    }
                    else {
                        // Restore prior position
                        utext_next32(text);
                    }
                }
                if (uc == THAI_MAIYAMOK) {
                    if (utext_previous32(text) != THAI_MAIYAMOK) {
                        // Skip over previous end and MAIYAMOK
                        utext_next32(text);
                        utext_next32(text);
                        wordLength += 1;            // Add MAIYAMOK to word
                    }
                    else {
                        // Restore prior position
                        utext_next32(text);
                    }
                }
            }
            else {
                utext_setNativeIndex(text, current+wordLength);
            }
        }

        // Did we find a word on this iteration? If so, push it on the break stack
        if (wordLength > 0) {
            foundBreaks.push((current+wordLength), status);
        }
    }

    // Don't return a break for the end of the dictionary range if there is one there.
    // NOTE(review): when no breaks were pushed, this calls peeki() on an empty
    // stack; presumably that returns a value < rangeEnd so the branch is not
    // taken — confirm UStack::peeki semantics for an empty stack.
    if (foundBreaks.peeki() >= rangeEnd) {
        (void) foundBreaks.popi();
        wordsFound -= 1;
    }

    return wordsFound;
}
    417 
// How many words in a row are "good enough"?
#define KHMER_LOOKAHEAD 3

// Will not combine a non-word with a preceding dictionary word longer than this
#define KHMER_ROOT_COMBINE_THRESHOLD 3

// Will not combine a non-word that shares at least this much prefix with a
// dictionary word, with a preceding word
#define KHMER_PREFIX_COMBINE_THRESHOLD 3

// Minimum word size, in code points
#define KHMER_MIN_WORD 2

// Minimum number of characters for two words
#define KHMER_MIN_WORD_SPAN (KHMER_MIN_WORD * 2)
    433 
/**
 * Construct a Khmer break engine over the given dictionary. The dictionary is
 * adopted and deleted in the destructor. Handles both word and line breaks.
 * On exit, 'status' reflects any failure while building the Unicode sets.
 */
KhmerBreakEngine::KhmerBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status)
    : DictionaryBreakEngine((1 << UBRK_WORD) | (1 << UBRK_LINE)),
      fDictionary(adoptDictionary)
{
    fKhmerWordSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]]"), status);
    if (U_SUCCESS(status)) {
        setCharacters(fKhmerWordSet);
    }
    fMarkSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]&[:M:]]"), status);
    fMarkSet.add(0x0020);                   // Treat SPACE like a mark: never break before it.
    fEndWordSet = fKhmerWordSet;
    fBeginWordSet.add(0x1780, 0x17B3);      // KHMER LETTER KA through KHMER INDEPENDENT VOWEL QAU
    //fBeginWordSet.add(0x17A3, 0x17A4);      // deprecated vowels
    //fEndWordSet.remove(0x17A5, 0x17A9);     // Khmer independent vowels that can't end a word
    //fEndWordSet.remove(0x17B2);             // Khmer independent vowel that can't end a word
    fEndWordSet.remove(0x17D2);             // KHMER SIGN COENG that combines some following characters
    //fEndWordSet.remove(0x17B6, 0x17C5);     // Remove dependent vowels
//    fEndWordSet.remove(0x0E31);             // MAI HAN-AKAT
//    fEndWordSet.remove(0x0E40, 0x0E44);     // SARA E through SARA AI MAIMALAI
//    fBeginWordSet.add(0x0E01, 0x0E2E);      // KO KAI through HO NOKHUK
//    fBeginWordSet.add(0x0E40, 0x0E44);      // SARA E through SARA AI MAIMALAI
//    fSuffixSet.add(THAI_PAIYANNOI);
//    fSuffixSet.add(THAI_MAIYAMOK);

    // Compact for caching.
    fMarkSet.compact();
    fEndWordSet.compact();
    fBeginWordSet.compact();
//    fSuffixSet.compact();
}
    464 
// Destructor: releases the adopted dictionary.
KhmerBreakEngine::~KhmerBreakEngine() {
    delete fDictionary;
}
    468 
/**
 * Divide up a range of known dictionary characters using the Khmer dictionary.
 *
 * Structurally parallel to the Thai implementation: greedy longest-match with
 * up to KHMER_LOOKAHEAD words of lookahead to break ties, a resynchronization
 * scan for non-dictionary runs, and a combining-mark rule. The Thai-style
 * suffix handling is retained below only as commented-out code. Break
 * positions are pushed onto 'foundBreaks'.
 *
 * @param text        Text iterator; repositioned freely within the range.
 * @param rangeStart  Start of the dictionary character range.
 * @param rangeEnd    End (exclusive) of the dictionary character range.
 * @param foundBreaks Output stack of break positions (native indices).
 * @return Number of words found.
 */
int32_t
KhmerBreakEngine::divideUpDictionaryRange( UText *text,
                                                int32_t rangeStart,
                                                int32_t rangeEnd,
                                                UStack &foundBreaks ) const {
    if ((rangeEnd - rangeStart) < KHMER_MIN_WORD_SPAN) {
        return 0;       // Not enough characters for two words
    }

    uint32_t wordsFound = 0;
    int32_t wordLength;
    int32_t current;
    UErrorCode status = U_ZERO_ERROR;
    // Ring buffer of candidate caches, indexed by wordsFound % KHMER_LOOKAHEAD.
    PossibleWord words[KHMER_LOOKAHEAD];
    UChar32 uc;

    utext_setNativeIndex(text, rangeStart);

    while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) {
        wordLength = 0;

        // Look for candidate words at the current position
        int candidates = words[wordsFound%KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd);

        // If we found exactly one, use that
        if (candidates == 1) {
            wordLength = words[wordsFound%KHMER_LOOKAHEAD].acceptMarked(text);
            wordsFound += 1;
        }

        // If there was more than one, see which one can take us forward the most words
        else if (candidates > 1) {
            // If we're already at the end of the range, we're done
            if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) {
                goto foundBest;
            }
            do {
                int wordsMatched = 1;
                if (words[(wordsFound + 1) % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) > 0) {
                    if (wordsMatched < 2) {
                        // Followed by another dictionary word; mark first word as a good candidate
                        words[wordsFound % KHMER_LOOKAHEAD].markCurrent();
                        wordsMatched = 2;
                    }

                    // If we're already at the end of the range, we're done
                    if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) {
                        goto foundBest;
                    }

                    // See if any of the possible second words is followed by a third word
                    do {
                        // If we find a third word, stop right away
                        if (words[(wordsFound + 2) % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd)) {
                            words[wordsFound % KHMER_LOOKAHEAD].markCurrent();
                            goto foundBest;
                        }
                    }
                    while (words[(wordsFound + 1) % KHMER_LOOKAHEAD].backUp(text));
                }
            }
            while (words[wordsFound % KHMER_LOOKAHEAD].backUp(text));
foundBest:
            wordLength = words[wordsFound % KHMER_LOOKAHEAD].acceptMarked(text);
            wordsFound += 1;
        }

        // We come here after having either found a word or not. We look ahead to the
        // next word. If it's not a dictionary word, we will combine it with the word we
        // just found (if there is one), but only if the preceding word does not exceed
        // the threshold.
        // The text iterator should now be positioned at the end of the word we found.
        if ((int32_t)utext_getNativeIndex(text) < rangeEnd && wordLength < KHMER_ROOT_COMBINE_THRESHOLD) {
            // if it is a dictionary word, do nothing. If it isn't, then if there is
            // no preceding word, or the non-word shares less than the minimum threshold
            // of characters with a dictionary word, then scan to resynchronize
            if (words[wordsFound % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0
                  && (wordLength == 0
                      || words[wordsFound % KHMER_LOOKAHEAD].longestPrefix() < KHMER_PREFIX_COMBINE_THRESHOLD)) {
                // Look for a plausible word boundary
                //TODO: This section will need a rework for UText.
                int32_t remaining = rangeEnd - (current+wordLength);
                UChar32 pc = utext_current32(text);
                int32_t chars = 0;
                for (;;) {
                    utext_next32(text);
                    uc = utext_current32(text);
                    // TODO: Here we're counting on the fact that the SA languages are all
                    // in the BMP. This should get fixed with the UText rework.
                    chars += 1;
                    if (--remaining <= 0) {
                        break;
                    }
                    // A plausible boundary: previous char can end a word and
                    // this char can begin one.
                    if (fEndWordSet.contains(pc) && fBeginWordSet.contains(uc)) {
                        // Maybe. See if it's in the dictionary.
                        int candidates = words[(wordsFound + 1) % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd);
                        utext_setNativeIndex(text, current+wordLength+chars);
                        if (candidates > 0) {
                            break;
                        }
                    }
                    pc = uc;
                }

                // Bump the word count if there wasn't already one
                if (wordLength <= 0) {
                    wordsFound += 1;
                }

                // Update the length with the passed-over characters
                wordLength += chars;
            }
            else {
                // Back up to where we were for next iteration
                utext_setNativeIndex(text, current+wordLength);
            }
        }

        // Never stop before a combining mark.
        int32_t currPos;
        while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) {
            utext_next32(text);
            wordLength += (int32_t)utext_getNativeIndex(text) - currPos;
        }

        // Look ahead for possible suffixes if a dictionary word does not follow.
        // We do this in code rather than using a rule so that the heuristic
        // resynch continues to function. For example, one of the suffix characters
        // could be a typo in the middle of a word.
//        if ((int32_t)utext_getNativeIndex(text) < rangeEnd && wordLength > 0) {
//            if (words[wordsFound%KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0
//                && fSuffixSet.contains(uc = utext_current32(text))) {
//                if (uc == KHMER_PAIYANNOI) {
//                    if (!fSuffixSet.contains(utext_previous32(text))) {
//                        // Skip over previous end and PAIYANNOI
//                        utext_next32(text);
//                        utext_next32(text);
//                        wordLength += 1;            // Add PAIYANNOI to word
//                        uc = utext_current32(text);     // Fetch next character
//                    }
//                    else {
//                        // Restore prior position
//                        utext_next32(text);
//                    }
//                }
//                if (uc == KHMER_MAIYAMOK) {
//                    if (utext_previous32(text) != KHMER_MAIYAMOK) {
//                        // Skip over previous end and MAIYAMOK
//                        utext_next32(text);
//                        utext_next32(text);
//                        wordLength += 1;            // Add MAIYAMOK to word
//                    }
//                    else {
//                        // Restore prior position
//                        utext_next32(text);
//                    }
//                }
//            }
//            else {
//                utext_setNativeIndex(text, current+wordLength);
//            }
//        }

        // Did we find a word on this iteration? If so, push it on the break stack
        if (wordLength > 0) {
            foundBreaks.push((current+wordLength), status);
        }
    }

    // Don't return a break for the end of the dictionary range if there is one there.
    // NOTE(review): when no breaks were pushed, this calls peeki() on an empty
    // stack; presumably that returns a value < rangeEnd so the branch is not
    // taken — confirm UStack::peeki semantics for an empty stack.
    if (foundBreaks.peeki() >= rangeEnd) {
        (void) foundBreaks.popi();
        wordsFound -= 1;
    }

    return wordsFound;
}
    646 
    647 #if !UCONFIG_NO_NORMALIZATION
/*
 ******************************************************************
 * CjkBreakEngine
 */
// Maximum uint32_t value, used as an "infinite" cost sentinel by the CJK
// break algorithm (presumably in code beyond this chunk — confirm usage).
static const uint32_t kuint32max = 0xFFFFFFFF;
    653 CjkBreakEngine::CjkBreakEngine(DictionaryMatcher *adoptDictionary, LanguageType type, UErrorCode &status)
    654 : DictionaryBreakEngine(1 << UBRK_WORD), fDictionary(adoptDictionary) {
    655     // Korean dictionary only includes Hangul syllables
    656     fHangulWordSet.applyPattern(UNICODE_STRING_SIMPLE("[\\uac00-\\ud7a3]"), status);
    657     fHanWordSet.applyPattern(UNICODE_STRING_SIMPLE("[:Han:]"), status);
    658     fKatakanaWordSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Katakana:]\\uff9e\\uff9f]"), status);
    659     fHiraganaWordSet.applyPattern(UNICODE_STRING_SIMPLE("[:Hiragana:]"), status);
    660 
    661     if (U_SUCCESS(status)) {
    662         // handle Korean and Japanese/Chinese using different dictionaries
    663         if (type == kKorean) {
    664             setCharacters(fHangulWordSet);
    665         } else { //Chinese and Japanese
    666             UnicodeSet cjSet;
    667             cjSet.addAll(fHanWordSet);
    668             cjSet.addAll(fKatakanaWordSet);
    669             cjSet.addAll(fHiraganaWordSet);
    670             cjSet.add(UNICODE_STRING_SIMPLE("\\uff70\\u30fc"));
    671             setCharacters(cjSet);
    672         }
    673     }
    674 }
    675 
// Releases the DictionaryMatcher adopted at construction time.
CjkBreakEngine::~CjkBreakEngine(){
    delete fDictionary;
}
    679 
    680 // The katakanaCost values below are based on the length frequencies of all
    681 // katakana phrases in the dictionary
    682 static const int kMaxKatakanaLength = 8;
    683 static const int kMaxKatakanaGroupLength = 20;
    684 static const uint32_t maxSnlp = 255;
    685 
    686 static inline uint32_t getKatakanaCost(int wordLength){
    687     //TODO: fill array with actual values from dictionary!
    688     static const uint32_t katakanaCost[kMaxKatakanaLength + 1]
    689                                        = {8192, 984, 408, 240, 204, 252, 300, 372, 480};
    690     return (wordLength > kMaxKatakanaLength) ? 8192 : katakanaCost[wordLength];
    691 }
    692 
    693 static inline bool isKatakana(uint16_t value) {
    694     return (value >= 0x30A1u && value <= 0x30FEu && value != 0x30FBu) ||
    695             (value >= 0xFF66u && value <= 0xFF9fu);
    696 }
    697 
    698 // A very simple helper class to streamline the buffer handling in
    699 // divideUpDictionaryRange.
    700 template<class T, size_t N>
    701 class AutoBuffer {
    702 public:
    703     AutoBuffer(size_t size) : buffer(stackBuffer), capacity(N) {
    704         if (size > N) {
    705             buffer = reinterpret_cast<T*>(uprv_malloc(sizeof(T)*size));
    706             capacity = size;
    707         }
    708     }
    709     ~AutoBuffer() {
    710         if (buffer != stackBuffer)
    711             uprv_free(buffer);
    712     }
    713 
    714     T* elems() {
    715         return buffer;
    716     }
    717 
    718     const T& operator[] (size_t i) const {
    719         return buffer[i];
    720     }
    721 
    722     T& operator[] (size_t i) {
    723         return buffer[i];
    724     }
    725 
    726     // resize without copy
    727     void resize(size_t size) {
    728         if (size <= capacity)
    729             return;
    730         if (buffer != stackBuffer)
    731             uprv_free(buffer);
    732         buffer = reinterpret_cast<T*>(uprv_malloc(sizeof(T)*size));
    733         capacity = size;
    734     }
    735 
    736 private:
    737     T stackBuffer[N];
    738     T* buffer;
    739     AutoBuffer();
    740     size_t capacity;
    741 };
    742 
    743 
    744 /*
    745  * @param text A UText representing the text
    746  * @param rangeStart The start of the range of dictionary characters
    747  * @param rangeEnd The end of the range of dictionary characters
    748  * @param foundBreaks Output of C array of int32_t break positions, or 0
    749  * @return The number of breaks found
    750  */
int32_t
CjkBreakEngine::divideUpDictionaryRange( UText *text,
        int32_t rangeStart,
        int32_t rangeEnd,
        UStack &foundBreaks ) const {
    // Empty or inverted range: nothing to segment.
    if (rangeStart >= rangeEnd) {
        return 0;
    }

    const size_t defaultInputLength = 80;
    size_t inputLength = rangeEnd - rangeStart;
    // TODO: Replace by UnicodeString.
    AutoBuffer<UChar, defaultInputLength> charString(inputLength);

    // Normalize the input string and put it in normalizedText.
    // The map from the indices of the normalized input to the raw
    // input is kept in charPositions.
    UErrorCode status = U_ZERO_ERROR;
    // NOTE(review): assumes the native-index width of the range equals its
    // UTF-16 length (true for UTF-16 backed UText) — confirm for other
    // backing stores.
    utext_extract(text, rangeStart, rangeEnd, charString.elems(), inputLength, &status);
    if (U_FAILURE(status)) {
        return 0;
    }

    UnicodeString inputString(charString.elems(), inputLength);
    // TODO: Use Normalizer2.
    UNormalizationMode norm_mode = UNORM_NFKC;
    // Treat the input as already normalized when the quick check passes or
    // the full check confirms it; this avoids building a normalized copy.
    UBool isNormalized =
        Normalizer::quickCheck(inputString, norm_mode, status) == UNORM_YES ||
        Normalizer::isNormalized(inputString, norm_mode, status);

    // charPositions[i] is the UTF-16 index in the raw input corresponding to
    // code point i of the (possibly) normalized text; entry numChars maps
    // the end of the text.
    // TODO: Replace by UVector32.
    AutoBuffer<int32_t, defaultInputLength> charPositions(inputLength + 1);
    int numChars = 0;
    UText normalizedText = UTEXT_INITIALIZER;
    // Needs to be declared here because normalizedText holds onto its buffer.
    UnicodeString normalizedString;
    if (isNormalized) {
        // Identity mapping: walk the input one code point at a time.
        int32_t index = 0;
        charPositions[0] = 0;
        while(index < inputString.length()) {
            index = inputString.moveIndex32(index, 1);
            charPositions[++numChars] = index;
        }
        utext_openUnicodeString(&normalizedText, &inputString, &status);
    }
    else {
        // Build the normalized copy, then record, for each normalized code
        // point, how far the incremental Normalizer has advanced in the raw
        // input — that is the index map back to the original text.
        Normalizer::normalize(inputString, norm_mode, 0, normalizedString, status);
        if (U_FAILURE(status)) {
            return 0;
        }
        charPositions.resize(normalizedString.length() + 1);
        Normalizer normalizer(charString.elems(), inputLength, norm_mode);
        int32_t index = 0;
        charPositions[0] = 0;
        while(index < normalizer.endIndex()){
            /* UChar32 uc = */ normalizer.next();
            charPositions[++numChars] = index = normalizer.getIndex();
        }
        utext_openUnicodeString(&normalizedText, &normalizedString, &status);
    }

    if (U_FAILURE(status)) {
        return 0;
    }

    // From this point on, all the indices refer to the indices of
    // the normalized input string.

    // bestSnlp[i] is the snlp of the best segmentation of the first i
    // characters in the range to be matched. kuint32max marks positions no
    // segmentation has reached yet.
    // TODO: Replace by UVector32.
    AutoBuffer<uint32_t, defaultInputLength> bestSnlp(numChars + 1);
    bestSnlp[0] = 0;
    for(int i = 1; i <= numChars; i++) {
        bestSnlp[i] = kuint32max;
    }

    // prev[i] is the index of the last CJK character in the previous word in
    // the best segmentation of the first i characters.
    // TODO: Replace by UVector32.
    AutoBuffer<int, defaultInputLength> prev(numChars + 1);
    for(int i = 0; i <= numChars; i++){
        prev[i] = -1;
    }

    const size_t maxWordSize = 20;
    // Scratch output arrays filled by DictionaryMatcher::matches().
    // TODO: Replace both with UVector32.
    AutoBuffer<int32_t, maxWordSize> values(numChars);
    AutoBuffer<int32_t, maxWordSize> lengths(numChars);

    // Dynamic programming to find the best segmentation.
    // For each reachable position i, relax bestSnlp over every dictionary
    // word starting at i (Viterbi-style shortest path on word costs).
    bool is_prev_katakana = false;
    for (int32_t i = 0; i < numChars; ++i) {
        //utext_setNativeIndex(text, rangeStart + i);
        utext_setNativeIndex(&normalizedText, i);
        // Skip positions that no segmentation reaches.
        if (bestSnlp[i] == kuint32max)
            continue;

        int32_t count;
        // limit maximum word length matched to size of current substring
        int32_t maxSearchLength = (i + maxWordSize < (size_t) numChars)? maxWordSize : (numChars - i);

        fDictionary->matches(&normalizedText, maxSearchLength, lengths.elems(), count, maxSearchLength, values.elems());

        // if there are no single character matches found in the dictionary
        // starting with this character, treat character as a 1-character word
        // with the highest value possible, i.e. the least likely to occur.
        // Exclude Korean characters from this treatment, as they should be left
        // together by default.
        if((count == 0 || lengths[0] != 1) &&
                !fHangulWordSet.contains(utext_current32(&normalizedText))) {
            values[count] = maxSnlp;
            lengths[count++] = 1;
        }

        // Relaxation step: a word of lengths[j] starting at i offers a path
        // to position i + lengths[j] with cost bestSnlp[i] + values[j].
        for (int j = 0; j < count; j++) {
            uint32_t newSnlp = bestSnlp[i] + values[j];
            if (newSnlp < bestSnlp[lengths[j] + i]) {
                bestSnlp[lengths[j] + i] = newSnlp;
                prev[lengths[j] + i] = i;
            }
        }

        // In Japanese,
        // Katakana word in single character is pretty rare. So we apply
        // the following heuristic to Katakana: any continuous run of Katakana
        // characters is considered a candidate word with a default cost
        // specified in the katakanaCost table according to its length.
        //utext_setNativeIndex(text, rangeStart + i);
        utext_setNativeIndex(&normalizedText, i);
        bool is_katakana = isKatakana(utext_current32(&normalizedText));
        // Only process a run once, from its first character.
        if (!is_prev_katakana && is_katakana) {
            int j = i + 1;
            utext_next32(&normalizedText);
            // Find the end of the continuous run of Katakana characters
            while (j < numChars && (j - i) < kMaxKatakanaGroupLength &&
                    isKatakana(utext_current32(&normalizedText))) {
                utext_next32(&normalizedText);
                ++j;
            }
            // Runs that hit kMaxKatakanaGroupLength are not offered as a
            // single candidate word.
            if ((j - i) < kMaxKatakanaGroupLength) {
                uint32_t newSnlp = bestSnlp[i] + getKatakanaCost(j - i);
                if (newSnlp < bestSnlp[j]) {
                    bestSnlp[j] = newSnlp;
                    prev[j] = i;
                }
            }
        }
        is_prev_katakana = is_katakana;
    }

    // Start pushing the optimal offset index into t_boundary (t for tentative).
    // prev[numChars] is guaranteed to be meaningful.
    // We'll first push in the reverse order, i.e.,
    // t_boundary[0] = numChars, and afterwards do a swap.
    // TODO: Replace by UVector32.
    AutoBuffer<int, maxWordSize> t_boundary(numChars + 1);

    int numBreaks = 0;
    // No segmentation found, set boundary to end of range
    if (bestSnlp[numChars] == kuint32max) {
        t_boundary[numBreaks++] = numChars;
    } else {
        // Walk the prev[] chain backwards from the end of the range to
        // recover the boundaries of the best segmentation.
        for (int i = numChars; i > 0; i = prev[i]) {
            t_boundary[numBreaks++] = i;
        }
        U_ASSERT(prev[t_boundary[numBreaks - 1]] == 0);
    }

    // Reverse offset index in t_boundary.
    // Don't add a break for the start of the dictionary range if there is one
    // there already.
    if (foundBreaks.size() == 0 || foundBreaks.peeki() < rangeStart) {
        t_boundary[numBreaks++] = 0;
    }

    // Now that we're done, convert positions in t_bdry[] (indices in
    // the normalized input string) back to indices in the raw input string
    // while reversing t_bdry and pushing values to foundBreaks.
    for (int i = numBreaks-1; i >= 0; i--) {
        foundBreaks.push(charPositions[t_boundary[i]] + rangeStart, status);
    }

    utext_close(&normalizedText);
    return numBreaks;
}
    937 #endif
    938 
    939 U_NAMESPACE_END
    940 
    941 #endif /* #if !UCONFIG_NO_BREAK_ITERATION */
    942 
    943