      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 package com.android.inputmethod.latin.makedict;
     18 
     19 import com.android.inputmethod.latin.makedict.BinaryDictDecoderUtils.CharEncoding;
     20 import com.android.inputmethod.latin.makedict.FormatSpec.FormatOptions;
     21 import com.android.inputmethod.latin.makedict.FusionDictionary.PtNode;
     22 import com.android.inputmethod.latin.makedict.FusionDictionary.PtNodeArray;
     23 
     24 import java.io.ByteArrayOutputStream;
     25 import java.io.IOException;
     26 import java.io.OutputStream;
     27 import java.util.ArrayList;
     28 import java.util.HashMap;
     29 import java.util.Map.Entry;
     30 
     31 /**
     32  * Encodes binary files for a FusionDictionary.
     33  *
     34  * All the methods in this class are static.
     35  *
     36  * TODO: Rename this class to DictEncoderUtils.
     37  */
     38 public class BinaryDictEncoderUtils {
     39 
     40     private static final boolean DBG = MakedictLog.DBG;
     41 
     42     private BinaryDictEncoderUtils() {
     43         // This utility class is not publicly instantiable.
     44     }
     45 
      46     // Arbitrary limit on how many passes we consider address size compression should
      47     // take to terminate. At the time of this writing, our largest dictionary completes
      48     // compression in five passes.
      49     // If the number of passes exceeds this limit, makedict bails with an exception on
      50     // suspicion that a bug is causing an infinite loop.
     51     private static final int MAX_PASSES = 24;
     52 
     53     /**
     54      * Compute the binary size of the character array.
     55      *
      56      * If there is only one character, this is the size of that character. If there are several,
      57      * it's the sum of their sizes + 1 byte for the terminator.
     58      *
     59      * @param characters the character array
     60      * @return the size of the char array, including the terminator if any
     61      */
     62     static int getPtNodeCharactersSize(final int[] characters,
     63             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
     64         int size = CharEncoding.getCharArraySize(characters, codePointToOneByteCodeMap);
     65         if (characters.length > 1) size += FormatSpec.PTNODE_TERMINATOR_SIZE;
     66         return size;
     67     }
     68 
     69     /**
      70      * Compute the binary size of the character array in a PtNode.
      71      *
      72      * If there is only one character, this is the size of that character. If there are several,
      73      * it's the sum of their sizes + 1 byte for the terminator.
     74      *
     75      * @param ptNode the PtNode
     76      * @return the size of the char array, including the terminator if any
     77      */
     78     private static int getPtNodeCharactersSize(final PtNode ptNode,
     79             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
     80         return getPtNodeCharactersSize(ptNode.mChars, codePointToOneByteCodeMap);
     81     }
     82 
     83     /**
     84      * Compute the binary size of the PtNode count for a node array.
     85      * @param nodeArray the nodeArray
     86      * @return the size of the PtNode count, either 1 or 2 bytes.
     87      */
     88     private static int getPtNodeCountSize(final PtNodeArray nodeArray) {
     89         return BinaryDictIOUtils.getPtNodeCountSize(nodeArray.mData.size());
     90     }
     91 
     92     /**
     93      * Compute the maximum size of a PtNode, assuming 3-byte addresses for everything.
     94      *
     95      * @param ptNode the PtNode to compute the size of.
     96      * @return the maximum size of the PtNode.
     97      */
     98     private static int getPtNodeMaximumSize(final PtNode ptNode,
     99             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    100         int size = getNodeHeaderSize(ptNode, codePointToOneByteCodeMap);
    101         if (ptNode.isTerminal()) {
    102             // If terminal, one byte for the frequency.
    103             size += FormatSpec.PTNODE_FREQUENCY_SIZE;
    104         }
    105         size += FormatSpec.PTNODE_MAX_ADDRESS_SIZE; // For children address
    106         if (null != ptNode.mBigrams) {
    107             size += (FormatSpec.PTNODE_ATTRIBUTE_FLAGS_SIZE
    108                     + FormatSpec.PTNODE_ATTRIBUTE_MAX_ADDRESS_SIZE)
    109                     * ptNode.mBigrams.size();
    110         }
    111         return size;
    112     }
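             // Worked size sketch (illustrative, not part of the original code): assuming the usual
             // FormatSpec sizes (1-byte flags, 1-byte frequency, 3-byte maximum addresses, 1-byte
             // attribute flags), a terminal PtNode holding a single one-byte character and two
             // bigrams is estimated at 1 + 1 + 1 + 3 + 2 * (1 + 3) = 14 bytes by the method above.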
    113 
    114     /**
     115      * Compute the maximum size of each PtNode of a PtNode array, assuming 3-byte addresses for
     116      * everything, and cache it in the `mCachedSize' member of each node; then deduce the size of
     117      * the containing node array, and cache it in its `mCachedSize' member.
    118      *
    119      * @param ptNodeArray the node array to compute the maximum size of.
    120      */
    121     private static void calculatePtNodeArrayMaximumSize(final PtNodeArray ptNodeArray,
    122             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    123         int size = getPtNodeCountSize(ptNodeArray);
    124         for (PtNode node : ptNodeArray.mData) {
    125             final int nodeSize = getPtNodeMaximumSize(node, codePointToOneByteCodeMap);
    126             node.mCachedSize = nodeSize;
    127             size += nodeSize;
    128         }
    129         ptNodeArray.mCachedSize = size;
    130     }
    131 
    132     /**
    133      * Compute the size of the header (flag + [parent address] + characters size) of a PtNode.
    134      *
    135      * @param ptNode the PtNode of which to compute the size of the header
    136      */
    137     private static int getNodeHeaderSize(final PtNode ptNode,
    138             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    139         return FormatSpec.PTNODE_FLAGS_SIZE + getPtNodeCharactersSize(ptNode,
    140                 codePointToOneByteCodeMap);
    141     }
    142 
    143     /**
    144      * Compute the size, in bytes, that an address will occupy.
    145      *
    146      * This can be used either for children addresses (which are always positive) or for
     147      * attributes, which may be positive or negative but
    148      * store their sign bit separately.
    149      *
    150      * @param address the address
    151      * @return the byte size.
    152      */
    153     static int getByteSize(final int address) {
    154         assert(address <= FormatSpec.UINT24_MAX);
    155         if (!BinaryDictIOUtils.hasChildrenAddress(address)) {
    156             return 0;
    157         } else if (Math.abs(address) <= FormatSpec.UINT8_MAX) {
    158             return 1;
    159         } else if (Math.abs(address) <= FormatSpec.UINT16_MAX) {
    160             return 2;
    161         } else {
    162             return 3;
    163         }
    164     }
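             // Illustrative values (a sketch, assuming NO_CHILDREN_ADDRESS is the only input for
             // which hasChildrenAddress() is false):
             //   getByteSize(FormatSpec.NO_CHILDREN_ADDRESS) -> 0 (nothing to store)
             //   getByteSize(0x12) -> 1, getByteSize(-0x1234) -> 2, getByteSize(0x123456) -> 3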
    165 
    166     static int writeUIntToBuffer(final byte[] buffer, final int fromPosition, final int value,
    167             final int size) {
    168         int position = fromPosition;
    169         switch(size) {
    170             case 4:
    171                 buffer[position++] = (byte) ((value >> 24) & 0xFF);
    172                 /* fall through */
    173             case 3:
    174                 buffer[position++] = (byte) ((value >> 16) & 0xFF);
    175                 /* fall through */
    176             case 2:
    177                 buffer[position++] = (byte) ((value >> 8) & 0xFF);
    178                 /* fall through */
    179             case 1:
    180                 buffer[position++] = (byte) (value & 0xFF);
    181                 break;
    182             default:
    183                 /* nop */
    184         }
    185         return position;
    186     }
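             // Minimal usage sketch (hypothetical buffer and value, not part of the original code):
             //   final byte[] buf = new byte[3];
             //   final int next = writeUIntToBuffer(buf, 0, 0x123456, 3);
             //   // buf is now { 0x12, 0x34, 0x56 } (big-endian) and next == 3.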
    187 
    188     static void writeUIntToStream(final OutputStream stream, final int value, final int size)
    189             throws IOException {
    190         switch(size) {
    191             case 4:
    192                 stream.write((value >> 24) & 0xFF);
    193                 /* fall through */
    194             case 3:
    195                 stream.write((value >> 16) & 0xFF);
    196                 /* fall through */
    197             case 2:
    198                 stream.write((value >> 8) & 0xFF);
    199                 /* fall through */
    200             case 1:
    201                 stream.write(value & 0xFF);
    202                 break;
    203             default:
    204                 /* nop */
    205         }
    206     }
    207 
    208     // End utility methods
    209 
    210     // This method is responsible for finding a nice ordering of the nodes that favors run-time
    211     // cache performance and dictionary size.
    212     /* package for tests */ static ArrayList<PtNodeArray> flattenTree(
    213             final PtNodeArray rootNodeArray) {
    214         final int treeSize = FusionDictionary.countPtNodes(rootNodeArray);
    215         MakedictLog.i("Counted nodes : " + treeSize);
    216         final ArrayList<PtNodeArray> flatTree = new ArrayList<>(treeSize);
    217         return flattenTreeInner(flatTree, rootNodeArray);
    218     }
    219 
    220     private static ArrayList<PtNodeArray> flattenTreeInner(final ArrayList<PtNodeArray> list,
    221             final PtNodeArray ptNodeArray) {
    222         // Removing the node is necessary if the tails are merged, because we would then
     223         // add the same node several times when we only want it once. A number of places in
     224         // the code also depend on each node appearing only once in the list.
    225         // Merging tails can only be done if there are no attributes. Searching for attributes
    226         // in LatinIME code depends on a total breadth-first ordering, which merging tails
    227         // breaks. If there are no attributes, it should be fine (and reduce the file size)
    228         // to merge tails, and removing the node from the list would be necessary. However,
    229         // we don't merge tails because breaking the breadth-first ordering would result in
    230         // extreme overhead at bigram lookup time (it would make the search function O(n) instead
    231         // of the current O(log(n)), where n=number of nodes in the dictionary which is pretty
    232         // high).
     233         // If no nodes are ever merged, the same node can't appear twice in the list, hence
     234         // searching for duplicates is unnecessary. It is also very costly in performance,
     235         // since `list' is an ArrayList, so the search is an O(n) operation that runs on all
     236         // nodes, making this simple list.remove operation O(n*n) overall. On Android this
     237         // overhead is very high.
     238         // For future reference, the code to remove duplicates is simply: list.remove(node);
    239         list.add(ptNodeArray);
    240         final ArrayList<PtNode> branches = ptNodeArray.mData;
    241         for (PtNode ptNode : branches) {
    242             if (null != ptNode.mChildren) flattenTreeInner(list, ptNode.mChildren);
    243         }
    244         return list;
    245     }
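             // Ordering sketch (hypothetical tiny trie, not part of the original code): for a root
             // array R whose PtNodes "a" and "b" point to child arrays A and B respectively,
             // flattenTree(R) returns [R, A, B]; every node array appears exactly once in the list.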
    246 
    247     /**
    248      * Get the offset from a position inside a current node array to a target node array, during
    249      * update.
    250      *
    251      * If the current node array is before the target node array, the target node array has not
    252      * been updated yet, so we should return the offset from the old position of the current node
    253      * array to the old position of the target node array. If on the other hand the target is
    254      * before the current node array, it already has been updated, so we should return the offset
    255      * from the new position in the current node array to the new position in the target node
    256      * array.
    257      *
    258      * @param currentNodeArray node array containing the PtNode where the offset will be written
    259      * @param offsetFromStartOfCurrentNodeArray offset, in bytes, from the start of currentNodeArray
    260      * @param targetNodeArray the target node array to get the offset to
    261      * @return the offset to the target node array
    262      */
    263     private static int getOffsetToTargetNodeArrayDuringUpdate(final PtNodeArray currentNodeArray,
    264             final int offsetFromStartOfCurrentNodeArray, final PtNodeArray targetNodeArray) {
    265         final boolean isTargetBeforeCurrent = (targetNodeArray.mCachedAddressBeforeUpdate
    266                 < currentNodeArray.mCachedAddressBeforeUpdate);
    267         if (isTargetBeforeCurrent) {
    268             return targetNodeArray.mCachedAddressAfterUpdate
    269                     - (currentNodeArray.mCachedAddressAfterUpdate
    270                             + offsetFromStartOfCurrentNodeArray);
    271         }
    272         return targetNodeArray.mCachedAddressBeforeUpdate
    273                 - (currentNodeArray.mCachedAddressBeforeUpdate + offsetFromStartOfCurrentNodeArray);
    274     }
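             // Worked example (hypothetical addresses): say the current array was at 100 before the
             // update and is now at 90, the offset field sits 5 bytes into it, and the target array
             // was at 40 before the update and is now at 30. The target is before the current array,
             // so the method returns 30 - (90 + 5) = -65, i.e. the offset between the new positions.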
    275 
    276     /**
    277      * Get the offset from a position inside a current node array to a target PtNode, during
    278      * update.
    279      *
    280      * @param currentNodeArray node array containing the PtNode where the offset will be written
    281      * @param offsetFromStartOfCurrentNodeArray offset, in bytes, from the start of currentNodeArray
    282      * @param targetPtNode the target PtNode to get the offset to
    283      * @return the offset to the target PtNode
    284      */
    285     // TODO: is there any way to factorize this method with the one above?
    286     private static int getOffsetToTargetPtNodeDuringUpdate(final PtNodeArray currentNodeArray,
    287             final int offsetFromStartOfCurrentNodeArray, final PtNode targetPtNode) {
    288         final int oldOffsetBasePoint = currentNodeArray.mCachedAddressBeforeUpdate
    289                 + offsetFromStartOfCurrentNodeArray;
    290         final boolean isTargetBeforeCurrent = (targetPtNode.mCachedAddressBeforeUpdate
    291                 < oldOffsetBasePoint);
    292         // If the target is before the current node array, then its address has already been
    293         // updated. We can use the AfterUpdate member, and compare it to our own member after
    294         // update. Otherwise, the AfterUpdate member is not updated yet, so we need to use the
    295         // BeforeUpdate member, and of course we have to compare this to our own address before
    296         // update.
    297         if (isTargetBeforeCurrent) {
    298             final int newOffsetBasePoint = currentNodeArray.mCachedAddressAfterUpdate
    299                     + offsetFromStartOfCurrentNodeArray;
    300             return targetPtNode.mCachedAddressAfterUpdate - newOffsetBasePoint;
    301         }
    302         return targetPtNode.mCachedAddressBeforeUpdate - oldOffsetBasePoint;
    303     }
    304 
    305     /**
    306      * Computes the actual node array size, based on the cached addresses of the children nodes.
    307      *
    308      * Each node array stores its tentative address. During dictionary address computing, these
     309      * are not final, but they can be used to compute the node array size (the node array size
     310      * depends on the addresses of the children because the number of bytes necessary to store an
     311      * address depends on its numeric value). The return value indicates whether the node array
     312      * contents (as in, any of the addresses stored in the cache fields) have changed with
     313      * respect to their previous value.
    314      *
    315      * @param ptNodeArray the node array to compute the size of.
    316      * @param dict the dictionary in which the word/attributes are to be found.
    317      * @return false if none of the cached addresses inside the node array changed, true otherwise.
    318      */
    319     private static boolean computeActualPtNodeArraySize(final PtNodeArray ptNodeArray,
    320             final FusionDictionary dict,
    321             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    322         boolean changed = false;
    323         int size = getPtNodeCountSize(ptNodeArray);
    324         for (PtNode ptNode : ptNodeArray.mData) {
    325             ptNode.mCachedAddressAfterUpdate = ptNodeArray.mCachedAddressAfterUpdate + size;
    326             if (ptNode.mCachedAddressAfterUpdate != ptNode.mCachedAddressBeforeUpdate) {
    327                 changed = true;
    328             }
    329             int nodeSize = getNodeHeaderSize(ptNode, codePointToOneByteCodeMap);
    330             if (ptNode.isTerminal()) {
    331                 nodeSize += FormatSpec.PTNODE_FREQUENCY_SIZE;
    332             }
    333             if (null != ptNode.mChildren) {
    334                 nodeSize += getByteSize(getOffsetToTargetNodeArrayDuringUpdate(ptNodeArray,
    335                         nodeSize + size, ptNode.mChildren));
    336             }
    337             if (null != ptNode.mBigrams) {
    338                 for (WeightedString bigram : ptNode.mBigrams) {
    339                     final int offset = getOffsetToTargetPtNodeDuringUpdate(ptNodeArray,
    340                             nodeSize + size + FormatSpec.PTNODE_ATTRIBUTE_FLAGS_SIZE,
    341                             FusionDictionary.findWordInTree(dict.mRootNodeArray, bigram.mWord));
    342                     nodeSize += getByteSize(offset) + FormatSpec.PTNODE_ATTRIBUTE_FLAGS_SIZE;
    343                 }
    344             }
    345             ptNode.mCachedSize = nodeSize;
    346             size += nodeSize;
    347         }
    348         if (ptNodeArray.mCachedSize != size) {
    349             ptNodeArray.mCachedSize = size;
    350             changed = true;
    351         }
    352         return changed;
    353     }
    354 
    355     /**
     356      * Initializes the cached addresses of the node arrays, and of the nodes they contain, from their sizes.
    357      *
    358      * @param flatNodes the list of node arrays.
     359      * @return the total byte size of the entire list of node arrays.
    360      */
    361     private static int initializePtNodeArraysCachedAddresses(
    362             final ArrayList<PtNodeArray> flatNodes) {
    363         int nodeArrayOffset = 0;
    364         for (final PtNodeArray nodeArray : flatNodes) {
    365             nodeArray.mCachedAddressBeforeUpdate = nodeArrayOffset;
    366             int nodeCountSize = getPtNodeCountSize(nodeArray);
     367             int nodeOffset = 0;
     368             for (final PtNode ptNode : nodeArray.mData) {
     369                 ptNode.mCachedAddressBeforeUpdate = ptNode.mCachedAddressAfterUpdate =
     370                         nodeCountSize + nodeArrayOffset + nodeOffset;
     371                 nodeOffset += ptNode.mCachedSize;
    372             }
    373             nodeArrayOffset += nodeArray.mCachedSize;
    374         }
    375         return nodeArrayOffset;
    376     }
    377 
    378     /**
    379      * Updates the cached addresses of node arrays after recomputing their new positions.
    380      *
    381      * @param flatNodes the list of node arrays.
    382      */
    383     private static void updatePtNodeArraysCachedAddresses(final ArrayList<PtNodeArray> flatNodes) {
    384         for (final PtNodeArray nodeArray : flatNodes) {
    385             nodeArray.mCachedAddressBeforeUpdate = nodeArray.mCachedAddressAfterUpdate;
    386             for (final PtNode ptNode : nodeArray.mData) {
    387                 ptNode.mCachedAddressBeforeUpdate = ptNode.mCachedAddressAfterUpdate;
    388             }
    389         }
    390     }
    391 
    392     /**
    393      * Compute the addresses and sizes of an ordered list of PtNode arrays.
    394      *
     395      * This method takes a list of PtNode arrays and will update their cached address and size
     396      * values so that they can be written into a file. It determines the smallest size each of the
     397      * PtNode arrays can have, given the addresses of its children and attributes, and stores that
     398      * into each PtNode.
     399      * The order of the PtNodes is given by the order of the list. This method makes no effort
     400      * to find a good order; it only mechanically computes the size this order results in.
     401      *
     402      * @param dict the dictionary
     403      * @param flatNodes the ordered list of PtNode arrays
     404      * @return the same list it was passed, with the nodes updated for address and size.
    405      */
    406     /* package */ static ArrayList<PtNodeArray> computeAddresses(final FusionDictionary dict,
    407             final ArrayList<PtNodeArray> flatNodes,
    408             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    409         // First get the worst possible sizes and offsets
    410         for (final PtNodeArray n : flatNodes) {
    411             calculatePtNodeArrayMaximumSize(n, codePointToOneByteCodeMap);
    412         }
    413         final int offset = initializePtNodeArraysCachedAddresses(flatNodes);
    414 
    415         MakedictLog.i("Compressing the array addresses. Original size : " + offset);
    416         MakedictLog.i("(Recursively seen size : " + offset + ")");
    417 
    418         int passes = 0;
    419         boolean changesDone = false;
    420         do {
    421             changesDone = false;
    422             int ptNodeArrayStartOffset = 0;
    423             for (final PtNodeArray ptNodeArray : flatNodes) {
    424                 ptNodeArray.mCachedAddressAfterUpdate = ptNodeArrayStartOffset;
    425                 final int oldNodeArraySize = ptNodeArray.mCachedSize;
    426                 final boolean changed = computeActualPtNodeArraySize(ptNodeArray, dict,
    427                         codePointToOneByteCodeMap);
    428                 final int newNodeArraySize = ptNodeArray.mCachedSize;
    429                 if (oldNodeArraySize < newNodeArraySize) {
    430                     throw new RuntimeException("Increased size ?!");
    431                 }
    432                 ptNodeArrayStartOffset += newNodeArraySize;
    433                 changesDone |= changed;
    434             }
    435             updatePtNodeArraysCachedAddresses(flatNodes);
    436             ++passes;
    437             if (passes > MAX_PASSES) throw new RuntimeException("Too many passes - probably a bug");
    438         } while (changesDone);
    439 
    440         final PtNodeArray lastPtNodeArray = flatNodes.get(flatNodes.size() - 1);
    441         MakedictLog.i("Compression complete in " + passes + " passes.");
    442         MakedictLog.i("After address compression : "
    443                 + (lastPtNodeArray.mCachedAddressAfterUpdate + lastPtNodeArray.mCachedSize));
    444 
    445         return flatNodes;
    446     }
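             // Convergence sketch: the loop above starts from worst-case 3-byte offsets; whenever a
             // child ends up close enough for its offset field to shrink, every later array moves
             // toward the start of the file, which may shrink further offsets on the next pass. The
             // sizes can only decrease, so the iteration reaches a fixed point (capped by MAX_PASSES).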
    447 
    448     /**
    449      * Sanity-checking method.
    450      *
    451      * This method checks a list of PtNode arrays for juxtaposition, that is, it will do
     452      * nothing if each node array's cached address is exactly the previous node array's address
     453      * plus the previous node array's size.
    454      * If this is not the case, it will throw an exception.
    455      *
    456      * @param arrays the list of node arrays to check
    457      */
    458     /* package */ static void checkFlatPtNodeArrayList(final ArrayList<PtNodeArray> arrays) {
    459         int offset = 0;
    460         int index = 0;
    461         for (final PtNodeArray ptNodeArray : arrays) {
    462             // BeforeUpdate and AfterUpdate addresses are the same here, so it does not matter
    463             // which we use.
    464             if (ptNodeArray.mCachedAddressAfterUpdate != offset) {
    465                 throw new RuntimeException("Wrong address for node " + index
    466                         + " : expected " + offset + ", got " +
    467                         ptNodeArray.mCachedAddressAfterUpdate);
    468             }
    469             ++index;
    470             offset += ptNodeArray.mCachedSize;
    471         }
    472     }
    473 
    474     /**
     475      * Helper method to write a children position to a byte buffer.
    476      *
    477      * @param buffer the buffer to write to.
    478      * @param fromIndex the index in the buffer to write the address to.
    479      * @param position the position to write.
    480      * @return the size in bytes the address actually took.
    481      */
    482     /* package */ static int writeChildrenPosition(final byte[] buffer, final int fromIndex,
    483             final int position) {
    484         int index = fromIndex;
    485         switch (getByteSize(position)) {
    486         case 1:
    487             buffer[index++] = (byte)position;
    488             return 1;
    489         case 2:
    490             buffer[index++] = (byte)(0xFF & (position >> 8));
    491             buffer[index++] = (byte)(0xFF & position);
    492             return 2;
    493         case 3:
    494             buffer[index++] = (byte)(0xFF & (position >> 16));
    495             buffer[index++] = (byte)(0xFF & (position >> 8));
    496             buffer[index++] = (byte)(0xFF & position);
    497             return 3;
    498         case 0:
    499             return 0;
    500         default:
    501             throw new RuntimeException("Position " + position + " has a strange size");
    502         }
    503     }
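             // Usage sketch (hypothetical position, not part of the original code):
             //   final byte[] buf = new byte[FormatSpec.PTNODE_MAX_ADDRESS_SIZE];
             //   final int written = writeChildrenPosition(buf, 0, 0x0234);
             //   // 0x0234 needs two bytes, so buf starts with { 0x02, 0x34 } and written == 2.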
    504 
    505     /**
    506      * Makes the flag value for a PtNode.
    507      *
    508      * @param hasMultipleChars whether the PtNode has multiple chars.
    509      * @param isTerminal whether the PtNode is terminal.
    510      * @param childrenAddressSize the size of a children address.
    511      * @param hasBigrams whether the PtNode has bigrams.
    512      * @param isNotAWord whether the PtNode is not a word.
    513      * @param isPossiblyOffensive whether the PtNode is a possibly offensive entry.
    514      * @return the flags
    515      */
    516     static int makePtNodeFlags(final boolean hasMultipleChars, final boolean isTerminal,
    517             final int childrenAddressSize, final boolean hasBigrams,
    518             final boolean isNotAWord, final boolean isPossiblyOffensive) {
    519         byte flags = 0;
    520         if (hasMultipleChars) flags |= FormatSpec.FLAG_HAS_MULTIPLE_CHARS;
    521         if (isTerminal) flags |= FormatSpec.FLAG_IS_TERMINAL;
    522         switch (childrenAddressSize) {
    523             case 1:
    524                 flags |= FormatSpec.FLAG_CHILDREN_ADDRESS_TYPE_ONEBYTE;
    525                 break;
    526             case 2:
    527                 flags |= FormatSpec.FLAG_CHILDREN_ADDRESS_TYPE_TWOBYTES;
    528                 break;
    529             case 3:
    530                 flags |= FormatSpec.FLAG_CHILDREN_ADDRESS_TYPE_THREEBYTES;
    531                 break;
    532             case 0:
    533                 flags |= FormatSpec.FLAG_CHILDREN_ADDRESS_TYPE_NOADDRESS;
    534                 break;
    535             default:
    536                 throw new RuntimeException("Node with a strange address");
    537         }
    538         if (hasBigrams) flags |= FormatSpec.FLAG_HAS_BIGRAMS;
    539         if (isNotAWord) flags |= FormatSpec.FLAG_IS_NOT_A_WORD;
    540         if (isPossiblyOffensive) flags |= FormatSpec.FLAG_IS_POSSIBLY_OFFENSIVE;
    541         return flags;
    542     }
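             // Flag composition sketch (hypothetical node): a terminal, single-character PtNode with
             // bigrams and a two-byte children offset gets
             //   FLAG_IS_TERMINAL | FLAG_CHILDREN_ADDRESS_TYPE_TWOBYTES | FLAG_HAS_BIGRAMS.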
    543 
    544     /* package */ static byte makePtNodeFlags(final PtNode node, final int childrenOffset) {
    545         return (byte) makePtNodeFlags(node.mChars.length > 1, node.isTerminal(),
    546                 getByteSize(childrenOffset),
    547                 node.mBigrams != null && !node.mBigrams.isEmpty(),
    548                 node.mIsNotAWord, node.mIsPossiblyOffensive);
    549     }
    550 
    551     /**
    552      * Makes the flag value for a bigram.
    553      *
    554      * @param more whether there are more bigrams after this one.
    555      * @param offset the offset of the bigram.
    556      * @param bigramFrequency the frequency of the bigram, 0..255.
    557      * @param unigramFrequency the unigram frequency of the same word, 0..255.
     558      * @param word the second word of the bigram, for debugging purposes.
    559      * @return the flags
    560      */
    561     /* package */ static int makeBigramFlags(final boolean more, final int offset,
    562             final int bigramFrequency, final int unigramFrequency, final String word) {
    563         int bigramFlags = (more ? FormatSpec.FLAG_BIGRAM_SHORTCUT_ATTR_HAS_NEXT : 0)
    564                 + (offset < 0 ? FormatSpec.FLAG_BIGRAM_ATTR_OFFSET_NEGATIVE : 0);
    565         switch (getByteSize(offset)) {
    566         case 1:
    567             bigramFlags |= FormatSpec.FLAG_BIGRAM_ATTR_ADDRESS_TYPE_ONEBYTE;
    568             break;
    569         case 2:
    570             bigramFlags |= FormatSpec.FLAG_BIGRAM_ATTR_ADDRESS_TYPE_TWOBYTES;
    571             break;
    572         case 3:
    573             bigramFlags |= FormatSpec.FLAG_BIGRAM_ATTR_ADDRESS_TYPE_THREEBYTES;
    574             break;
    575         default:
    576             throw new RuntimeException("Strange offset size");
    577         }
    578         final int frequency;
    579         if (unigramFrequency > bigramFrequency) {
     580             MakedictLog.e("Unigram freq is higher than bigram freq for \"" + word
    581                     + "\". Bigram freq is " + bigramFrequency + ", unigram freq for "
    582                     + word + " is " + unigramFrequency);
    583             frequency = unigramFrequency;
    584         } else {
    585             frequency = bigramFrequency;
    586         }
    587         bigramFlags += getBigramFrequencyDiff(unigramFrequency, frequency)
    588                 & FormatSpec.FLAG_BIGRAM_SHORTCUT_ATTR_FREQUENCY;
    589         return bigramFlags;
    590     }
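             // Bigram flags sketch (hypothetical values): for a non-last bigram whose target sits at
             // offset -300, the flags are FLAG_BIGRAM_SHORTCUT_ATTR_HAS_NEXT
             // + FLAG_BIGRAM_ATTR_OFFSET_NEGATIVE + FLAG_BIGRAM_ATTR_ADDRESS_TYPE_TWOBYTES, plus the
             // 4-bit discretized frequency computed by getBigramFrequencyDiff() below.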
    591 
    592     public static int getBigramFrequencyDiff(final int unigramFrequency,
    593             final int bigramFrequency) {
    594         // We compute the difference between 255 (which means probability = 1) and the
    595         // unigram score. We split this into a number of discrete steps.
    596         // Now, the steps are numbered 0~15; 0 represents an increase of 1 step while 15
    597         // represents an increase of 16 steps: a value of 15 will be interpreted as the median
     598         // value of the 16th step. In all fairness, if the bigram frequency is low enough to be
     599         // rounded below the first step (which means it is less than half a step higher than the
     600         // unigram frequency), then the unigram frequency itself is the best approximation of the
     601         // bigram freq that we could possibly supply, hence we should *not* include this bigram
     602         // in the file at all.
     603         // Until this is done, we'll write 0 and slightly overestimate this case.
    604         // In other words, 0 means "between 0.5 step and 1.5 step", 1 means "between 1.5 step
    605         // and 2.5 steps", and 15 means "between 15.5 steps and 16.5 steps". So we want to
    606         // divide our range [unigramFreq..MAX_TERMINAL_FREQUENCY] in 16.5 steps to get the
    607         // step size. Then we compute the start of the first step (the one where value 0 starts)
    608         // by adding half-a-step to the unigramFrequency. From there, we compute the integer
    609         // number of steps to the bigramFrequency. One last thing: we want our steps to include
    610         // their lower bound and exclude their higher bound so we need to have the first step
    611         // start at exactly 1 unit higher than floor(unigramFreq + half a step).
    612         // Note : to reconstruct the score, the dictionary reader will need to divide
    613         // MAX_TERMINAL_FREQUENCY - unigramFreq by 16.5 likewise to get the value of the step,
    614         // and add (discretizedFrequency + 0.5 + 0.5) times this value to get the best
     615         // approximation. (0.5 to get the first step start, and 0.5 to get the middle of the
     616         // step pointed to by the discretized frequency.)
    617         final float stepSize =
    618                 (FormatSpec.MAX_TERMINAL_FREQUENCY - unigramFrequency)
    619                 / (1.5f + FormatSpec.MAX_BIGRAM_FREQUENCY);
    620         final float firstStepStart = 1 + unigramFrequency + (stepSize / 2.0f);
    621         final int discretizedFrequency = (int)((bigramFrequency - firstStepStart) / stepSize);
    622         // If the bigram freq is less than half-a-step higher than the unigram freq, we get -1
    623         // here. The best approximation would be the unigram freq itself, so we should not
    624         // include this bigram in the dictionary. For now, register as 0, and live with the
    625         // small over-estimation that we get in this case. TODO: actually remove this bigram
    626         // if discretizedFrequency < 0.
    627         return discretizedFrequency > 0 ? discretizedFrequency : 0;
    628     }
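             // Worked example (assuming MAX_TERMINAL_FREQUENCY == 255 and MAX_BIGRAM_FREQUENCY == 15):
             // for unigramFrequency = 100 and bigramFrequency = 200, stepSize = 155 / 16.5 ~ 9.39,
             // firstStepStart = 1 + 100 + 4.70 = 105.70, and the method returns
             // (int) ((200 - 105.70) / 9.39) == 10, the 4-bit value packed into the bigram flags.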
    629 
    630     /* package */ static int getChildrenPosition(final PtNode ptNode,
    631             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    632         int positionOfChildrenPosField = ptNode.mCachedAddressAfterUpdate
    633                 + getNodeHeaderSize(ptNode, codePointToOneByteCodeMap);
    634         if (ptNode.isTerminal()) {
     635             // A terminal node stores its frequency right after the header.
    636             // If positionOfChildrenPosField is incorrect, we may crash when jumping to the children
    637             // position.
    638             positionOfChildrenPosField += FormatSpec.PTNODE_FREQUENCY_SIZE;
    639         }
    640         return null == ptNode.mChildren ? FormatSpec.NO_CHILDREN_ADDRESS
    641                 : ptNode.mChildren.mCachedAddressAfterUpdate - positionOfChildrenPosField;
    642     }
    643 
    644     /**
    645      * Write a PtNodeArray. The PtNodeArray is expected to have its final position cached.
    646      *
    647      * @param dict the dictionary the node array is a part of (for relative offsets).
    648      * @param dictEncoder the dictionary encoder.
    649      * @param ptNodeArray the node array to write.
    650      * @param codePointToOneByteCodeMap the map to convert the code points.
    651      */
    652     /* package */ static void writePlacedPtNodeArray(final FusionDictionary dict,
    653             final DictEncoder dictEncoder, final PtNodeArray ptNodeArray,
    654             final HashMap<Integer, Integer> codePointToOneByteCodeMap) {
    655         // TODO: Make the code in common with BinaryDictIOUtils#writePtNode
    656         dictEncoder.setPosition(ptNodeArray.mCachedAddressAfterUpdate);
    657 
    658         final int ptNodeCount = ptNodeArray.mData.size();
    659         dictEncoder.writePtNodeCount(ptNodeCount);
    660         for (int i = 0; i < ptNodeCount; ++i) {
    661             final PtNode ptNode = ptNodeArray.mData.get(i);
    662             if (dictEncoder.getPosition() != ptNode.mCachedAddressAfterUpdate) {
    663                 throw new RuntimeException("Bug: write index is not the same as the cached address "
    664                         + "of the node : " + dictEncoder.getPosition() + " <> "
    665                         + ptNode.mCachedAddressAfterUpdate);
    666             }
    667             // Sanity checks.
    668             if (DBG && ptNode.getProbability() > FormatSpec.MAX_TERMINAL_FREQUENCY) {
    669                 throw new RuntimeException("A node has a frequency > "
    670                         + FormatSpec.MAX_TERMINAL_FREQUENCY
    671                         + " : " + ptNode.mProbabilityInfo.toString());
    672             }
    673             dictEncoder.writePtNode(ptNode, dict, codePointToOneByteCodeMap);
    674         }
    675         if (dictEncoder.getPosition() != ptNodeArray.mCachedAddressAfterUpdate
    676                 + ptNodeArray.mCachedSize) {
    677             throw new RuntimeException("Not the same size : written "
    678                      + (dictEncoder.getPosition() - ptNodeArray.mCachedAddressAfterUpdate)
    679                      + " bytes from a node that should have " + ptNodeArray.mCachedSize + " bytes");
    680         }
    681     }
    682 
    683     /**
    684      * Dumps a collection of useful statistics about a list of PtNode arrays.
    685      *
     686      * This prints purely informative data, like the total estimated file size, the
     687      * number of PtNode arrays and of PtNodes, the distribution of each address size, etc.
    688      *
    689      * @param ptNodeArrays the list of PtNode arrays.
    690      */
    691     /* package */ static void showStatistics(ArrayList<PtNodeArray> ptNodeArrays) {
    692         int firstTerminalAddress = Integer.MAX_VALUE;
    693         int lastTerminalAddress = Integer.MIN_VALUE;
    694         int size = 0;
    695         int ptNodes = 0;
    696         int maxNodes = 0;
    697         int maxRuns = 0;
    698         for (final PtNodeArray ptNodeArray : ptNodeArrays) {
    699             if (maxNodes < ptNodeArray.mData.size()) maxNodes = ptNodeArray.mData.size();
    700             for (final PtNode ptNode : ptNodeArray.mData) {
    701                 ++ptNodes;
    702                 if (ptNode.mChars.length > maxRuns) maxRuns = ptNode.mChars.length;
    703                 if (ptNode.isTerminal()) {
    704                     if (ptNodeArray.mCachedAddressAfterUpdate < firstTerminalAddress)
    705                         firstTerminalAddress = ptNodeArray.mCachedAddressAfterUpdate;
    706                     if (ptNodeArray.mCachedAddressAfterUpdate > lastTerminalAddress)
    707                         lastTerminalAddress = ptNodeArray.mCachedAddressAfterUpdate;
    708                 }
    709             }
    710             if (ptNodeArray.mCachedAddressAfterUpdate + ptNodeArray.mCachedSize > size) {
    711                 size = ptNodeArray.mCachedAddressAfterUpdate + ptNodeArray.mCachedSize;
    712             }
    713         }
    714         final int[] ptNodeCounts = new int[maxNodes + 1];
    715         final int[] runCounts = new int[maxRuns + 1];
    716         for (final PtNodeArray ptNodeArray : ptNodeArrays) {
    717             ++ptNodeCounts[ptNodeArray.mData.size()];
    718             for (final PtNode ptNode : ptNodeArray.mData) {
    719                 ++runCounts[ptNode.mChars.length];
    720             }
    721         }
    722 
    723         MakedictLog.i("Statistics:\n"
    724                 + "  Total file size " + size + "\n"
    725                 + "  " + ptNodeArrays.size() + " node arrays\n"
    726                 + "  " + ptNodes + " PtNodes (" + ((float)ptNodes / ptNodeArrays.size())
     727                         + " PtNodes per node array)\n"
    728                 + "  First terminal at " + firstTerminalAddress + "\n"
    729                 + "  Last terminal at " + lastTerminalAddress + "\n"
    730                 + "  PtNode stats : max = " + maxNodes);
    731     }
    732 
    733     /**
    734      * Writes a file header to an output stream.
    735      *
    736      * @param destination the stream to write the file header to.
    737      * @param dict the dictionary to write.
    738      * @param formatOptions file format options.
    739      * @param codePointOccurrenceArray code points ordered by occurrence count.
    740      * @return the size of the header.
    741      */
    742     /* package */ static int writeDictionaryHeader(final OutputStream destination,
    743             final FusionDictionary dict, final FormatOptions formatOptions,
    744             final ArrayList<Entry<Integer, Integer>> codePointOccurrenceArray)
    745                     throws IOException, UnsupportedFormatException {
    746         final int version = formatOptions.mVersion;
    747         if ((version >= FormatSpec.MINIMUM_SUPPORTED_STATIC_VERSION &&
    748                 version <= FormatSpec.MAXIMUM_SUPPORTED_STATIC_VERSION) || (
    749                 version >= FormatSpec.MINIMUM_SUPPORTED_DYNAMIC_VERSION &&
    750                 version <= FormatSpec.MAXIMUM_SUPPORTED_DYNAMIC_VERSION)) {
    751             // Dictionary is valid
    752         } else {
    753             throw new UnsupportedFormatException("Requested file format version " + version
    754                     + ", but this implementation only supports static versions "
    755                     + FormatSpec.MINIMUM_SUPPORTED_STATIC_VERSION + " through "
    756                     + FormatSpec.MAXIMUM_SUPPORTED_STATIC_VERSION + " and dynamic versions "
    757                     + FormatSpec.MINIMUM_SUPPORTED_DYNAMIC_VERSION + " through "
    758                     + FormatSpec.MAXIMUM_SUPPORTED_DYNAMIC_VERSION);
    759         }
    760 
    761         ByteArrayOutputStream headerBuffer = new ByteArrayOutputStream(256);
    762 
    763         // The magic number in big-endian order.
    764         // Magic number for all versions.
    765         headerBuffer.write((byte) (0xFF & (FormatSpec.MAGIC_NUMBER >> 24)));
    766         headerBuffer.write((byte) (0xFF & (FormatSpec.MAGIC_NUMBER >> 16)));
    767         headerBuffer.write((byte) (0xFF & (FormatSpec.MAGIC_NUMBER >> 8)));
    768         headerBuffer.write((byte) (0xFF & FormatSpec.MAGIC_NUMBER));
    769         // Dictionary version.
    770         headerBuffer.write((byte) (0xFF & (version >> 8)));
    771         headerBuffer.write((byte) (0xFF & version));
    772 
    773         // Options flags
    774         // TODO: Remove this field.
    775         final int options = 0;
    776         headerBuffer.write((byte) (0xFF & (options >> 8)));
    777         headerBuffer.write((byte) (0xFF & options));
    778         final int headerSizeOffset = headerBuffer.size();
    779         // Placeholder to be written later with header size.
    780         for (int i = 0; i < 4; ++i) {
    781             headerBuffer.write(0);
    782         }
    783         // Write out the options.
    784         for (final String key : dict.mOptions.mAttributes.keySet()) {
    785             final String value = dict.mOptions.mAttributes.get(key);
    786             CharEncoding.writeString(headerBuffer, key, null);
    787             CharEncoding.writeString(headerBuffer, value, null);
    788         }
     789         // Write out the codePointTable if there is a codePointOccurrenceArray.
    790         if (codePointOccurrenceArray != null) {
    791             final String codePointTableString =
    792                     encodeCodePointTable(codePointOccurrenceArray);
    793             CharEncoding.writeString(headerBuffer, DictionaryHeader.CODE_POINT_TABLE_KEY, null);
    794             CharEncoding.writeString(headerBuffer, codePointTableString, null);
    795         }
    796         final int size = headerBuffer.size();
    797         final byte[] bytes = headerBuffer.toByteArray();
    798         // Write out the header size.
    799         bytes[headerSizeOffset] = (byte) (0xFF & (size >> 24));
    800         bytes[headerSizeOffset + 1] = (byte) (0xFF & (size >> 16));
    801         bytes[headerSizeOffset + 2] = (byte) (0xFF & (size >> 8));
    802         bytes[headerSizeOffset + 3] = (byte) (0xFF & (size >> 0));
    803         destination.write(bytes);
    804 
    805         headerBuffer.close();
    806         return size;
    807     }
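             // Header layout recap (what the method above writes, in order):
             //   4 bytes   magic number, big-endian
             //   2 bytes   dictionary format version
             //   2 bytes   options flags (currently always 0)
             //   4 bytes   total header size, backfilled once the attributes are known
             //   then the key/value attribute strings, and the code point table if one was built.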
    808 
    809     static final class CodePointTable {
    810         final HashMap<Integer, Integer> mCodePointToOneByteCodeMap;
    811         final ArrayList<Entry<Integer, Integer>> mCodePointOccurrenceArray;
    812 
     813         // Leave the code point table empty for the version 200 dictionary, which is used in tests.
    814         CodePointTable() {
    815             mCodePointToOneByteCodeMap = null;
    816             mCodePointOccurrenceArray = null;
    817         }
    818 
    819         CodePointTable(final HashMap<Integer, Integer> codePointToOneByteCodeMap,
    820                 final ArrayList<Entry<Integer, Integer>> codePointOccurrenceArray) {
    821             mCodePointToOneByteCodeMap = codePointToOneByteCodeMap;
    822             mCodePointOccurrenceArray = codePointOccurrenceArray;
    823         }
    824     }
    825 
    826     private static String encodeCodePointTable(
    827             final ArrayList<Entry<Integer, Integer>> codePointOccurrenceArray) {
    828         final StringBuilder codePointTableString = new StringBuilder();
    829         int currentCodePointTableIndex = FormatSpec.MINIMAL_ONE_BYTE_CHARACTER_VALUE;
    830         for (final Entry<Integer, Integer> entry : codePointOccurrenceArray) {
    831             // Native reads the table as a string
    832             codePointTableString.appendCodePoint(entry.getKey());
    833             if (FormatSpec.MAXIMAL_ONE_BYTE_CHARACTER_VALUE < ++currentCodePointTableIndex) {
    834                 break;
    835             }
    836         }
    837         return codePointTableString.toString();
    838     }
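             // Encoding note: the occurrence array is ordered by occurrence count, so the most common
             // code points are written first and the loop stops once the one-byte code range
             // (MINIMAL_ONE_BYTE_CHARACTER_VALUE..MAXIMAL_ONE_BYTE_CHARACTER_VALUE) is full; the
             // reader is presumably expected to map each character's index in this string back to its
             // one-byte code.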
    839 }
    840