      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
      17 // This is an on-disk cache which maps a 64-bit key to a byte array.
     18 //
     19 // It consists of three files: one index file and two data files. One of the
     20 // data files is "active", and the other is "inactive". New entries are
     21 // appended into the active region until it reaches the size limit. At that
     22 // point the active file and the inactive file are swapped, and the new active
     23 // file is truncated to empty (and the index for that file is also cleared).
     24 // The index is a hash table with linear probing. When the load factor reaches
      25 // 0.5, the regions are flipped just as when the size limit is reached.
     26 //
     27 // The index file format: (all numbers are stored in little-endian)
     28 // [0]  Magic number: 0xB3273030
     29 // [4]  MaxEntries: Max number of hash entries per region.
     30 // [8]  MaxBytes: Max number of data bytes per region (including header).
     31 // [12] ActiveRegion: The active growing region: 0 or 1.
     32 // [16] ActiveEntries: The number of hash entries used in the active region.
     33 // [20] ActiveBytes: The number of data bytes used in the active region.
     34 // [24] Version number.
     35 // [28] Checksum of [0..28).
     36 // [32] Hash entries for region 0. The size is X = (12 * MaxEntries bytes).
     37 // [32 + X] Hash entries for region 1. The size is also X.
     38 //
      39 // Each hash entry is 12 bytes: an 8-byte key and a 4-byte offset into the data
     40 // file. The offset is 0 when the slot is free. Note that 0 is a valid value
      41 // for a key. The keys are used directly as the index into the hash table, so they
     42 // should be suitably distributed.
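         //
         // For illustration only (the numbers below are example values, not defaults
         // of this class): with MaxEntries = 100, each region's hash table is
         // 12 * 100 = 1200 bytes, so the index file is 32 + 2 * 1200 = 2432 bytes,
         // and the regions flip once 50 entries (load factor 0.5) are in use in the
         // active region.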
     43 //
      44 // Each data file stores data for one region. The data file starts with the
      45 // magic number 0xBD248510, followed by concatenated blobs.
     46 //
     47 // The blob format:
     48 // [0]  Key of this blob
     49 // [8]  Checksum of this blob
     50 // [12] Offset of this blob
     51 // [16] Length of this blob (not including header)
     52 // [20] Blob
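         //
         // For example, sizes following directly from the layout above: a blob whose
         // data is 100 bytes long occupies 20 + 100 = 120 bytes in the data file, and
         // the next blob, if any, starts immediately after it.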
     53 //
      54 // Below is the interface for BlobCache. Instances of this class do not
      55 // support concurrent use by multiple threads.
     56 //
     57 // public BlobCache(String path, int maxEntries, int maxBytes, boolean reset) throws IOException;
     58 // public void insert(long key, byte[] data) throws IOException;
     59 // public byte[] lookup(long key) throws IOException;
     60 // public void lookup(LookupRequest req) throws IOException;
     61 // public void close();
     62 // public void syncIndex();
     63 // public void syncAll();
     64 // public static void deleteFiles(String path);
     65 //
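         // A minimal usage sketch (the path, sizes, and key below are illustrative
         // values for this example only; IOException handling is omitted):
         //
         //   BlobCache cache = new BlobCache("/data/data/app/cache/imgcache",
         //           1000,              // maxEntries per region
         //           1024 * 1024,       // maxBytes per region (including header)
         //           false);            // keep existing contents if the index is valid
         //   cache.insert(0x1234L, someBytes);
         //   byte[] blob = cache.lookup(0x1234L);   // null if not found
         //   cache.close();
         //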
     66 package com.android.mms.util;
     67 
     68 import com.android.mms.LogTag;
     69 
     70 import java.io.Closeable;
     71 import java.io.File;
     72 import java.io.IOException;
     73 import java.io.RandomAccessFile;
     74 import java.nio.ByteOrder;
     75 import java.nio.MappedByteBuffer;
     76 import java.nio.channels.FileChannel;
     77 import java.util.zip.Adler32;
     78 
     79 import android.util.Log;
     80 
     81 public class BlobCache implements Closeable {
     82     private static final String TAG = LogTag.TAG;
     83 
     84     private static final int MAGIC_INDEX_FILE = 0xB3273030;
     85     private static final int MAGIC_DATA_FILE = 0xBD248510;
     86 
     87     // index header offset
     88     private static final int IH_MAGIC = 0;
     89     private static final int IH_MAX_ENTRIES = 4;
     90     private static final int IH_MAX_BYTES = 8;
     91     private static final int IH_ACTIVE_REGION = 12;
     92     private static final int IH_ACTIVE_ENTRIES = 16;
     93     private static final int IH_ACTIVE_BYTES = 20;
     94     private static final int IH_VERSION = 24;
     95     private static final int IH_CHECKSUM = 28;
     96     private static final int INDEX_HEADER_SIZE = 32;
     97 
     98     private static final int DATA_HEADER_SIZE = 4;
     99 
    100     // blob header offset
    101     private static final int BH_KEY = 0;
    102     private static final int BH_CHECKSUM = 8;
    103     private static final int BH_OFFSET = 12;
    104     private static final int BH_LENGTH = 16;
    105     private static final int BLOB_HEADER_SIZE = 20;
    106 
    107     private RandomAccessFile mIndexFile;
    108     private RandomAccessFile mDataFile0;
    109     private RandomAccessFile mDataFile1;
    110     private FileChannel mIndexChannel;
    111     private MappedByteBuffer mIndexBuffer;
    112 
    113     private int mMaxEntries;
    114     private int mMaxBytes;
    115     private int mActiveRegion;
    116     private int mActiveEntries;
    117     private int mActiveBytes;
    118     private int mVersion;
    119 
    120     private RandomAccessFile mActiveDataFile;
    121     private RandomAccessFile mInactiveDataFile;
    122     private int mActiveHashStart;
    123     private int mInactiveHashStart;
    124     private byte[] mIndexHeader = new byte[INDEX_HEADER_SIZE];
    125     private byte[] mBlobHeader = new byte[BLOB_HEADER_SIZE];
    126     private Adler32 mAdler32 = new Adler32();
    127 
    128     // Creates the cache. Three files will be created:
    129     // path + ".idx", path + ".0", and path + ".1"
     130 // The ".0" file and the ".1" file each store data for one region. Each of
    131     // them can grow to the size specified by maxBytes. The maxEntries parameter
    132     // specifies the maximum number of entries each region can have. If the
    133     // "reset" parameter is true, the cache will be cleared before use.
    134     public BlobCache(String path, int maxEntries, int maxBytes, boolean reset)
    135             throws IOException {
    136         this(path, maxEntries, maxBytes, reset, 0);
    137     }
    138 
    139     public BlobCache(String path, int maxEntries, int maxBytes, boolean reset,
    140             int version) throws IOException {
    141         mIndexFile = new RandomAccessFile(path + ".idx", "rw");
    142         mDataFile0 = new RandomAccessFile(path + ".0", "rw");
    143         mDataFile1 = new RandomAccessFile(path + ".1", "rw");
    144         mVersion = version;
    145 
    146         if (!reset && loadIndex()) {
    147             return;
    148         }
    149 
    150         resetCache(maxEntries, maxBytes);
    151 
    152         if (!loadIndex()) {
    153             closeAll();
    154             throw new IOException("unable to load index");
    155         }
    156     }
    157 
     158     // Deletes the files previously created by the BlobCache constructor
     159     // for the given path.
    160     public static void deleteFiles(String path) {
    161         deleteFileSilently(path + ".idx");
    162         deleteFileSilently(path + ".0");
    163         deleteFileSilently(path + ".1");
    164     }
    165 
    166     private static void deleteFileSilently(String path) {
    167         try {
    168             new File(path).delete();
    169         } catch (Throwable t) {
    170             // ignore;
    171         }
    172     }
    173 
    174     // Close the cache. All resources are released. No other method should be
    175     // called after this is called.
    176     @Override
    177     public void close() {
    178         syncAll();
    179         closeAll();
    180     }
    181 
    182     private void closeAll() {
    183         closeSilently(mIndexChannel);
    184         closeSilently(mIndexFile);
    185         closeSilently(mDataFile0);
    186         closeSilently(mDataFile1);
    187     }
    188 
     189     // Returns true if loading the index is successful. After this method is
     190     // called, mIndexHeader and the index header in the file should be kept in sync.
    191     private boolean loadIndex() {
    192         try {
    193             mIndexFile.seek(0);
    194             mDataFile0.seek(0);
    195             mDataFile1.seek(0);
    196 
    197             byte[] buf = mIndexHeader;
    198             if (mIndexFile.read(buf) != INDEX_HEADER_SIZE) {
    199                 Log.w(TAG, "cannot read header");
    200                 return false;
    201             }
    202 
    203             if (readInt(buf, IH_MAGIC) != MAGIC_INDEX_FILE) {
    204                 Log.w(TAG, "cannot read header magic");
    205                 return false;
    206             }
    207 
    208             if (readInt(buf, IH_VERSION) != mVersion) {
    209                 Log.w(TAG, "version mismatch");
    210                 return false;
    211             }
    212 
    213             mMaxEntries = readInt(buf, IH_MAX_ENTRIES);
    214             mMaxBytes = readInt(buf, IH_MAX_BYTES);
    215             mActiveRegion = readInt(buf, IH_ACTIVE_REGION);
    216             mActiveEntries = readInt(buf, IH_ACTIVE_ENTRIES);
    217             mActiveBytes = readInt(buf, IH_ACTIVE_BYTES);
    218 
    219             int sum = readInt(buf, IH_CHECKSUM);
    220             if (checkSum(buf, 0, IH_CHECKSUM) != sum) {
    221                 Log.w(TAG, "header checksum does not match");
    222                 return false;
    223             }
    224 
    225             // Sanity check
    226             if (mMaxEntries <= 0) {
    227                 Log.w(TAG, "invalid max entries");
    228                 return false;
    229             }
    230             if (mMaxBytes <= 0) {
    231                 Log.w(TAG, "invalid max bytes");
    232                 return false;
    233             }
    234             if (mActiveRegion != 0 && mActiveRegion != 1) {
    235                 Log.w(TAG, "invalid active region");
    236                 return false;
    237             }
    238             if (mActiveEntries < 0 || mActiveEntries > mMaxEntries) {
    239                 Log.w(TAG, "invalid active entries");
    240                 return false;
    241             }
    242             if (mActiveBytes < DATA_HEADER_SIZE || mActiveBytes > mMaxBytes) {
    243                 Log.w(TAG, "invalid active bytes");
    244                 return false;
    245             }
    246             if (mIndexFile.length() !=
    247                     INDEX_HEADER_SIZE + mMaxEntries * 12 * 2) {
    248                 Log.w(TAG, "invalid index file length");
    249                 return false;
    250             }
    251 
    252             // Make sure data file has magic
    253             byte[] magic = new byte[4];
    254             if (mDataFile0.read(magic) != 4) {
    255                 Log.w(TAG, "cannot read data file magic");
    256                 return false;
    257             }
    258             if (readInt(magic, 0) != MAGIC_DATA_FILE) {
    259                 Log.w(TAG, "invalid data file magic");
    260                 return false;
    261             }
    262             if (mDataFile1.read(magic) != 4) {
    263                 Log.w(TAG, "cannot read data file magic");
    264                 return false;
    265             }
    266             if (readInt(magic, 0) != MAGIC_DATA_FILE) {
    267                 Log.w(TAG, "invalid data file magic");
    268                 return false;
    269             }
    270 
    271             // Map index file to memory
    272             mIndexChannel = mIndexFile.getChannel();
    273             mIndexBuffer = mIndexChannel.map(FileChannel.MapMode.READ_WRITE,
    274                     0, mIndexFile.length());
    275             mIndexBuffer.order(ByteOrder.LITTLE_ENDIAN);
    276 
    277             setActiveVariables();
    278             return true;
    279         } catch (IOException ex) {
    280             Log.e(TAG, "loadIndex failed.", ex);
    281             return false;
    282         }
    283     }
    284 
    285     private void setActiveVariables() throws IOException {
    286         mActiveDataFile = (mActiveRegion == 0) ? mDataFile0 : mDataFile1;
    287         mInactiveDataFile = (mActiveRegion == 1) ? mDataFile0 : mDataFile1;
    288         mActiveDataFile.setLength(mActiveBytes);
    289         mActiveDataFile.seek(mActiveBytes);
    290 
    291         mActiveHashStart = INDEX_HEADER_SIZE;
    292         mInactiveHashStart = INDEX_HEADER_SIZE;
    293 
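                  // The two hash tables sit back to back after the 32-byte index header.
                  // For example (an illustrative value only), with mMaxEntries = 100,
                  // region 0's table starts at byte 32 of the index file and region 1's
                  // at 32 + 12 * 100 = 1232.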
    294         if (mActiveRegion == 0) {
    295             mInactiveHashStart += mMaxEntries * 12;
    296         } else {
    297             mActiveHashStart += mMaxEntries * 12;
    298         }
    299     }
    300 
    301     private void resetCache(int maxEntries, int maxBytes) throws IOException {
    302         mIndexFile.setLength(0);  // truncate to zero the index
    303         mIndexFile.setLength(INDEX_HEADER_SIZE + maxEntries * 12 * 2);
    304         mIndexFile.seek(0);
    305         byte[] buf = mIndexHeader;
    306         writeInt(buf, IH_MAGIC, MAGIC_INDEX_FILE);
    307         writeInt(buf, IH_MAX_ENTRIES, maxEntries);
    308         writeInt(buf, IH_MAX_BYTES, maxBytes);
    309         writeInt(buf, IH_ACTIVE_REGION, 0);
    310         writeInt(buf, IH_ACTIVE_ENTRIES, 0);
    311         writeInt(buf, IH_ACTIVE_BYTES, DATA_HEADER_SIZE);
    312         writeInt(buf, IH_VERSION, mVersion);
    313         writeInt(buf, IH_CHECKSUM, checkSum(buf, 0, IH_CHECKSUM));
    314         mIndexFile.write(buf);
    315         // This is only needed if setLength does not zero the extended part.
    316         // writeZero(mIndexFile, maxEntries * 12 * 2);
    317 
    318         mDataFile0.setLength(0);
    319         mDataFile1.setLength(0);
    320         mDataFile0.seek(0);
    321         mDataFile1.seek(0);
    322         writeInt(buf, 0, MAGIC_DATA_FILE);
    323         mDataFile0.write(buf, 0, 4);
    324         mDataFile1.write(buf, 0, 4);
    325     }
    326 
    327     // Flip the active region and the inactive region.
    328     private void flipRegion() throws IOException {
    329         mActiveRegion = 1 - mActiveRegion;
    330         mActiveEntries = 0;
    331         mActiveBytes = DATA_HEADER_SIZE;
    332 
    333         writeInt(mIndexHeader, IH_ACTIVE_REGION, mActiveRegion);
    334         writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
    335         writeInt(mIndexHeader, IH_ACTIVE_BYTES, mActiveBytes);
    336         updateIndexHeader();
    337 
    338         setActiveVariables();
    339         clearHash(mActiveHashStart);
    340         syncIndex();
    341     }
    342 
    343     // Sync mIndexHeader to the index file.
    344     private void updateIndexHeader() {
    345         writeInt(mIndexHeader, IH_CHECKSUM,
    346                 checkSum(mIndexHeader, 0, IH_CHECKSUM));
    347         mIndexBuffer.position(0);
    348         mIndexBuffer.put(mIndexHeader);
    349     }
    350 
    351     // Clear the hash table starting from the specified offset.
    352     private void clearHash(int hashStart) {
    353         byte[] zero = new byte[1024];
    354         mIndexBuffer.position(hashStart);
    355         for (int count = mMaxEntries * 12; count > 0;) {
    356             int todo = Math.min(count, 1024);
    357             mIndexBuffer.put(zero, 0, todo);
    358             count -= todo;
    359         }
    360     }
    361 
    362     // Inserts a (key, data) pair into the cache.
    363     public void insert(long key, byte[] data) throws IOException {
    364         if (DATA_HEADER_SIZE + BLOB_HEADER_SIZE + data.length > mMaxBytes) {
    365             throw new RuntimeException("blob is too large!");
    366         }
    367 
    368         if (mActiveBytes + BLOB_HEADER_SIZE + data.length > mMaxBytes
    369                 || mActiveEntries * 2 >= mMaxEntries) {
    370             flipRegion();
    371         }
    372 
    373         if (!lookupInternal(key, mActiveHashStart)) {
    374             // If we don't have an existing entry with the same key, increase
    375             // the entry count.
    376             mActiveEntries++;
    377             writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
    378         }
    379 
    380         insertInternal(key, data, data.length);
    381         updateIndexHeader();
    382     }
    383 
    384     // Appends the data to the active file. It also updates the hash entry.
    385     // The proper hash entry (suitable for insertion or replacement) must be
     386     // pointed to by mSlotOffset.
    387     private void insertInternal(long key, byte[] data, int length)
    388             throws IOException {
    389         byte[] header = mBlobHeader;
    390         int sum = checkSum(data);
    391         writeLong(header, BH_KEY, key);
    392         writeInt(header, BH_CHECKSUM, sum);
    393         writeInt(header, BH_OFFSET, mActiveBytes);
    394         writeInt(header, BH_LENGTH, length);
    395         mActiveDataFile.write(header);
    396         mActiveDataFile.write(data, 0, length);
    397 
    398         mIndexBuffer.putLong(mSlotOffset, key);
    399         mIndexBuffer.putInt(mSlotOffset + 8, mActiveBytes);
    400         mActiveBytes += BLOB_HEADER_SIZE + length;
    401         writeInt(mIndexHeader, IH_ACTIVE_BYTES, mActiveBytes);
    402     }
    403 
    404     public static class LookupRequest {
    405         public long key;        // input: the key to find
    406         public byte[] buffer;   // input/output: the buffer to store the blob
    407         public int length;      // output: the length of the blob
    408     }
    409 
    410     // This method is for one-off lookup. For repeated lookup, use the version
    411     // accepting LookupRequest to avoid repeated memory allocation.
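             //
             // A sketch of that repeated-lookup pattern; "cache", "keys", and "handle"
             // are illustrative names, not part of this class (IOException handling
             // is omitted):
             //
             //   BlobCache.LookupRequest req = new BlobCache.LookupRequest();
             //   for (long key : keys) {
             //       req.key = key;
             //       if (cache.lookup(req)) {
             //           // Only the first req.length bytes of req.buffer are valid;
             //           // the buffer is reused (and grown if needed) across calls.
             //           handle(req.buffer, req.length);
             //       }
             //   }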
    412     private LookupRequest mLookupRequest = new LookupRequest();
    413     public byte[] lookup(long key) throws IOException {
    414         mLookupRequest.key = key;
    415         mLookupRequest.buffer = null;
    416         if (lookup(mLookupRequest)) {
    417             return mLookupRequest.buffer;
    418         } else {
    419             return null;
    420         }
    421     }
    422 
    423     // Returns true if the associated blob for the given key is available.
     424     // The blob is stored in the buffer pointed to by req.buffer, and the length
     425     // is stored in the req.length variable.
    426     //
    427     // The user can input a non-null value in req.buffer, and this method will
    428     // try to use that buffer. If that buffer is not large enough, this method
    429     // will allocate a new buffer and assign it to req.buffer.
    430     //
    431     // This method tries not to throw IOException even if the data file is
    432     // corrupted, but it can still throw IOException if things get strange.
    433     public boolean lookup(LookupRequest req) throws IOException {
    434         // Look up in the active region first.
    435         if (lookupInternal(req.key, mActiveHashStart)) {
    436             if (getBlob(mActiveDataFile, mFileOffset, req)) {
    437                 return true;
    438             }
    439         }
    440 
    441         // We want to copy the data from the inactive file to the active file
    442         // if it's available. So we keep the offset of the hash entry so we can
    443         // avoid looking it up again.
    444         int insertOffset = mSlotOffset;
    445 
    446         // Look up in the inactive region.
    447         if (lookupInternal(req.key, mInactiveHashStart)) {
    448             if (getBlob(mInactiveDataFile, mFileOffset, req)) {
    449                 // If we don't have enough space to insert this blob into
    450                 // the active file, just return it.
    451                 if (mActiveBytes + BLOB_HEADER_SIZE + req.length > mMaxBytes
    452                     || mActiveEntries * 2 >= mMaxEntries) {
    453                     return true;
    454                 }
    455                 // Otherwise copy it over.
    456                 mSlotOffset = insertOffset;
    457                 try {
    458                     insertInternal(req.key, req.buffer, req.length);
    459                     mActiveEntries++;
    460                     writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
    461                     updateIndexHeader();
    462                 } catch (Throwable t) {
     463                     Log.e(TAG, "cannot copy over", t);
    464                 }
    465                 return true;
    466             }
    467         }
    468 
    469         return false;
    470     }
    471 
    472 
     473     // Copies the blob at the specified offset in the specified file to
     474     // req.buffer. If req.buffer is null or too small, a new buffer is allocated
     475     // and assigned to req.buffer.
    476     // Returns false if the blob is not available (either the index file is
     477     // not in sync with the data file, or one of them is corrupted). The length
    478     // of the blob is stored in the req.length variable.
    479     private boolean getBlob(RandomAccessFile file, int offset,
    480             LookupRequest req) throws IOException {
    481         byte[] header = mBlobHeader;
    482         long oldPosition = file.getFilePointer();
    483         try {
    484             file.seek(offset);
    485             if (file.read(header) != BLOB_HEADER_SIZE) {
    486                 Log.w(TAG, "cannot read blob header");
    487                 return false;
    488             }
    489             long blobKey = readLong(header, BH_KEY);
    490             if (blobKey != req.key) {
    491                 Log.w(TAG, "blob key does not match: " + blobKey);
    492                 return false;
    493             }
    494             int sum = readInt(header, BH_CHECKSUM);
    495             int blobOffset = readInt(header, BH_OFFSET);
    496             if (blobOffset != offset) {
    497                 Log.w(TAG, "blob offset does not match: " + blobOffset);
    498                 return false;
    499             }
    500             int length = readInt(header, BH_LENGTH);
    501             if (length < 0 || length > mMaxBytes - offset - BLOB_HEADER_SIZE) {
    502                 Log.w(TAG, "invalid blob length: " + length);
    503                 return false;
    504             }
    505             if (req.buffer == null || req.buffer.length < length) {
    506                 req.buffer = new byte[length];
    507             }
    508 
    509             byte[] blob = req.buffer;
    510             req.length = length;
    511 
    512             if (file.read(blob, 0, length) != length) {
    513                 Log.w(TAG, "cannot read blob data");
    514                 return false;
    515             }
    516             if (checkSum(blob, 0, length) != sum) {
    517                 Log.w(TAG, "blob checksum does not match: " + sum);
    518                 return false;
    519             }
    520             return true;
    521         } catch (Throwable t)  {
    522             Log.e(TAG, "getBlob failed.", t);
    523             return false;
    524         } finally {
    525             file.seek(oldPosition);
    526         }
    527     }
    528 
    529     // Tries to look up a key in the specified hash region.
    530     // Returns true if the lookup is successful.
    531     // The slot offset in the index file is saved in mSlotOffset. If the lookup
    532     // is successful, it's the slot found. Otherwise it's the slot suitable for
    533     // insertion.
    534     // If the lookup is successful, the file offset is also saved in
    535     // mFileOffset.
    536     private int mSlotOffset;
    537     private int mFileOffset;
    538     private boolean lookupInternal(long key, int hashStart) {
    539         int slot = (int) (key % mMaxEntries);
    540         if (slot < 0) slot += mMaxEntries;
    541         int slotBegin = slot;
    542         while (true) {
    543             int offset = hashStart + slot * 12;
    544             long candidateKey = mIndexBuffer.getLong(offset);
    545             int candidateOffset = mIndexBuffer.getInt(offset + 8);
    546             if (candidateOffset == 0) {
    547                 mSlotOffset = offset;
    548                 return false;
    549             } else if (candidateKey == key) {
    550                 mSlotOffset = offset;
    551                 mFileOffset = candidateOffset;
    552                 return true;
    553             } else {
    554                 if (++slot >= mMaxEntries) {
    555                     slot = 0;
    556                 }
    557                 if (slot == slotBegin) {
    558                     Log.w(TAG, "corrupted index: clear the slot.");
    559                     mIndexBuffer.putInt(hashStart + slot * 12 + 8, 0);
    560                 }
    561             }
    562         }
    563     }
    564 
    565     public void syncIndex() {
    566         try {
    567             mIndexBuffer.force();
    568         } catch (Throwable t) {
    569             Log.w(TAG, "sync index failed", t);
    570         }
    571     }
    572 
    573     public void syncAll() {
    574         syncIndex();
    575         try {
    576             mDataFile0.getFD().sync();
    577         } catch (Throwable t) {
    578             Log.w(TAG, "sync data file 0 failed", t);
    579         }
    580         try {
    581             mDataFile1.getFD().sync();
    582         } catch (Throwable t) {
    583             Log.w(TAG, "sync data file 1 failed", t);
    584         }
    585     }
    586 
    587     // This is for testing only.
    588     //
    589     // Returns the active count (mActiveEntries). This also verifies that
     590     // the active count matches what's inside the hash region.
    591     int getActiveCount() {
    592         int count = 0;
    593         for (int i = 0; i < mMaxEntries; i++) {
    594             int offset = mActiveHashStart + i * 12;
    595             long candidateKey = mIndexBuffer.getLong(offset);
    596             int candidateOffset = mIndexBuffer.getInt(offset + 8);
    597             if (candidateOffset != 0) ++count;
    598         }
    599         if (count == mActiveEntries) {
    600             return count;
    601         } else {
    602             Log.e(TAG, "wrong active count: " + mActiveEntries + " vs " + count);
    603             return -1;  // signal failure.
    604         }
    605     }
    606 
    607     int checkSum(byte[] data) {
    608         mAdler32.reset();
    609         mAdler32.update(data);
    610         return (int) mAdler32.getValue();
    611     }
    612 
    613     int checkSum(byte[] data, int offset, int nbytes) {
    614         mAdler32.reset();
    615         mAdler32.update(data, offset, nbytes);
    616         return (int) mAdler32.getValue();
    617     }
    618 
    619     static void closeSilently(Closeable c) {
    620         if (c == null) return;
    621         try {
    622             c.close();
    623         } catch (Throwable t) {
    624             // do nothing
    625         }
    626     }
    627 
    628     static int readInt(byte[] buf, int offset) {
    629         return (buf[offset] & 0xff)
    630                 | ((buf[offset + 1] & 0xff) << 8)
    631                 | ((buf[offset + 2] & 0xff) << 16)
    632                 | ((buf[offset + 3] & 0xff) << 24);
    633     }
    634 
    635     static long readLong(byte[] buf, int offset) {
    636         long result = buf[offset + 7] & 0xff;
    637         for (int i = 6; i >= 0; i--) {
    638             result = (result << 8) | (buf[offset + i] & 0xff);
    639         }
    640         return result;
    641     }
    642 
    643     static void writeInt(byte[] buf, int offset, int value) {
    644         for (int i = 0; i < 4; i++) {
    645             buf[offset + i] = (byte) (value & 0xff);
    646             value >>= 8;
    647         }
    648     }
    649 
    650     static void writeLong(byte[] buf, int offset, long value) {
    651         for (int i = 0; i < 8; i++) {
    652             buf[offset + i] = (byte) (value & 0xff);
    653             value >>= 8;
    654         }
    655     }
    656 }
    657