/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.cooliris.media;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

import com.cooliris.cache.CacheService;

import android.util.Log;

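/**
 * A simple disk-backed cache that stores each entry's bytes in one of a set of
 * "chunk" files (roughly CHUNK_SIZE each) and keeps an in-memory index mapping a
 * long key to the chunk, offset, size, and timestamp of its data. The index is
 * periodically written to an "index" file so the cache survives process restarts;
 * entries whose stored timestamp is older than the requested timestamp are treated
 * as stale.
 */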
public final class DiskCache {
    private static final String TAG = "DiskCache";
    private static final int CHUNK_SIZE = 1048576; // 1MB.
    private static final int INDEX_HEADER_MAGIC = 0xcafe;
    private static final int INDEX_HEADER_VERSION = 2;
    private static final int FLUSH_INTERVAL = 32; // Write the index after this many insertions.
    private static final String INDEX_FILE_NAME = "index";
    private static final String CHUNK_FILE_PREFIX = "chunk_";
    private final String mCacheDirectoryPath;
    private LongSparseArray<Record> mIndexMap;
    private final LongSparseArray<RandomAccessFile> mChunkFiles = new LongSparseArray<RandomAccessFile>();
    private int mTailChunk = 0;
    private int mNumInsertions = 0;

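    /**
     * Creates (if needed) the cache directory returned by
     * CacheService.getCachePath(cacheDirectoryName) and loads the existing index, if any.
     */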
    public DiskCache(String cacheDirectoryName) {
        String cacheDirectoryPath = CacheService.getCachePath(cacheDirectoryName);

        // Create the cache directory if needed.
        File cacheDirectory = new File(cacheDirectoryPath);
        if (!cacheDirectory.isDirectory() && !cacheDirectory.mkdirs()) {
            Log.e(TAG, "Unable to create cache directory " + cacheDirectoryPath);
        }
        mCacheDirectoryPath = cacheDirectoryPath;
        loadIndex();
    }

    @Override
    public void finalize() {
        shutdown();
    }

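    /**
     * Returns the cached bytes for the given key, or null if there is no entry, the
     * entry is older than the requested timestamp, or the chunk file cannot be read.
     */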
    public byte[] get(long key, long timestamp) {
        // Look up the record for the given key.
        Record record = null;
        synchronized (mIndexMap) {
            record = mIndexMap.get(key);
        }
        if (record != null) {
            if (record.timestamp < timestamp) {
                Log.i(TAG, "File has been updated to " + timestamp + " since the last time " + record.timestamp
                        + " it was stored in the cache.");
                return null;
            }
            // Read the chunk from the file.
            try {
                RandomAccessFile chunkFile = getChunkFile(record.chunk);
                if (chunkFile != null) {
                    byte[] data = new byte[record.size];
                    chunkFile.seek(record.offset);
                    chunkFile.readFully(data);
                    return data;
                }
            } catch (Exception e) {
                Log.e(TAG, "Unable to read from chunk file", e);
            }
        }
        return null;
    }

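    /**
     * Returns true if a non-empty entry exists for the key and its stored timestamp
     * is at least as recent as the requested timestamp.
     */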
    public boolean isDataAvailable(long key, long timestamp) {
        Record record = null;
        synchronized (mIndexMap) {
            record = mIndexMap.get(key);
        }
        if (record == null) {
            return false;
        }
        if (record.timestamp < timestamp) {
            return false;
        }
        if (record.size == 0) {
            return false;
        }
        return true;
    }

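    /**
     * Stores the data for the given key. If an existing entry is large enough it is
     * overwritten in place; otherwise the data is appended to the current tail chunk.
     * The index is flushed to disk every FLUSH_INTERVAL insertions.
     */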
    public void put(long key, byte[] data, long timestamp) {
        // Check to see if the record already exists.
        Record record = null;
        synchronized (mIndexMap) {
            record = mIndexMap.get(key);
        }
        if (record != null && data.length <= record.sizeOnDisk) {
            // The new data fits in the existing slot, so overwrite it in place.
            int currentChunk = record.chunk;
            try {
                RandomAccessFile chunkFile = getChunkFile(record.chunk);
                if (chunkFile != null) {
                    chunkFile.seek(record.offset);
                    chunkFile.write(data);
                    synchronized (mIndexMap) {
                        mIndexMap.put(key, new Record(currentChunk, record.offset, data.length, record.sizeOnDisk, timestamp));
                    }
                    if (++mNumInsertions == FLUSH_INTERVAL) {
                        // Flush the index file at a regular interval. To avoid writing the
                        // entire index each time, the format could be changed to an
                        // append-only journal with a snapshot generated on exit.
                        flush();
                    }
                    return;
                }
            } catch (Exception e) {
                Log.e(TAG, "Unable to write to chunk file", e);
            }
        }
        // Append the new entry to the current tail chunk.
        final int chunk = mTailChunk;
        final RandomAccessFile chunkFile = getChunkFile(chunk);
        if (chunkFile != null) {
            try {
                final int offset = (int) chunkFile.length();
                chunkFile.seek(offset);
                chunkFile.write(data);
                synchronized (mIndexMap) {
                    mIndexMap.put(key, new Record(chunk, offset, data.length, data.length, timestamp));
                }
                if (offset + data.length > CHUNK_SIZE) {
                    ++mTailChunk;
                }

                if (++mNumInsertions == FLUSH_INTERVAL) {
                    // Flush the index file at a regular interval. To avoid writing the
                    // entire index each time, the format could be changed to an
                    // append-only journal with a snapshot generated on exit.
                    flush();
                }
            } catch (IOException e) {
                Log.e(TAG, "Unable to write new entry to chunk file", e);
            }
        } else {
            Log.e(TAG, "getChunkFile() returned null");
        }
    }

    public void delete(long key) {
        synchronized (mIndexMap) {
            mIndexMap.remove(key);
        }
    }

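    /** Closes all open chunk files, clears the in-memory index, and deletes every cache file on disk. */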
    public void deleteAll() {
        // Close all open files and clear data structures.
        shutdown();

        // Delete all cache files.
        File cacheDirectory = new File(mCacheDirectoryPath);
        String[] cacheFiles = cacheDirectory.list();
        if (cacheFiles == null) {
            return;
        }
        for (String cacheFile : cacheFiles) {
            new File(cacheDirectory, cacheFile).delete();
        }
    }

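    /** Writes the index to disk if any insertions have happened since the last flush. */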
    public void flush() {
        if (mNumInsertions != 0) {
            mNumInsertions = 0;
            writeIndex();
        }
    }

    public void close() {
        writeIndex();
        shutdown();
    }

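    /** Closes all open chunk files and clears the in-memory state without touching the files on disk. */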
    private void shutdown() {
        synchronized (mChunkFiles) {
            for (int i = 0, size = mChunkFiles.size(); i < size; ++i) {
                try {
                    mChunkFiles.valueAt(i).close();
                } catch (Exception e) {
                    Log.e(TAG, "Unable to close chunk file");
                }
            }
            mChunkFiles.clear();
        }
        if (mIndexMap != null) {
            synchronized (mIndexMap) {
                mIndexMap.clear();
            }
        }
    }

    private String getIndexFilePath() {
        return mCacheDirectoryPath + INDEX_FILE_NAME;
    }

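    /**
     * Reads the index file into the in-memory map. If the file is missing the cache
     * starts empty; if the header is corrupt or the version is unsupported, the
     * entire cache is deleted.
     */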
    private void loadIndex() {
        final String indexFilePath = getIndexFilePath();
        try {
            // Open the input stream.
            final FileInputStream fileInput = new FileInputStream(indexFilePath);
            final BufferedInputStream bufferedInput = new BufferedInputStream(fileInput, 1024);
            final DataInputStream dataInput = new DataInputStream(bufferedInput);

            // Read the header.
            final int magic = dataInput.readInt();
            final int version = dataInput.readInt();
            boolean valid = true;
            if (magic != INDEX_HEADER_MAGIC) {
                Log.e(TAG, "Index file appears to be corrupt (" + magic + " != " + INDEX_HEADER_MAGIC + "), " + indexFilePath);
                valid = false;
            }
            if (valid && version != INDEX_HEADER_VERSION) {
                // Future versions can implement upgrade in this case.
                Log.e(TAG, "Index file version " + version + " not supported");
                valid = false;
            }
            if (valid) {
                mTailChunk = dataInput.readShort();
            }

            // Read the entries.
            if (valid) {
                // Parse the index file body into the in-memory map.
                final int numEntries = dataInput.readInt();
                mIndexMap = new LongSparseArray<Record>(numEntries);
                synchronized (mIndexMap) {
                    for (int i = 0; i < numEntries; ++i) {
                        final long key = dataInput.readLong();
                        final int chunk = dataInput.readShort();
                        final int offset = dataInput.readInt();
                        final int size = dataInput.readInt();
                        final int sizeOnDisk = dataInput.readInt();
                        final long timestamp = dataInput.readLong();
                        mIndexMap.append(key, new Record(chunk, offset, size, sizeOnDisk, timestamp));
                    }
                }
            }

            dataInput.close();
            if (!valid) {
                deleteAll();
            }

        } catch (FileNotFoundException e) {
            // If the file does not exist the cache is empty, so just continue.
        } catch (IOException e) {
            Log.e(TAG, "Unable to read the index file " + indexFilePath);
        } finally {
            if (mIndexMap == null) {
                mIndexMap = new LongSparseArray<Record>();
            }
        }
    }

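    /**
     * Writes the header and all records to a temp file, then renames it over the
     * index file so readers never see a partially written index.
     */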
    private void writeIndex() {
        File tempFile = null;
        final String tempFilePath = mCacheDirectoryPath;
        final String indexFilePath = getIndexFilePath();
        try {
            tempFile = File.createTempFile("DiskCache", null, new File(tempFilePath));
        } catch (Exception e) {
            Log.e(TAG, "Unable to create temp file in " + tempFilePath);
            return;
        }
        try {
            final FileOutputStream fileOutput = new FileOutputStream(tempFile);
            final BufferedOutputStream bufferedOutput = new BufferedOutputStream(fileOutput, 1024);
            final DataOutputStream dataOutput = new DataOutputStream(bufferedOutput);

            // Write the index header.
            final int numRecords = mIndexMap.size();
            dataOutput.writeInt(INDEX_HEADER_MAGIC);
            dataOutput.writeInt(INDEX_HEADER_VERSION);
            dataOutput.writeShort(mTailChunk);
            dataOutput.writeInt(numRecords);

            // Write the records.
            for (int i = 0; i < numRecords; ++i) {
                final long key = mIndexMap.keyAt(i);
                final Record record = mIndexMap.valueAt(i);
                dataOutput.writeLong(key);
                dataOutput.writeShort(record.chunk);
                dataOutput.writeInt(record.offset);
                dataOutput.writeInt(record.size);
                dataOutput.writeInt(record.sizeOnDisk);
                dataOutput.writeLong(record.timestamp);
            }

            // Close the file.
            dataOutput.close();

            // Log.d(TAG, "Wrote index with " + numRecords + " records.");

            // Atomically overwrite the old index file.
            tempFile.renameTo(new File(indexFilePath));
        } catch (Exception e) {
            // The write failed, so delete the temp file.
            Log.e(TAG, "Unable to write the index file " + indexFilePath);
            tempFile.delete();
        }
    }

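    /**
     * Returns the RandomAccessFile for the given chunk, opening (or creating) it on
     * first use and caching the handle for later calls.
     */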
    private RandomAccessFile getChunkFile(int chunk) {
        RandomAccessFile chunkFile = null;
        synchronized (mChunkFiles) {
            chunkFile = mChunkFiles.get(chunk);
        }
        if (chunkFile == null) {
            final String chunkFilePath = mCacheDirectoryPath + CHUNK_FILE_PREFIX + chunk;
            try {
                chunkFile = new RandomAccessFile(chunkFilePath, "rw");
            } catch (FileNotFoundException e) {
                Log.e(TAG, "Unable to create or open the chunk file " + chunkFilePath);
            }
            if (chunkFile != null) {
                // Only cache the handle if the file was opened successfully.
                synchronized (mChunkFiles) {
                    mChunkFiles.put(chunk, chunkFile);
                }
            }
        }
        return chunkFile;
    }

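    /** An index entry: where an item lives (chunk, offset), its logical size, the space reserved on disk, and its timestamp. */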
    private static final class Record {
        public Record(int chunk, int offset, int size, int sizeOnDisk, long timestamp) {
            this.chunk = chunk;
            this.offset = offset;
            this.size = size;
            this.timestamp = timestamp;
            this.sizeOnDisk = sizeOnDisk;
        }

        public final long timestamp;
        public final int chunk;
        public final int offset;
        public final int size;
        public final int sizeOnDisk;
    }
}