/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <inttypes.h>

#include <cutils/properties.h>
#include <log/log.h>

#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

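// Constructs an empty cache with the given per-key, per-value, and total size
// limits, and seeds the PRNG that clean() later uses to pick eviction victims.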
BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize):
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mMaxTotalSize(maxTotalSize),
        mTotalSize(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (long long)now);
}

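// Inserts or updates a key/value pair. Entries that exceed the per-key,
// per-value, or total size limits are silently skipped; if the new entry would
// push the cache past its total size limit, random entries are evicted first.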
void BlobCache::set(const void* key, size_t keySize, const void* value,
        size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)",
                keySize, mMaxKeySize);
        return;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)",
                valueSize, mMaxValueSize);
        return;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
                "large: %zu (limit: %zu)", keySize + valueSize, mMaxTotalSize);
        return;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return;
    }

    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, NULL);

    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
        if (index == mCacheEntries.end() || dummyEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                            "total cache size limit would be exceeded: %zu "
                            "(limit: %zu)",
                            keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value",
                    keySize, valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                            "size limit would be exceeded: %zu (limit: %zu)",
                            keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                    "value", keySize, valueSize);
        }
        break;
    }
}

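// Looks up the value stored for the given key. Returns the value's size in
// bytes, copying it into the caller's buffer only when valueSize is large
// enough; returns 0 if the key is not present or is larger than mMaxKeySize.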
size_t BlobCache::get(const void* key, size_t keySize, void* value,
        size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)",
                keySize, mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, NULL);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
    if (index == mCacheEntries.end() || dummyEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)",
                valueSize, valueBlobSize);
    }
    return valueBlobSize;
}

static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

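// Returns an upper bound on the buffer size needed by flatten(): the header
// (including room for the build id) plus each entry's header, key, and value,
// all rounded up to 4-byte alignment.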
size_t BlobCache::getFlattenedSize() const {
    size_t size = align4(sizeof(Header) + PROPERTY_VALUE_MAX);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

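// Serializes the cache into the caller's buffer: a Header (magic, versions,
// entry count, build id) followed by 4-byte-aligned EntryHeader/key/value
// records. Returns 0 on success or -EINVAL if the buffer is too small.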
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return -EINVAL;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    char buildId[PROPERTY_VALUE_MAX];
    header->mBuildIdLength = property_get("ro.build.id", buildId, "");
    memcpy(header->mBuildId, buildId, header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // The alignment padding bytes get written to storage and contribute to
            // the CRC, so zero them to keep the flattened output reproducible.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

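// Rebuilds the cache from a buffer produced by flatten(). A magic-number or
// size error returns -EINVAL; a version or build-id mismatch simply leaves the
// cache empty and returns 0. Entries are re-inserted through set(), so the
// current size limits still apply.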
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    char buildId[PROPERTY_VALUE_MAX];
    int len = property_get("ro.build.id", buildId, "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
            header->mDeviceVersion != blobCacheDeviceVersion ||
            len != header->mBuildIdLength ||
            strncmp(buildId, header->mBuildId, len)) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry header");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(
                &byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry data");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

void BlobCache::clean() {
    // Remove random cache entries until the total cache size drops below half
    // of the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}

bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}

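// Blob wraps a key or value buffer. When copyData is true the blob allocates
// its own copy and frees it in the destructor; otherwise it simply aliases the
// caller's memory (used for the transient lookup keys in set() and get()).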
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData) :
        mData(copyData ? malloc(size) : data),
        mSize(size),
        mOwnsData(copyData) {
    if (data != NULL && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {
}

BlobCache::CacheEntry::CacheEntry(
        const std::shared_ptr<Blob>& key, const std::shared_ptr<Blob>& value):
        mKey(key),
        mValue(value) {
}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce):
        mKey(ce.mKey),
        mValue(ce.mValue) {
}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android