/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <cutils/properties.h>
#include <log/log.h>

#include <algorithm>
#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize):
        mMaxTotalSize(maxTotalSize),
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mTotalSize(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    // srand takes an unsigned int; truncate the 64-bit timestamp explicitly.
    srand(static_cast<unsigned int>(now));
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    // %lld expects a signed long long, so cast accordingly.
    ALOGV("initializing random seed using %lld", (long long)now);
}
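
// Illustrative usage sketch (hypothetical calling code, not part of this
// file): it shows the intended round trip through set()/get() and
// flatten()/unflatten().
//
//     BlobCache cache(64, 4096, 64 * 1024);  // maxKeySize, maxValueSize, maxTotalSize
//     cache.set("key", 3, "value", 5);
//     char buf[16];
//     size_t n = cache.get("key", 3, buf, sizeof(buf));  // n == 5 on a hit, 0 on a miss
//
//     std::vector<uint8_t> flat(cache.getFlattenedSize());
//     cache.flatten(flat.data(), flat.size());
//     BlobCache restored(64, 4096, 64 * 1024);
//     restored.unflatten(flat.data(), flat.size());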

void BlobCache::set(const void* key, size_t keySize, const void* value,
        size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)",
                keySize, mMaxKeySize);
        return;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)",
                valueSize, mMaxValueSize);
        return;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
                "large: %zu (limit: %zu)", keySize + valueSize, mMaxTotalSize);
        return;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return;
    }

    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, nullptr);

    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
        if (index == mCacheEntries.end() || dummyEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                            "total cache size limit would be exceeded: %zu "
                            "(limit: %zu)",
                            keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value",
                    keySize, valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                            "size limit would be exceeded: %zu (limit: %zu)",
                            keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                    "value", keySize, valueSize);
        }
        break;
    }
}

size_t BlobCache::get(const void* key, size_t keySize, void* value,
        size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)",
                keySize, mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, nullptr);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
    if (index == mCacheEntries.end() || dummyEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)",
                valueSize, valueBlobSize);
    }
    return valueBlobSize;
}
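
// The flattened layout produced below is: a Header (including the build id
// string), then one 4-byte-aligned record per entry, each consisting of an
// EntryHeader immediately followed by the key bytes and then the value bytes.
// For example, align4(5) == 8, so an entry whose header, key, and value total
// 5 bytes occupies 8 bytes in the flattened buffer, with the 3 padding bytes
// zeroed by flatten().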

static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

size_t BlobCache::getFlattenedSize() const {
    size_t size = align4(sizeof(Header) + PROPERTY_VALUE_MAX);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return -EINVAL;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    char buildId[PROPERTY_VALUE_MAX];
    header->mBuildIdLength = property_get("ro.build.id", buildId, "");
    memcpy(header->mBuildId, buildId, header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // The padding bytes will be written to storage and contribute to
            // the CRC, so zero them to keep the results reproducible.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}
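
// unflatten() is the inverse of flatten(). It validates the magic number,
// cache version, device version, and build id before reading any entries;
// a mismatch in any of these is treated as an empty (but valid) cache, while
// a structurally truncated buffer is rejected with -EINVAL.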
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    char buildId[PROPERTY_VALUE_MAX];
    int len = property_get("ro.build.id", buildId, "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
            header->mDeviceVersion != blobCacheDeviceVersion ||
            len != header->mBuildIdLength ||
            strncmp(buildId, header->mBuildId, len)) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(
                &byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry data");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

void BlobCache::clean() {
    // Remove random cache entries until the total cache size drops below half
    // the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}

bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}
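
// Blob is a simple sized byte buffer. When copyData is false the Blob merely
// aliases the caller's memory (used for the temporary lookup keys above);
// when it is true the Blob malloc()s and owns its own copy. Blobs order
// first by size and then by memcmp(), which is the ordering the sorted
// mCacheEntries vector and std::lower_bound rely on.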
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData) :
        mData(copyData ? malloc(size) : data),
        mSize(size),
        mOwnsData(copyData) {
    if (data != nullptr && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {
}

BlobCache::CacheEntry::CacheEntry(
        const std::shared_ptr<Blob>& key, const std::shared_ptr<Blob>& value):
        mKey(key),
        mValue(value) {
}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce):
        mKey(ce.mKey),
        mValue(ce.mValue) {
}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android