/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <errno.h>
#include <inttypes.h>

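// On Android, the build id comes from the real system property store so that caches
// produced by a different system image are rejected on load. Host builds use the small
// stand-in below, which reports a fixed "[HOST]" build id.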
#if defined(__ANDROID__)
#include <cutils/properties.h>
#else
#include <string.h>

#include <algorithm>
static const char property_value[] = "[HOST]";
#define PROPERTY_VALUE_MAX (sizeof(property_value) - 1)
static int property_get(const char* key, char* value, const char* default_value) {
    if (!strcmp(key, "ro.build.id")) {
        memcpy(value, property_value, PROPERTY_VALUE_MAX);
        return PROPERTY_VALUE_MAX;
    }
    if (default_value) {
        // Clamp to PROPERTY_VALUE_MAX so we never read past the end of default_value or
        // write past the end of the caller's buffer.
        const size_t len = std::min(strlen(default_value) + 1, size_t(PROPERTY_VALUE_MAX));
        memcpy(value, default_value, len);
    }
    return 0;
}
#endif

#include <log/log.h>

#include <algorithm>
#include <chrono>
#include <functional>
#include <memory>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

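// Constructs an empty cache. policy.first chooses how eviction victims are selected
// (Select::RANDOM or Select::LRU) and policy.second chooses how far clean() shrinks the
// cache (Capacity::HALVE, FIT, or FIT_HALVE). The RANDOM policy's RNG is seeded from the
// monotonic clock.
//
// Rough usage sketch (illustrative only -- the size limits are arbitrary, and Policy is
// assumed to be the pair-like {Select, Capacity} type implied by policy.first/.second,
// with both enums assumed to be nested in BlobCache):
//
//   BlobCache cache(12, 64 * 1024, 1024 * 1024,
//                   {BlobCache::Select::LRU, BlobCache::Capacity::HALVE});
//   cache.set(keyPtr, keyLen, valuePtr, valueLen);
//   size_t storedLen = cache.get(keyPtr, keyLen, outBuf, outBufCapacity);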
BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize, Policy policy)
    : mMaxKeySize(maxKeySize),
      mMaxValueSize(maxValueSize),
      mMaxTotalSize(maxTotalSize),
      mPolicySelect(policy.first),
      mPolicyCapacity(policy.second),
      mTotalSize(0),
      mAccessCount(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (long long)now);
}

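// Inserts a new key/value pair, or replaces the value of an existing key. Pairs whose
// key, value, or combined size exceeds the configured limits are silently not cached.
// If inserting would push mTotalSize past mMaxTotalSize, clean() evicts entries
// according to the capacity policy and the insertion is retried.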
void BlobCache::set(const void* key, size_t keySize, const void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return;
    }

    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, NULL, 0);

    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
        if (index == mCacheEntries.end() || dummyEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newEntrySize = keySize + valueSize;
            size_t newTotalSize = mTotalSize + newEntrySize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    if (!clean(newEntrySize, NoEntry)) {
                        // We have some kind of logic error -- perhaps
                        // an inconsistency between isCleanable() and
                        // findDownTo().
                        ALOGE("set: not caching new key/value pair because "
                              "cleaning failed");
                        break;
                    }
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob, ++mAccessCount));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    if (!clean(index->getKey()->getSize() + valueSize,
                               index - mCacheEntries.begin())) {
                        // We have some kind of logic error -- perhaps
                        // an inconsistency between isCleanable() and
                        // findDownTo().
                        ALOGE("set: not caching new value because "
                              "cleaning failed");
                        break;
                    }
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            index->setValue(valueBlob);
            index->setRecency(++mAccessCount);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        break;
    }
}

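// Looks up the value stored under key and returns its size, or 0 if the key is absent.
// The first overload copies the value into the caller's buffer when valueSize is large
// enough; the second asks the caller-supplied alloc callback for a destination of the
// required size and copies into whatever non-null pointer it returns, reporting that
// pointer through *value (nullptr when nothing was copied).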
size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
    void* dummy;
    return get(key, keySize, &dummy, [value, valueSize](size_t allocSize) {
        return (allocSize <= valueSize ? value : nullptr);
    });
}

size_t BlobCache::get(const void* key, size_t keySize, void** value,
                      std::function<void*(size_t)> alloc) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
              mMaxKeySize);
        *value = nullptr;
        return 0;
    }
    std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
    CacheEntry dummyEntry(dummyKey, NULL, 0);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
    if (index == mCacheEntries.end() || dummyEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        *value = nullptr;
        return 0;
    }

    // The key was found. Return the value if we can allocate a buffer.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    void* buf = alloc(valueBlobSize);
    if (buf != nullptr) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(buf, valueBlob->getData(), valueBlobSize);
        *value = buf;
        index->setRecency(++mAccessCount);
    } else {
        ALOGV("get: cannot allocate caller's buffer: needs %zu", valueBlobSize);
        *value = nullptr;
    }
    return valueBlobSize;
}

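// Serialization helper: flatten() pads each record so that the next EntryHeader starts
// on a size_t-aligned boundary; align_sizet() rounds a byte count up to that alignment.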
static inline size_t align_sizet(size_t size) {
    constexpr size_t alignment = alignof(size_t) - 1;
    return (size + alignment) & ~alignment;
}

size_t BlobCache::getFlattenedSize() const {
    size_t size = align_sizet(sizeof(Header) + PROPERTY_VALUE_MAX);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align_sizet(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

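// Serializes the cache into the caller's buffer. The layout, using the Header and
// EntryHeader types from BlobCache.h, is:
//
//   Header (magic, versions, entry count, build-id length), followed by the build id,
//   padded to a size_t boundary; then for each entry an EntryHeader (key and value
//   sizes), the key bytes, the value bytes, and zeroed padding to the next size_t
//   boundary.
//
// Returns 0 on success, or -EINVAL if the buffer is too small.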
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return -EINVAL;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    char buildId[PROPERTY_VALUE_MAX];
    header->mBuildIdLength = property_get("ro.build.id", buildId, "");
    memcpy(header->mBuildId, buildId, header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align_sizet(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align_sizet(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those will get written to storage, and contribute to the CRC,
            // so make sure we zero them to have reproducible results.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

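// Rebuilds the cache from a buffer produced by flatten(). A bad magic number or a
// truncated buffer is an error (-EINVAL) and leaves the cache empty; a mismatched cache
// version, device version, or build id is not an error and simply yields an empty cache.
// Entries are re-inserted through set(), so the normal size limits still apply.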
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();
    mTotalSize = 0;  // keep the size accounting consistent with the cleared entry list

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    char buildId[PROPERTY_VALUE_MAX];
    int len = property_get("ro.build.id", buildId, "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion || len != header->mBuildIdLength ||
        strncmp(buildId, header->mBuildId, len)) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align_sizet(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            mTotalSize = 0;
            ALOGE("unflatten: not enough room for cache entry header");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align_sizet(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            mTotalSize = 0;
            ALOGE("unflatten: not enough room for cache entry");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

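// Returns the index of the entry to evict next: a uniformly random entry under
// Select::RANDOM, or the entry with the smallest recency (least recently used) under
// Select::LRU.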
size_t BlobCache::findVictim() {
    switch (mPolicySelect) {
        case Select::RANDOM:
            return size_t(blob_random() % (mCacheEntries.size()));
        case Select::LRU:
            return std::min_element(mCacheEntries.begin(), mCacheEntries.end(),
                                    [](const CacheEntry& a, const CacheEntry& b) {
                                        return a.getRecency() < b.getRecency();
                                    }) -
                   mCacheEntries.begin();
        default:
            ALOGE("findVictim: unknown mPolicySelect: %d", mPolicySelect);
            return 0;
    }
}

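// Returns the total size the cache must be reduced to before an entry of newEntrySize
// bytes can be inserted (replacing entry onBehalfOf, or nothing when onBehalfOf is
// NoEntry): half the capacity for HALVE, just enough room for the new entry for FIT,
// and the smaller of the two for FIT_HALVE.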
size_t BlobCache::findDownTo(size_t newEntrySize, size_t onBehalfOf) {
    auto oldEntrySize = [this, onBehalfOf]() -> size_t {
        if (onBehalfOf == NoEntry) return 0;
        const auto& entry = mCacheEntries[onBehalfOf];
        return entry.getKey()->getSize() + entry.getValue()->getSize();
    };
    switch (mPolicyCapacity) {
        case Capacity::HALVE:
            return mMaxTotalSize / 2;
        case Capacity::FIT:
            return mMaxTotalSize - (newEntrySize - oldEntrySize());
        case Capacity::FIT_HALVE:
            return std::min(mMaxTotalSize - (newEntrySize - oldEntrySize()), mMaxTotalSize / 2);
        default:
            ALOGE("findDownTo: unknown mPolicyCapacity: %d", mPolicyCapacity);
            return 0;
    }
}

bool BlobCache::isFit(Capacity capacity) {
    switch (capacity) {
        case Capacity::HALVE:
            return false;
        case Capacity::FIT:
        case Capacity::FIT_HALVE:
            return true;
        default:
            ALOGE("isFit: unknown capacity: %d", capacity);
            return false;
    }
}

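// Evicts entries chosen by findVictim() until mTotalSize no longer exceeds the target
// computed by findDownTo(). Returns true if at least one entry was removed.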
bool BlobCache::clean(size_t newEntrySize, size_t onBehalfOf) {
    // Remove selected cache entries until the total cache size does
    // not exceed downTo.
    const size_t downTo = findDownTo(newEntrySize, onBehalfOf);

    bool cleaned = false;
    while (mTotalSize > downTo) {
        const size_t i = findVictim();
        const CacheEntry& entry(mCacheEntries[i]);
        const size_t entrySize = entry.getKey()->getSize() + entry.getValue()->getSize();
        mTotalSize -= entrySize;
        mCacheEntries.erase(mCacheEntries.begin() + i);
        cleaned = true;
    }
    return cleaned;
}

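// Returns whether clean() could make progress under the current capacity policy, i.e.
// whether there is anything that eviction is allowed to remove.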
bool BlobCache::isCleanable() const {
    switch (mPolicyCapacity) {
        case Capacity::HALVE:
            return mTotalSize > mMaxTotalSize / 2;
        default:
            ALOGE("isCleanable: unknown mPolicyCapacity: %d", mPolicyCapacity);
            [[fallthrough]];
        case Capacity::FIT:
        case Capacity::FIT_HALVE:
            return mTotalSize > 0;
    }
}

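// Blob is a small, optionally-owning wrapper around a key or value buffer. With
// copyData == true the bytes are copied into a heap allocation owned by the Blob; with
// copyData == false (used for the temporary lookup keys in set() and get()) the Blob
// merely aliases the caller's memory.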
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
    : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
    if (data != NULL && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() : mRecency(0) {}

BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value, uint32_t recency)
    : mKey(key), mValue(value), mRecency(recency) {}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce)
    : mKey(ce.mKey), mValue(ce.mValue), mRecency(ce.mRecency) {}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    mRecency = rhs.mRecency;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

uint32_t BlobCache::CacheEntry::getRecency() const {
    return mRecency;
}

void BlobCache::CacheEntry::setRecency(uint32_t recency) {
    mRecency = recency;
}

}  // namespace android