1 /*
2  ** Copyright 2011, The Android Open Source Project
3  **
4  ** Licensed under the Apache License, Version 2.0 (the "License");
5  ** you may not use this file except in compliance with the License.
6  ** You may obtain a copy of the License at
7  **
8  **     http://www.apache.org/licenses/LICENSE-2.0
9  **
10  ** Unless required by applicable law or agreed to in writing, software
11  ** distributed under the License is distributed on an "AS IS" BASIS,
12  ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  ** See the License for the specific language governing permissions and
14  ** limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 
19 #include "BlobCache.h"
20 
21 #include <errno.h>
22 #include <inttypes.h>
23 
24 #include <android-base/properties.h>
25 #include <log/log.h>
26 #include <chrono>
27 
28 namespace android {
29 
// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value. A stored cache whose version
// differs is treated by unflatten() as empty rather than as an error.
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value; compared the same way as the
// cache version during unflatten().
static const uint32_t blobCacheDeviceVersion = 1;
38 
BlobCache(size_t maxKeySize,size_t maxValueSize,size_t maxTotalSize)39 BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize):
40         mMaxTotalSize(maxTotalSize),
41         mMaxKeySize(maxKeySize),
42         mMaxValueSize(maxValueSize),
43         mTotalSize(0) {
44     int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
45 #ifdef _WIN32
46     srand(now);
47 #else
48     mRandState[0] = (now >> 0) & 0xFFFF;
49     mRandState[1] = (now >> 16) & 0xFFFF;
50     mRandState[2] = (now >> 32) & 0xFFFF;
51 #endif
52     ALOGV("initializing random seed using %lld", (unsigned long long)now);
53 }
54 
set(const void * key,size_t keySize,const void * value,size_t valueSize)55 void BlobCache::set(const void* key, size_t keySize, const void* value,
56         size_t valueSize) {
57     if (mMaxKeySize < keySize) {
58         ALOGV("set: not caching because the key is too large: %zu (limit: %zu)",
59                 keySize, mMaxKeySize);
60         return;
61     }
62     if (mMaxValueSize < valueSize) {
63         ALOGV("set: not caching because the value is too large: %zu (limit: %zu)",
64                 valueSize, mMaxValueSize);
65         return;
66     }
67     if (mMaxTotalSize < keySize + valueSize) {
68         ALOGV("set: not caching because the combined key/value size is too "
69                 "large: %zu (limit: %zu)", keySize + valueSize, mMaxTotalSize);
70         return;
71     }
72     if (keySize == 0) {
73         ALOGW("set: not caching because keySize is 0");
74         return;
75     }
76     if (valueSize <= 0) {
77         ALOGW("set: not caching because valueSize is 0");
78         return;
79     }
80 
81     std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
82     CacheEntry dummyEntry(dummyKey, nullptr);
83 
84     while (true) {
85         auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
86         if (index == mCacheEntries.end() || dummyEntry < *index) {
87             // Create a new cache entry.
88             std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
89             std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
90             size_t newTotalSize = mTotalSize + keySize + valueSize;
91             if (mMaxTotalSize < newTotalSize) {
92                 if (isCleanable()) {
93                     // Clean the cache and try again.
94                     clean();
95                     continue;
96                 } else {
97                     ALOGV("set: not caching new key/value pair because the "
98                             "total cache size limit would be exceeded: %zu "
99                             "(limit: %zu)",
100                             keySize + valueSize, mMaxTotalSize);
101                     break;
102                 }
103             }
104             mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
105             mTotalSize = newTotalSize;
106             ALOGV("set: created new cache entry with %zu byte key and %zu byte value",
107                     keySize, valueSize);
108         } else {
109             // Update the existing cache entry.
110             std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
111             std::shared_ptr<Blob> oldValueBlob(index->getValue());
112             size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
113             if (mMaxTotalSize < newTotalSize) {
114                 if (isCleanable()) {
115                     // Clean the cache and try again.
116                     clean();
117                     continue;
118                 } else {
119                     ALOGV("set: not caching new value because the total cache "
120                             "size limit would be exceeded: %zu (limit: %zu)",
121                             keySize + valueSize, mMaxTotalSize);
122                     break;
123                 }
124             }
125             index->setValue(valueBlob);
126             mTotalSize = newTotalSize;
127             ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
128                     "value", keySize, valueSize);
129         }
130         break;
131     }
132 }
133 
get(const void * key,size_t keySize,void * value,size_t valueSize)134 size_t BlobCache::get(const void* key, size_t keySize, void* value,
135         size_t valueSize) {
136     if (mMaxKeySize < keySize) {
137         ALOGV("get: not searching because the key is too large: %zu (limit %zu)",
138                 keySize, mMaxKeySize);
139         return 0;
140     }
141     std::shared_ptr<Blob> dummyKey(new Blob(key, keySize, false));
142     CacheEntry dummyEntry(dummyKey, nullptr);
143     auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), dummyEntry);
144     if (index == mCacheEntries.end() || dummyEntry < *index) {
145         ALOGV("get: no cache entry found for key of size %zu", keySize);
146         return 0;
147     }
148 
149     // The key was found. Return the value if the caller's buffer is large
150     // enough.
151     std::shared_ptr<Blob> valueBlob(index->getValue());
152     size_t valueBlobSize = valueBlob->getSize();
153     if (valueBlobSize <= valueSize) {
154         ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
155         memcpy(value, valueBlob->getData(), valueBlobSize);
156     } else {
157         ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)",
158                 valueSize, valueBlobSize);
159     }
160     return valueBlobSize;
161 }
162 
// Rounds size up to the next multiple of 4 (identity for multiples of 4),
// matching the 4-byte record alignment used by the flattened format.
static inline size_t align4(size_t size) {
    const size_t remainder = size % 4;
    return remainder == 0 ? size : size + (4 - remainder);
}
166 
getFlattenedSize() const167 size_t BlobCache::getFlattenedSize() const {
168     auto buildId = base::GetProperty("ro.build.id", "");
169     size_t size = align4(sizeof(Header) + buildId.size());
170     for (const CacheEntry& e :  mCacheEntries) {
171         std::shared_ptr<Blob> const& keyBlob = e.getKey();
172         std::shared_ptr<Blob> const& valueBlob = e.getValue();
173         size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
174     }
175     return size;
176 }
177 
flatten(void * buffer,size_t size) const178 int BlobCache::flatten(void* buffer, size_t size) const {
179     // Write the cache header
180     if (size < sizeof(Header)) {
181         ALOGE("flatten: not enough room for cache header");
182         return 0;
183     }
184     Header* header = reinterpret_cast<Header*>(buffer);
185     header->mMagicNumber = blobCacheMagic;
186     header->mBlobCacheVersion = blobCacheVersion;
187     header->mDeviceVersion = blobCacheDeviceVersion;
188     header->mNumEntries = mCacheEntries.size();
189     auto buildId = base::GetProperty("ro.build.id", "");
190     header->mBuildIdLength = buildId.size();
191     memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);
192 
193     // Write cache entries
194     uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
195     off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
196     for (const CacheEntry& e :  mCacheEntries) {
197         std::shared_ptr<Blob> const& keyBlob = e.getKey();
198         std::shared_ptr<Blob> const& valueBlob = e.getValue();
199         size_t keySize = keyBlob->getSize();
200         size_t valueSize = valueBlob->getSize();
201 
202         size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
203         size_t totalSize = align4(entrySize);
204         if (byteOffset + totalSize > size) {
205             ALOGE("flatten: not enough room for cache entries");
206             return -EINVAL;
207         }
208 
209         EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
210         eheader->mKeySize = keySize;
211         eheader->mValueSize = valueSize;
212 
213         memcpy(eheader->mData, keyBlob->getData(), keySize);
214         memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);
215 
216         if (totalSize > entrySize) {
217             // We have padding bytes. Those will get written to storage, and contribute to the CRC,
218             // so make sure we zero-them to have reproducible results.
219             memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
220         }
221 
222         byteOffset += totalSize;
223     }
224 
225     return 0;
226 }
227 
// Rebuilds the cache contents from a buffer previously written by flatten().
// Returns 0 on success and -EINVAL on a structurally invalid buffer. A
// version/device/build-id mismatch is not an error: the cache is simply
// left empty. On any error path the cache is also left empty.
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries, each 4-byte aligned: EntryHeader, then key bytes,
    // then value bytes, then zero padding up to the alignment boundary.
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        // NOTE(review): "byteOffset + sizeof(EntryHeader)" and the totalSize
        // sum below could in principle wrap on a crafted buffer with huge
        // mKeySize/mValueSize — confirm callers only pass trusted,
        // size-checked buffers.
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(
                &byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        // Re-insert through set() so the entry is revalidated against the
        // current size limits and mTotalSize stays consistent.
        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}
283 
// Returns the next pseudo-random value used to pick eviction victims in
// clean(). Not cryptographically secure; only eviction fairness depends on it.
long int BlobCache::blob_random() {
#ifdef _WIN32
    // Windows has no nrand48(); fall back to rand(), seeded by srand() in
    // the constructor.
    return rand();
#else
    return nrand48(mRandState);
#endif
}
291 
clean()292 void BlobCache::clean() {
293     // Remove a random cache entry until the total cache size gets below half
294     // the maximum total cache size.
295     while (mTotalSize > mMaxTotalSize / 2) {
296         size_t i = size_t(blob_random() % (mCacheEntries.size()));
297         const CacheEntry& entry(mCacheEntries[i]);
298         mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
299         mCacheEntries.erase(mCacheEntries.begin() + i);
300     }
301 }
302 
isCleanable() const303 bool BlobCache::isCleanable() const {
304     return mTotalSize > mMaxTotalSize / 2;
305 }
306 
Blob(const void * data,size_t size,bool copyData)307 BlobCache::Blob::Blob(const void* data, size_t size, bool copyData) :
308         mData(copyData ? malloc(size) : data),
309         mSize(size),
310         mOwnsData(copyData) {
311     if (data != nullptr && copyData) {
312         memcpy(const_cast<void*>(mData), data, size);
313     }
314 }
315 
~Blob()316 BlobCache::Blob::~Blob() {
317     if (mOwnsData) {
318         free(const_cast<void*>(mData));
319     }
320 }
321 
operator <(const Blob & rhs) const322 bool BlobCache::Blob::operator<(const Blob& rhs) const {
323     if (mSize == rhs.mSize) {
324         return memcmp(mData, rhs.mData, mSize) < 0;
325     } else {
326         return mSize < rhs.mSize;
327     }
328 }
329 
// Read-only pointer to the blob contents; valid for the Blob's lifetime.
const void* BlobCache::Blob::getData() const {
    return mData;
}
333 
// Size in bytes of the buffer returned by getData().
size_t BlobCache::Blob::getSize() const {
    return mSize;
}
337 
// Default-constructs an entry with null key and value blob pointers.
BlobCache::CacheEntry::CacheEntry() {
}
340 
// Constructs an entry that shares ownership of the given key and value blobs.
BlobCache::CacheEntry::CacheEntry(
        const std::shared_ptr<Blob>& key, const std::shared_ptr<Blob>& value):
        mKey(key),
        mValue(value) {
}
346 
// Copies share the underlying blobs via shared_ptr; blob data is never
// duplicated.
BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce):
        mKey(ce.mKey),
        mValue(ce.mValue) {
}
351 
// Entries are ordered by key alone; the value never affects the sorted
// position used by std::lower_bound in set()/get().
bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}
355 
operator =(const CacheEntry & rhs)356 const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
357     mKey = rhs.mKey;
358     mValue = rhs.mValue;
359     return *this;
360 }
361 
// Returns a shared reference to the entry's key blob.
std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}
365 
// Returns a shared reference to the entry's value blob.
std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}
369 
// Replaces the entry's value blob; the previous value is released (and
// freed once its last shared_ptr drops).
void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}
373 
374 } // namespace android
375