/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "IMemory"

#include <atomic>
#include <stdatomic.h>

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <log/log.h>

#include <utils/KeyedVector.h>
#include <utils/threads.h>

#define VERBOSE 0

namespace android {
// ---------------------------------------------------------------------------
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    virtual void binderDied(const wp<IBinder>& who);

    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    void free_heap(const sp<IBinder>& binder);
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;
        int32_t count;
        // Note that this cannot be meaningfully copied.
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;  // Protects entire vector below.
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
    // We do not use the copy-on-write capabilities of KeyedVector.
    // TODO: Reimplement based on a standard C++ container?
};

static sp<HeapCache> gHeapCache = new HeapCache();

/******************************************************************************/

enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};

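// BpMemoryHeap is the client-side proxy for a remote memory heap. It maps
// lazily: construction only records the remote binder, and the HEAP_ID
// transaction plus mmap() happen on first access, via assertMapped() and
// assertReallyMapped(). mHeapId doubles as the "mapped" flag (-1 means not
// mapped yet) and is accessed with acquire/release semantics so that
// readers observe fully initialized mBase/mSize/mFlags/mOffset values.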
class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    explicit BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    virtual int getHeapID() const;
    virtual void* getBase() const;
    virtual size_t getSize() const;
    virtual uint32_t getFlags() const;
    off_t getOffset() const override;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    void assertMapped() const;
    void assertReallyMapped() const;

    mutable std::atomic<int32_t> mHeapId;
    mutable void* mBase;
    mutable size_t mSize;
    mutable uint32_t mFlags;
    mutable off_t mOffset;
    mutable bool mRealHeap;
    mutable Mutex mLock;
};

// ----------------------------------------------------------------------------

enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemory : public BpInterface<IMemory>
{
public:
    explicit BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    // NOLINTNEXTLINE(google-default-arguments)
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=nullptr, size_t* size=nullptr) const;

private:
    mutable sp<IMemoryHeap> mHeap;
    mutable ssize_t mOffset;
    mutable size_t mSize;
};

/******************************************************************************/
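// Illustrative client-side usage (hypothetical; an IMemory would normally
// arrive as the reply to some service-specific binder call):
//
//     sp<IMemory> mem = ...;                 // received over binder
//     void* p = mem->unsecurePointer();      // nullptr if mapping failed
//     if (p != nullptr) {
//         // up to mem->size() bytes are readable at p; the remote process
//         // shares this mapping and may modify it concurrently.
//     }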

void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
{
    sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
    void* const base = realHeap->base();
    if (base == MAP_FAILED)
        return nullptr;
    return static_cast<char*>(base) + offset;
}

void* IMemory::unsecurePointer() const {
    ssize_t offset;
    sp<IMemoryHeap> heap = getMemory(&offset);
    void* const base = heap != nullptr ? heap->base() : MAP_FAILED;
    if (base == MAP_FAILED)
        return nullptr;
    return static_cast<char*>(base) + offset;
}
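// Note: the mapping returned above is shared with the remote process, so
// its contents can change at any time. Callers must not assume the data is
// stable between reads (hence the "unsecure" in the name).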

void* IMemory::pointer() const { return unsecurePointer(); }

size_t IMemory::size() const {
    size_t size;
    getMemory(nullptr, &size);
    return size;
}

ssize_t IMemory::offset() const {
    ssize_t offset;
    getMemory(&offset);
    return offset;
}

/******************************************************************************/

BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}

// NOLINTNEXTLINE(google-default-arguments)
sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == nullptr) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            if (heap != nullptr) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != nullptr) {
                    const int64_t offset64 = reply.readInt64();
                    const uint64_t size64 = reply.readUint64();
                    const ssize_t o = (ssize_t)offset64;
                    const size_t s = (size_t)size64;
                    size_t heapSize = mHeap->getSize();
                    if (s == size64 && o == offset64 // ILP32 bounds check
                            && s <= heapSize
                            && o >= 0
                            && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // The remote sent an out-of-range offset/size; log a
                        // security event and treat the memory as empty.
                        android_errorWriteWithInfoLog(0x534e4554,
                            "26877992", -1, nullptr, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    return (mSize > 0) ? mHeap : nullptr;
}
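// The range check above is written as (o <= heapSize - s) rather than
// (o + s <= heapSize) on purpose: since s <= heapSize has already been
// established, heapSize - s cannot underflow, whereas o + s could wrap
// around in size_t arithmetic and defeat the check.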

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");

BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}

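// The GET_MEMORY reply written below is read back by BpMemory::getMemory()
// above: a strong binder for the heap, then the offset as int64 and the
// size as uint64. The two sides must stay in sync.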
// NOLINTNEXTLINE(google-default-arguments)
status_t BnMemory::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case GET_MEMORY: {
            CHECK_INTERFACE(IMemory, data, reply);
            ssize_t offset;
            size_t size;
            reply->writeStrongBinder( IInterface::asBinder(getMemory(&offset, &size)) );
            reply->writeInt64(offset);
            reply->writeUint64(size);
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

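// Illustrative server-side counterpart (hypothetical sketch, assuming the
// MemoryHeapBase/MemoryBase helpers that implement BnMemoryHeap/BnMemory):
//
//     sp<MemoryHeapBase> heap = new MemoryHeapBase(4096, 0, "example");
//     sp<MemoryBase> mem = new MemoryBase(heap, 0, 4096);  // an IMemory
//     // return `mem` from a binder call; the client then lands in
//     // BpMemory::getMemory() above.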

/******************************************************************************/

BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
      mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}

BpMemoryHeap::~BpMemoryHeap() {
    int32_t heapId = mHeapId.load(memory_order_relaxed);
    if (heapId != -1) {
        close(heapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                          binder.get(), this, mSize, heapId);
                }

                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}
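// Exactly one BpMemoryHeap per remote heap is the "real" one (mRealHeap):
// the instance held by the HeapCache, which performed the mmap(). All other
// proxies copy its fields and dup() its fd in assertMapped(), and merely
// drop their cache reference on destruction; only the real heap unmaps.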

void BpMemoryHeap::assertMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            if (mHeapId.load(memory_order_relaxed) == -1) {
                mBase = heap->mBase;
                mSize = heap->mSize;
                mOffset = heap->mOffset;
                int fd = fcntl(heap->mHeapId.load(memory_order_relaxed), F_DUPFD_CLOEXEC, 0);
                ALOGE_IF(fd == -1, "cannot dup fd=%d",
                         heap->mHeapId.load(memory_order_relaxed));
                mHeapId.store(fd, memory_order_release);
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}
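// assertReallyMapped() does the actual work for the cached heap: it issues
// the HEAP_ID transaction, dup()s the returned fd, and mmap()s it. Both it
// and assertMapped() use double-checked locking: an acquire load of mHeapId
// on the fast path, then mLock plus a relaxed re-check before initializing,
// and finally a release store of the fd, so the acquire load on other
// threads observes mBase/mSize/mFlags/mOffset as fully written.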

void BpMemoryHeap::assertReallyMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {

        // Remote call without mLock held. Worst case, several threads end up
        // calling transact() concurrently, which is harmless; only the mmap
        // below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        const uint64_t size64 = reply.readUint64();
        const int64_t offset64 = reply.readInt64();
        const uint32_t flags = reply.readUint32();
        const size_t size = (size_t)size64;
        const off_t offset = (off_t)offset64;
        if (err != NO_ERROR || // failed transaction
            size != size64 || offset != offset64) { // ILP32 size check
            ALOGE("binder=%p transaction failed fd=%d, size=%zu, err=%d (%s)",
                  IInterface::asBinder(this).get(),
                  parcel_fd, size, err, strerror(-err));
            return;
        }

        Mutex::Autolock _l(mLock);
        if (mHeapId.load(memory_order_relaxed) == -1) {
            int fd = fcntl(parcel_fd, F_DUPFD_CLOEXEC, 0);
            ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%zu, err=%d (%s)",
                     parcel_fd, size, err, strerror(errno));

            int access = PROT_READ;
            if (!(flags & READ_ONLY)) {
                access |= PROT_WRITE;
            }
            mRealHeap = true;
            mBase = mmap(nullptr, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zu, fd=%d (%s)",
                      IInterface::asBinder(this).get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                mHeapId.store(fd, memory_order_release);
            }
        }
    }
}

int BpMemoryHeap::getHeapID() const {
    assertMapped();
    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
    return mHeapId.load(memory_order_relaxed);
}

void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}

size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}

uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}

off_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");

BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}

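// The HEAP_ID reply written below is read back by assertReallyMapped()
// above: the heap's file descriptor, then size as uint64, offset as int64,
// and flags as uint32. The two sides must stay in sync.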
// NOLINTNEXTLINE(google-default-arguments)
status_t BnMemoryHeap::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case HEAP_ID: {
            CHECK_INTERFACE(IMemoryHeap, data, reply);
            reply->writeFileDescriptor(getHeapID());
            reply->writeUint64(getSize());
            reply->writeInt64(getOffset());
            reply->writeUint32(getFlags());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

/*****************************************************************************/

HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}

void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}

sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i >= 0) {
        heap_info_t& info = mHeapCache.editValueAt(i);
        ALOGD_IF(VERBOSE,
                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                 binder.get(), info.heap.get(),
                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                 static_cast<BpMemoryHeap*>(info.heap.get())
                     ->mHeapId.load(memory_order_relaxed),
                 info.count);
        ++info.count;
        return info.heap;
    } else {
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //      binder.get(), info.heap.get(), info.count);
        mHeapCache.add(binder, info);
        return info.heap;
    }
}

void HeapCache::free_heap(const sp<IBinder>& binder) {
    free_heap( wp<IBinder>(binder) );
}

void HeapCache::free_heap(const wp<IBinder>& binder)
{
    // Hold the released heap in `rel` so that its destructor (which may
    // unmap and close the heap) runs after mHeapCacheLock is dropped.
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i >= 0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            if (--info.count == 0) {
                ALOGD_IF(VERBOSE,
                         "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                         binder.unsafe_get(), info.heap.get(),
                         static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                         static_cast<BpMemoryHeap*>(info.heap.get())
                             ->mHeapId.load(memory_order_relaxed),
                         info.count);
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}

sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    sp<IMemoryHeap> realHeap;
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i >= 0) realHeap = mHeapCache.valueAt(i).heap;
    else        realHeap = interface_cast<IMemoryHeap>(binder);
    return realHeap;
}

void HeapCache::dump_heaps()
{
    Mutex::Autolock _l(mHeapCacheLock);
    int c = mHeapCache.size();
    for (int i = 0; i < c; i++) {
        const heap_info_t& info = mHeapCache.valueAt(i);
        BpMemoryHeap const* h(static_cast<BpMemoryHeap const*>(info.heap.get()));
        ALOGD("key=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
              mHeapCache.keyAt(i).unsafe_get(),
              info.heap.get(), info.count,
              h->mHeapId.load(memory_order_relaxed), h->mBase, h->mSize);
    }
}

// ---------------------------------------------------------------------------
} // namespace android