/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "IMemory"

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <sys/types.h>
#include <sys/mman.h>

#include <binder/IMemory.h>
#include <cutils/log.h>
#include <utils/KeyedVector.h>
#include <utils/threads.h>
#include <utils/Atomic.h>
#include <binder/Parcel.h>
#include <utils/CallStack.h>

#define VERBOSE 0

namespace android {
// ---------------------------------------------------------------------------
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    virtual void binderDied(const wp<IBinder>& who);

    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    void free_heap(const sp<IBinder>& binder);
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;
        int32_t count;
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
};

static sp<HeapCache> gHeapCache = new HeapCache();

/******************************************************************************/

enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    virtual int getHeapID() const;
    virtual void* getBase() const;
    virtual size_t getSize() const;
    virtual uint32_t getFlags() const;
    virtual uint32_t getOffset() const;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

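    // Lazily resolve the mapping: assertMapped() looks up the shared ("real")
    // BpMemoryHeap in gHeapCache and copies its mapping, while
    // assertReallyMapped() performs the HEAP_ID transaction and mmap()s the
    // received file descriptor exactly once.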
    void assertMapped() const;
    void assertReallyMapped() const;

    mutable volatile int32_t mHeapId;
    mutable void* mBase;
    mutable size_t mSize;
    mutable uint32_t mFlags;
    mutable uint32_t mOffset;
    mutable bool mRealHeap;
    mutable Mutex mLock;
};

// ----------------------------------------------------------------------------

enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemory : public BpInterface<IMemory>
{
public:
    BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=0, size_t* size=0) const;

private:
    mutable sp<IMemoryHeap> mHeap;
    mutable ssize_t mOffset;
    mutable size_t mSize;
};

/******************************************************************************/

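// Convenience accessors built on top of getMemory(): they resolve the backing
// heap and translate (heap, offset) into a raw pointer; pointer() and
// fastPointer() return NULL when the heap could not be mapped.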
void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
{
    sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
    void* const base = realHeap->base();
    if (base == MAP_FAILED)
        return 0;
    return static_cast<char*>(base) + offset;
}

void* IMemory::pointer() const {
    ssize_t offset;
    sp<IMemoryHeap> heap = getMemory(&offset);
    void* const base = heap!=0 ? heap->base() : MAP_FAILED;
    if (base == MAP_FAILED)
        return 0;
    return static_cast<char*>(base) + offset;
}

size_t IMemory::size() const {
    size_t size;
    getMemory(NULL, &size);
    return size;
}

ssize_t IMemory::offset() const {
    ssize_t offset;
    getMemory(&offset);
    return offset;
}

/******************************************************************************/

BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}

sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == 0) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            ssize_t o = reply.readInt32();
            size_t s = reply.readInt32();
            if (heap != 0) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != 0) {
                    size_t heapSize = mHeap->getSize();
                    if (s <= heapSize
                            && o >= 0
                            && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // Offset/size do not fit within the heap: report the
                        // malformed reply and reject it.
                        android_errorWriteWithInfoLog(0x534e4554,
                                "26877992", -1, NULL, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    return (mSize > 0) ? mHeap : 0;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");

BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}

status_t BnMemory::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case GET_MEMORY: {
            CHECK_INTERFACE(IMemory, data, reply);
            ssize_t offset;
            size_t size;
            reply->writeStrongBinder( IInterface::asBinder(getMemory(&offset, &size)) );
            reply->writeInt32(offset);
            reply->writeInt32(size);
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}


/******************************************************************************/

BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
      mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}

BpMemoryHeap::~BpMemoryHeap() {
    if (mHeapId != -1) {
        close(mHeapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                            binder.get(), this, mSize, mHeapId);
                    CallStack stack(LOG_TAG);
                }

                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}

void BpMemoryHeap::assertMapped() const
{
    if (mHeapId == -1) {
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            if (mHeapId == -1) {
                mBase = heap->mBase;
                mSize = heap->mSize;
                mOffset = heap->mOffset;
                android_atomic_write( dup( heap->mHeapId ), &mHeapId );
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}

void BpMemoryHeap::assertReallyMapped() const
{
    if (mHeapId == -1) {

        // This remote call is made without holding mLock; in the worst case
        // several threads end up calling transact() concurrently, which is
        // harmless -- only the mmap() below must be inside the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        ssize_t size = reply.readInt32();
        uint32_t flags = reply.readInt32();
        uint32_t offset = reply.readInt32();

        ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%zd, err=%d (%s)",
                IInterface::asBinder(this).get(),
                parcel_fd, size, err, strerror(-err));

        Mutex::Autolock _l(mLock);
        if (mHeapId == -1) {
            int fd = dup( parcel_fd );
            ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, err=%d (%s)",
                    parcel_fd, size, err, strerror(errno));

            int access = PROT_READ;
            if (!(flags & READ_ONLY)) {
                access |= PROT_WRITE;
            }

            mRealHeap = true;
            mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zd, fd=%d (%s)",
                        IInterface::asBinder(this).get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                android_atomic_write(fd, &mHeapId);
            }
        }
    }
}

int BpMemoryHeap::getHeapID() const {
    assertMapped();
    return mHeapId;
}

void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}

size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}

uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}

uint32_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");

BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}

status_t BnMemoryHeap::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case HEAP_ID: {
            CHECK_INTERFACE(IMemoryHeap, data, reply);
            reply->writeFileDescriptor(getHeapID());
            reply->writeInt32(getSize());
            reply->writeInt32(getFlags());
            reply->writeInt32(getOffset());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

/*****************************************************************************/

HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}

void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}

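// Look up (or create) the cache entry for this binder and take a reference on
// it; the matching free_heap() call drops that reference.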
sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i>=0) {
        heap_info_t& info = mHeapCache.editValueAt(i);
        ALOGD_IF(VERBOSE,
                "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                binder.get(), info.heap.get(),
                static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
                info.count);
        android_atomic_inc(&info.count);
        return info.heap;
    } else {
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //        binder.get(), info.heap.get(), info.count);
        mHeapCache.add(binder, info);
        return info.heap;
    }
}

void HeapCache::free_heap(const sp<IBinder>& binder) {
    free_heap( wp<IBinder>(binder) );
}

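// Drop one reference on the cache entry; when the last reference goes away the
// entry is removed and the heap itself is released after mHeapCacheLock has
// been dropped (when 'rel' goes out of scope).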
void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i>=0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            int32_t c = android_atomic_dec(&info.count);
            if (c == 1) {
                ALOGD_IF(VERBOSE,
                        "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                        binder.unsafe_get(), info.heap.get(),
                        static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                        static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
                        info.count);
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}

sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    sp<IMemoryHeap> realHeap;
    Mutex::Autolock _l(mHeapCacheLock);
    ssize_t i = mHeapCache.indexOfKey(binder);
    if (i>=0) realHeap = mHeapCache.valueAt(i).heap;
    else realHeap = interface_cast<IMemoryHeap>(binder);
    return realHeap;
}

void HeapCache::dump_heaps()
{
    Mutex::Autolock _l(mHeapCacheLock);
    int c = mHeapCache.size();
    for (int i=0 ; i<c ; i++) {
        const heap_info_t& info = mHeapCache.valueAt(i);
        BpMemoryHeap const* h(static_cast<BpMemoryHeap const *>(info.heap.get()));
        ALOGD("key=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
                mHeapCache.keyAt(i).unsafe_get(),
                info.heap.get(), info.count,
                h->mHeapId, h->mBase, h->mSize);
    }
}


// ---------------------------------------------------------------------------
} // namespace android