1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <binder/Parcel.h>
21
22 #include <binder/IPCThreadState.h>
23 #include <binder/Binder.h>
24 #include <binder/BpBinder.h>
25 #include <binder/ProcessState.h>
26 #include <binder/TextOutput.h>
27
28 #include <errno.h>
29 #include <utils/Debug.h>
30 #include <utils/Log.h>
31 #include <utils/String8.h>
32 #include <utils/String16.h>
33 #include <utils/misc.h>
34 #include <utils/Flattenable.h>
35 #include <cutils/ashmem.h>
36
37 #include <private/binder/binder_module.h>
38 #include <private/binder/Static.h>
39
40 #include <inttypes.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <stdint.h>
44 #include <sys/mman.h>
45
46 #ifndef INT32_MAX
47 #define INT32_MAX ((int32_t)(2147483647))
48 #endif
49
50 #define LOG_REFS(...)
51 //#define LOG_REFS(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
52 #define LOG_ALLOC(...)
53 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
54
55 // ---------------------------------------------------------------------------
56
57 // This macro should never be used at runtime, as a too large value
58 // of s could cause an integer overflow. Instead, you should always
59 // use the wrapper function pad_size()
60 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
61
static size_t pad_size(size_t s) {
63 if (s > (SIZE_T_MAX - 3)) {
64 abort();
65 }
66 return PAD_SIZE_UNSAFE(s);
67 }
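
// For illustration: pad_size() rounds a length up to the next multiple of 4,
// since Parcel data is kept 4-byte aligned. For example:
//   pad_size(0) == 0, pad_size(1) == 4, pad_size(4) == 4, pad_size(5) == 8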
68
69 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
70 #define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
71
72 // Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER
73 #define EX_HAS_REPLY_HEADER -128
74
75 // XXX This can be made public if we want to provide
76 // support for typed data.
77 struct small_flat_data
78 {
79 uint32_t type;
80 uint32_t data;
81 };
82
83 namespace android {
84
85 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
86 static size_t gParcelGlobalAllocSize = 0;
87 static size_t gParcelGlobalAllocCount = 0;
88
89 // Maximum size of a blob to transfer in-place.
90 static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
91
92 enum {
93 BLOB_INPLACE = 0,
94 BLOB_ASHMEM_IMMUTABLE = 1,
95 BLOB_ASHMEM_MUTABLE = 2,
96 };
97
void acquire_object(const sp<ProcessState>& proc,
99 const flat_binder_object& obj, const void* who)
100 {
101 switch (obj.type) {
102 case BINDER_TYPE_BINDER:
103 if (obj.binder) {
104 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
105 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
106 }
107 return;
108 case BINDER_TYPE_WEAK_BINDER:
109 if (obj.binder)
110 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
111 return;
112 case BINDER_TYPE_HANDLE: {
113 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
114 if (b != NULL) {
115 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
116 b->incStrong(who);
117 }
118 return;
119 }
120 case BINDER_TYPE_WEAK_HANDLE: {
121 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
122 if (b != NULL) b.get_refs()->incWeak(who);
123 return;
124 }
125 case BINDER_TYPE_FD: {
126 // intentionally blank -- nothing to do to acquire this, but we do
127 // recognize it as a legitimate object type.
128 return;
129 }
130 }
131
132 ALOGD("Invalid object type 0x%08x", obj.type);
133 }
134
void release_object(const sp<ProcessState>& proc,
136 const flat_binder_object& obj, const void* who)
137 {
138 switch (obj.type) {
139 case BINDER_TYPE_BINDER:
140 if (obj.binder) {
141 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
142 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
143 }
144 return;
145 case BINDER_TYPE_WEAK_BINDER:
146 if (obj.binder)
147 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
148 return;
149 case BINDER_TYPE_HANDLE: {
150 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
151 if (b != NULL) {
152 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
153 b->decStrong(who);
154 }
155 return;
156 }
157 case BINDER_TYPE_WEAK_HANDLE: {
158 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
159 if (b != NULL) b.get_refs()->decWeak(who);
160 return;
161 }
162 case BINDER_TYPE_FD: {
163 if (obj.cookie != 0) close(obj.handle);
164 return;
165 }
166 }
167
168 ALOGE("Invalid object type 0x%08x", obj.type);
169 }
170
inline static status_t finish_flatten_binder(
172 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
173 {
174 return out->writeObject(flat, false);
175 }
176
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
178 const sp<IBinder>& binder, Parcel* out)
179 {
180 flat_binder_object obj;
181
182 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
183 if (binder != NULL) {
184 IBinder *local = binder->localBinder();
185 if (!local) {
186 BpBinder *proxy = binder->remoteBinder();
187 if (proxy == NULL) {
188 ALOGE("null proxy");
189 }
190 const int32_t handle = proxy ? proxy->handle() : 0;
191 obj.type = BINDER_TYPE_HANDLE;
192 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
193 obj.handle = handle;
194 obj.cookie = 0;
195 } else {
196 obj.type = BINDER_TYPE_BINDER;
197 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
198 obj.cookie = reinterpret_cast<uintptr_t>(local);
199 }
200 } else {
201 obj.type = BINDER_TYPE_BINDER;
202 obj.binder = 0;
203 obj.cookie = 0;
204 }
205
206 return finish_flatten_binder(binder, obj, out);
207 }
208
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
210 const wp<IBinder>& binder, Parcel* out)
211 {
212 flat_binder_object obj;
213
214 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
215 if (binder != NULL) {
216 sp<IBinder> real = binder.promote();
217 if (real != NULL) {
218 IBinder *local = real->localBinder();
219 if (!local) {
220 BpBinder *proxy = real->remoteBinder();
221 if (proxy == NULL) {
222 ALOGE("null proxy");
223 }
224 const int32_t handle = proxy ? proxy->handle() : 0;
225 obj.type = BINDER_TYPE_WEAK_HANDLE;
226 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
227 obj.handle = handle;
228 obj.cookie = 0;
229 } else {
230 obj.type = BINDER_TYPE_WEAK_BINDER;
231 obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
232 obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
233 }
234 return finish_flatten_binder(real, obj, out);
235 }
236
237 // XXX How to deal? In order to flatten the given binder,
238 // we need to probe it for information, which requires a primary
239 // reference... but we don't have one.
240 //
241 // The OpenBinder implementation uses a dynamic_cast<> here,
242 // but we can't do that with the different reference counting
243 // implementation we are using.
244 ALOGE("Unable to unflatten Binder weak reference!");
245 obj.type = BINDER_TYPE_BINDER;
246 obj.binder = 0;
247 obj.cookie = 0;
248 return finish_flatten_binder(NULL, obj, out);
249
250 } else {
251 obj.type = BINDER_TYPE_BINDER;
252 obj.binder = 0;
253 obj.cookie = 0;
254 return finish_flatten_binder(NULL, obj, out);
255 }
256 }
257
inline static status_t finish_unflatten_binder(
259 BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
260 const Parcel& /*in*/)
261 {
262 return NO_ERROR;
263 }
264
status_t unflatten_binder(const sp<ProcessState>& proc,
266 const Parcel& in, sp<IBinder>* out)
267 {
268 const flat_binder_object* flat = in.readObject(false);
269
270 if (flat) {
271 switch (flat->type) {
272 case BINDER_TYPE_BINDER:
273 *out = reinterpret_cast<IBinder*>(flat->cookie);
274 return finish_unflatten_binder(NULL, *flat, in);
275 case BINDER_TYPE_HANDLE:
276 *out = proc->getStrongProxyForHandle(flat->handle);
277 return finish_unflatten_binder(
278 static_cast<BpBinder*>(out->get()), *flat, in);
279 }
280 }
281 return BAD_TYPE;
282 }
283
status_t unflatten_binder(const sp<ProcessState>& proc,
285 const Parcel& in, wp<IBinder>* out)
286 {
287 const flat_binder_object* flat = in.readObject(false);
288
289 if (flat) {
290 switch (flat->type) {
291 case BINDER_TYPE_BINDER:
292 *out = reinterpret_cast<IBinder*>(flat->cookie);
293 return finish_unflatten_binder(NULL, *flat, in);
294 case BINDER_TYPE_WEAK_BINDER:
295 if (flat->binder != 0) {
296 out->set_object_and_refs(
297 reinterpret_cast<IBinder*>(flat->cookie),
298 reinterpret_cast<RefBase::weakref_type*>(flat->binder));
299 } else {
300 *out = NULL;
301 }
302 return finish_unflatten_binder(NULL, *flat, in);
303 case BINDER_TYPE_HANDLE:
304 case BINDER_TYPE_WEAK_HANDLE:
305 *out = proc->getWeakProxyForHandle(flat->handle);
306 return finish_unflatten_binder(
307 static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
308 }
309 }
310 return BAD_TYPE;
311 }
312
313 // ---------------------------------------------------------------------------
314
Parcel::Parcel()
316 {
317 LOG_ALLOC("Parcel %p: constructing", this);
318 initState();
319 }
320
Parcel::~Parcel()
322 {
323 freeDataNoInit();
324 LOG_ALLOC("Parcel %p: destroyed", this);
325 }
326
size_t Parcel::getGlobalAllocSize() {
328 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
329 size_t size = gParcelGlobalAllocSize;
330 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
331 return size;
332 }
333
size_t Parcel::getGlobalAllocCount() {
335 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
336 size_t count = gParcelGlobalAllocCount;
337 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
338 return count;
339 }
340
const uint8_t* Parcel::data() const
342 {
343 return mData;
344 }
345
size_t Parcel::dataSize() const
347 {
348 return (mDataSize > mDataPos ? mDataSize : mDataPos);
349 }
350
size_t Parcel::dataAvail() const
352 {
353 // TODO: decide what to do about the possibility that this can
354 // report an available-data size that exceeds a Java int's max
355 // positive value, causing havoc. Fortunately this will only
356 // happen if someone constructs a Parcel containing more than two
357 // gigabytes of data, which on typical phone hardware is simply
358 // not possible.
359 return dataSize() - dataPosition();
360 }
361
size_t Parcel::dataPosition() const
363 {
364 return mDataPos;
365 }
366
size_t Parcel::dataCapacity() const
368 {
369 return mDataCapacity;
370 }
371
status_t Parcel::setDataSize(size_t size)
373 {
374 if (size > INT32_MAX) {
375 // don't accept size_t values which may have come from an
376 // inadvertent conversion from a negative int.
377 return BAD_VALUE;
378 }
379
380 status_t err;
381 err = continueWrite(size);
382 if (err == NO_ERROR) {
383 mDataSize = size;
384 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
385 }
386 return err;
387 }
388
void Parcel::setDataPosition(size_t pos) const
390 {
391 if (pos > INT32_MAX) {
392 // don't accept size_t values which may have come from an
393 // inadvertent conversion from a negative int.
394 abort();
395 }
396
397 mDataPos = pos;
398 mNextObjectHint = 0;
399 }
400
status_t Parcel::setDataCapacity(size_t size)
402 {
403 if (size > INT32_MAX) {
404 // don't accept size_t values which may have come from an
405 // inadvertent conversion from a negative int.
406 return BAD_VALUE;
407 }
408
409 if (size > mDataCapacity) return continueWrite(size);
410 return NO_ERROR;
411 }
412
status_t Parcel::setData(const uint8_t* buffer, size_t len)
414 {
415 if (len > INT32_MAX) {
416 // don't accept size_t values which may have come from an
417 // inadvertent conversion from a negative int.
418 return BAD_VALUE;
419 }
420
421 status_t err = restartWrite(len);
422 if (err == NO_ERROR) {
423 memcpy(const_cast<uint8_t*>(data()), buffer, len);
424 mDataSize = len;
425 mFdsKnown = false;
426 }
427 return err;
428 }
429
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
431 {
432 const sp<ProcessState> proc(ProcessState::self());
433 status_t err;
434 const uint8_t *data = parcel->mData;
435 const binder_size_t *objects = parcel->mObjects;
436 size_t size = parcel->mObjectsSize;
437 int startPos = mDataPos;
438 int firstIndex = -1, lastIndex = -2;
439
440 if (len == 0) {
441 return NO_ERROR;
442 }
443
444 if (len > INT32_MAX) {
445 // don't accept size_t values which may have come from an
446 // inadvertent conversion from a negative int.
447 return BAD_VALUE;
448 }
449
450 // range checks against the source parcel size
451 if ((offset > parcel->mDataSize)
452 || (len > parcel->mDataSize)
453 || (offset + len > parcel->mDataSize)) {
454 return BAD_VALUE;
455 }
456
457 // Count objects in range
458 for (int i = 0; i < (int) size; i++) {
459 size_t off = objects[i];
460 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
461 if (firstIndex == -1) {
462 firstIndex = i;
463 }
464 lastIndex = i;
465 }
466 }
467 int numObjects = lastIndex - firstIndex + 1;
468
469 if ((mDataSize+len) > mDataCapacity) {
470 // grow data
471 err = growData(len);
472 if (err != NO_ERROR) {
473 return err;
474 }
475 }
476
477 // append data
478 memcpy(mData + mDataPos, data + offset, len);
479 mDataPos += len;
480 mDataSize += len;
481
482 err = NO_ERROR;
483
484 if (numObjects > 0) {
485 // grow objects
486 if (mObjectsCapacity < mObjectsSize + numObjects) {
487 size_t newSize = ((mObjectsSize + numObjects)*3)/2;
488 if (newSize < mObjectsSize) return NO_MEMORY; // overflow
489 binder_size_t *objects =
490 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
491 if (objects == (binder_size_t*)0) {
492 return NO_MEMORY;
493 }
494 mObjects = objects;
495 mObjectsCapacity = newSize;
496 }
497
498 // append and acquire objects
499 int idx = mObjectsSize;
500 for (int i = firstIndex; i <= lastIndex; i++) {
501 size_t off = objects[i] - offset + startPos;
502 mObjects[idx++] = off;
503 mObjectsSize++;
504
505 flat_binder_object* flat
506 = reinterpret_cast<flat_binder_object*>(mData + off);
507 acquire_object(proc, *flat, this);
508
509 if (flat->type == BINDER_TYPE_FD) {
510 // If this is a file descriptor, we need to dup it so the
511 // new Parcel now owns its own fd, and can declare that we
512 // officially know we have fds.
513 flat->handle = dup(flat->handle);
514 flat->cookie = 1;
515 mHasFds = mFdsKnown = true;
516 if (!mAllowFds) {
517 err = FDS_NOT_ALLOWED;
518 }
519 }
520 }
521 }
522
523 return err;
524 }
525
bool Parcel::allowFds() const
527 {
528 return mAllowFds;
529 }
530
bool Parcel::pushAllowFds(bool allowFds)
532 {
533 const bool origValue = mAllowFds;
534 if (!allowFds) {
535 mAllowFds = false;
536 }
537 return origValue;
538 }
539
void Parcel::restoreAllowFds(bool lastValue)
541 {
542 mAllowFds = lastValue;
543 }
544
bool Parcel::hasFileDescriptors() const
546 {
547 if (!mFdsKnown) {
548 scanForFds();
549 }
550 return mHasFds;
551 }
552
553 // Write RPC headers. (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
555 {
556 writeInt32(IPCThreadState::self()->getStrictModePolicy() |
557 STRICT_MODE_PENALTY_GATHER);
558 // currently the interface identification token is just its name as a string
559 return writeString16(interface);
560 }
561
bool Parcel::checkInterface(IBinder* binder) const
563 {
564 return enforceInterface(binder->getInterfaceDescriptor());
565 }
566
bool Parcel::enforceInterface(const String16& interface,
568 IPCThreadState* threadState) const
569 {
570 int32_t strictPolicy = readInt32();
571 if (threadState == NULL) {
572 threadState = IPCThreadState::self();
573 }
574 if ((threadState->getLastTransactionBinderFlags() &
575 IBinder::FLAG_ONEWAY) != 0) {
576 // For one-way calls, the callee is running entirely
577 // disconnected from the caller, so disable StrictMode entirely.
578 // Not only does disk/network usage not impact the caller, but
        // there's no way to communicate back any violations anyway.
580 threadState->setStrictModePolicy(0);
581 } else {
582 threadState->setStrictModePolicy(strictPolicy);
583 }
584 const String16 str(readString16());
585 if (str == interface) {
586 return true;
587 } else {
588 ALOGW("**** enforceInterface() expected '%s' but read '%s'",
589 String8(interface).string(), String8(str).string());
590 return false;
591 }
592 }
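
// Illustrative usage of the two calls above (a sketch; "com.example.IFoo" is a
// hypothetical interface descriptor, and generated stubs typically go through
// the CHECK_INTERFACE() macro rather than calling enforceInterface() directly):
//
//   // proxy side, before marshalling arguments:
//   data.writeInterfaceToken(String16("com.example.IFoo"));
//
//   // stub side, before unmarshalling arguments:
//   if (!data.enforceInterface(String16("com.example.IFoo"))) return BAD_TYPE;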
593
const binder_size_t* Parcel::objects() const
595 {
596 return mObjects;
597 }
598
size_t Parcel::objectsCount() const
600 {
601 return mObjectsSize;
602 }
603
status_t Parcel::errorCheck() const
605 {
606 return mError;
607 }
608
void Parcel::setError(status_t err)
610 {
611 mError = err;
612 }
613
status_t Parcel::finishWrite(size_t len)
615 {
616 if (len > INT32_MAX) {
617 // don't accept size_t values which may have come from an
618 // inadvertent conversion from a negative int.
619 return BAD_VALUE;
620 }
621
622 //printf("Finish write of %d\n", len);
623 mDataPos += len;
624 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
625 if (mDataPos > mDataSize) {
626 mDataSize = mDataPos;
627 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
628 }
629 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
630 return NO_ERROR;
631 }
632
status_t Parcel::writeUnpadded(const void* data, size_t len)
634 {
635 if (len > INT32_MAX) {
636 // don't accept size_t values which may have come from an
637 // inadvertent conversion from a negative int.
638 return BAD_VALUE;
639 }
640
641 size_t end = mDataPos + len;
642 if (end < mDataPos) {
643 // integer overflow
644 return BAD_VALUE;
645 }
646
647 if (end <= mDataCapacity) {
648 restart_write:
649 memcpy(mData+mDataPos, data, len);
650 return finishWrite(len);
651 }
652
653 status_t err = growData(len);
654 if (err == NO_ERROR) goto restart_write;
655 return err;
656 }
657
status_t Parcel::write(const void* data, size_t len)
659 {
660 if (len > INT32_MAX) {
661 // don't accept size_t values which may have come from an
662 // inadvertent conversion from a negative int.
663 return BAD_VALUE;
664 }
665
666 void* const d = writeInplace(len);
667 if (d) {
668 memcpy(d, data, len);
669 return NO_ERROR;
670 }
671 return mError;
672 }
673
void* Parcel::writeInplace(size_t len)
675 {
676 if (len > INT32_MAX) {
677 // don't accept size_t values which may have come from an
678 // inadvertent conversion from a negative int.
679 return NULL;
680 }
681
682 const size_t padded = pad_size(len);
683
684 // sanity check for integer overflow
685 if (mDataPos+padded < mDataPos) {
686 return NULL;
687 }
688
689 if ((mDataPos+padded) <= mDataCapacity) {
690 restart_write:
691 //printf("Writing %ld bytes, padded to %ld\n", len, padded);
692 uint8_t* const data = mData+mDataPos;
693
694 // Need to pad at end?
695 if (padded != len) {
696 #if BYTE_ORDER == BIG_ENDIAN
697 static const uint32_t mask[4] = {
698 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
699 };
700 #endif
701 #if BYTE_ORDER == LITTLE_ENDIAN
702 static const uint32_t mask[4] = {
703 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
704 };
705 #endif
706 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
707 // *reinterpret_cast<void**>(data+padded-4));
708 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
709 }
710
711 finishWrite(padded);
712 return data;
713 }
714
715 status_t err = growData(padded);
716 if (err == NO_ERROR) goto restart_write;
717 return NULL;
718 }
719
status_t Parcel::writeInt32(int32_t val)
721 {
722 return writeAligned(val);
723 }
724
status_t Parcel::writeUint32(uint32_t val)
726 {
727 return writeAligned(val);
728 }
729
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
731 if (len > INT32_MAX) {
732 // don't accept size_t values which may have come from an
733 // inadvertent conversion from a negative int.
734 return BAD_VALUE;
735 }
736
737 if (!val) {
738 return writeInt32(-1);
739 }
740 status_t ret = writeInt32(static_cast<uint32_t>(len));
741 if (ret == NO_ERROR) {
742 ret = write(val, len * sizeof(*val));
743 }
744 return ret;
745 }
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
747 if (len > INT32_MAX) {
748 // don't accept size_t values which may have come from an
749 // inadvertent conversion from a negative int.
750 return BAD_VALUE;
751 }
752
753 if (!val) {
754 return writeInt32(-1);
755 }
756 status_t ret = writeInt32(static_cast<uint32_t>(len));
757 if (ret == NO_ERROR) {
758 ret = write(val, len * sizeof(*val));
759 }
760 return ret;
761 }
762
status_t Parcel::writeInt64(int64_t val)
764 {
765 return writeAligned(val);
766 }
767
status_t Parcel::writeUint64(uint64_t val)
769 {
770 return writeAligned(val);
771 }
772
status_t Parcel::writePointer(uintptr_t val)
774 {
775 return writeAligned<binder_uintptr_t>(val);
776 }
777
status_t Parcel::writeFloat(float val)
779 {
780 return writeAligned(val);
781 }
782
783 #if defined(__mips__) && defined(__mips_hard_float)
784
status_t Parcel::writeDouble(double val)
786 {
787 union {
788 double d;
789 unsigned long long ll;
790 } u;
791 u.d = val;
792 return writeAligned(u.ll);
793 }
794
795 #else
796
status_t Parcel::writeDouble(double val)
798 {
799 return writeAligned(val);
800 }
801
802 #endif
803
status_t Parcel::writeCString(const char* str)
805 {
806 return write(str, strlen(str)+1);
807 }
808
status_t Parcel::writeString8(const String8& str)
810 {
811 status_t err = writeInt32(str.bytes());
812 // only write string if its length is more than zero characters,
813 // as readString8 will only read if the length field is non-zero.
814 // this is slightly different from how writeString16 works.
815 if (str.bytes() > 0 && err == NO_ERROR) {
816 err = write(str.string(), str.bytes()+1);
817 }
818 return err;
819 }
820
status_t Parcel::writeString16(const String16& str)
822 {
823 return writeString16(str.string(), str.size());
824 }
825
status_t Parcel::writeString16(const char16_t* str, size_t len)
827 {
828 if (str == NULL) return writeInt32(-1);
829
830 status_t err = writeInt32(len);
831 if (err == NO_ERROR) {
832 len *= sizeof(char16_t);
833 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
834 if (data) {
835 memcpy(data, str, len);
836 *reinterpret_cast<char16_t*>(data+len) = 0;
837 return NO_ERROR;
838 }
839 err = mError;
840 }
841 return err;
842 }
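
// Wire format notes for the String16 writers above: a 32-bit character count
// is written first, followed by the UTF-16 payload plus one char16_t NUL
// terminator, with writeInplace() padding the total out to a 4-byte boundary.
// A NULL string pointer is encoded as a count of -1.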
843
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
845 {
846 return flatten_binder(ProcessState::self(), val, this);
847 }
848
status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
850 {
851 return flatten_binder(ProcessState::self(), val, this);
852 }
853
status_t Parcel::writeNativeHandle(const native_handle* handle)
855 {
856 if (!handle || handle->version != sizeof(native_handle))
857 return BAD_TYPE;
858
859 status_t err;
860 err = writeInt32(handle->numFds);
861 if (err != NO_ERROR) return err;
862
863 err = writeInt32(handle->numInts);
864 if (err != NO_ERROR) return err;
865
866 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
867 err = writeDupFileDescriptor(handle->data[i]);
868
869 if (err != NO_ERROR) {
870 ALOGD("write native handle, write dup fd failed");
871 return err;
872 }
873 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
874 return err;
875 }
876
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
878 {
879 flat_binder_object obj;
880 obj.type = BINDER_TYPE_FD;
881 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
882 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
883 obj.handle = fd;
884 obj.cookie = takeOwnership ? 1 : 0;
885 return writeObject(obj, true);
886 }
887
status_t Parcel::writeDupFileDescriptor(int fd)
889 {
890 int dupFd = dup(fd);
891 if (dupFd < 0) {
892 return -errno;
893 }
894 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
895 if (err) {
896 close(dupFd);
897 }
898 return err;
899 }
900
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
902 {
903 if (len > INT32_MAX) {
904 // don't accept size_t values which may have come from an
905 // inadvertent conversion from a negative int.
906 return BAD_VALUE;
907 }
908
909 status_t status;
910 if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
911 ALOGV("writeBlob: write in place");
912 status = writeInt32(BLOB_INPLACE);
913 if (status) return status;
914
915 void* ptr = writeInplace(len);
916 if (!ptr) return NO_MEMORY;
917
918 outBlob->init(-1, ptr, len, false);
919 return NO_ERROR;
920 }
921
922 ALOGV("writeBlob: write to ashmem");
923 int fd = ashmem_create_region("Parcel Blob", len);
924 if (fd < 0) return NO_MEMORY;
925
926 mBlobAshmemSize += len;
927
928 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
929 if (result < 0) {
930 status = result;
931 } else {
932 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
933 if (ptr == MAP_FAILED) {
934 status = -errno;
935 } else {
936 if (!mutableCopy) {
937 result = ashmem_set_prot_region(fd, PROT_READ);
938 }
939 if (result < 0) {
940 status = result;
941 } else {
942 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
943 if (!status) {
944 status = writeFileDescriptor(fd, true /*takeOwnership*/);
945 if (!status) {
946 outBlob->init(fd, ptr, len, mutableCopy);
947 return NO_ERROR;
948 }
949 }
950 }
951 }
952 ::munmap(ptr, len);
953 }
954 ::close(fd);
955 return status;
956 }
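
// Blob wire format produced above (mirrored by readBlob() below): a 32-bit
// blob type, then either the payload itself written in place (BLOB_INPLACE)
// or a file descriptor object referring to an ashmem region that holds the
// data (BLOB_ASHMEM_MUTABLE / BLOB_ASHMEM_IMMUTABLE).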
957
status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
959 {
960 // Must match up with what's done in writeBlob.
961 if (!mAllowFds) return FDS_NOT_ALLOWED;
962 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
963 if (status) return status;
964 return writeDupFileDescriptor(fd);
965 }
966
status_t Parcel::write(const FlattenableHelperInterface& val)
968 {
969 status_t err;
970
971 // size if needed
972 const size_t len = val.getFlattenedSize();
973 const size_t fd_count = val.getFdCount();
974
975 if ((len > INT32_MAX) || (fd_count > INT32_MAX)) {
976 // don't accept size_t values which may have come from an
977 // inadvertent conversion from a negative int.
978 return BAD_VALUE;
979 }
980
981 err = this->writeInt32(len);
982 if (err) return err;
983
984 err = this->writeInt32(fd_count);
985 if (err) return err;
986
987 // payload
988 void* const buf = this->writeInplace(pad_size(len));
989 if (buf == NULL)
990 return BAD_VALUE;
991
992 int* fds = NULL;
993 if (fd_count) {
994 fds = new int[fd_count];
995 }
996
997 err = val.flatten(buf, len, fds, fd_count);
998 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
999 err = this->writeDupFileDescriptor( fds[i] );
1000 }
1001
1002 if (fd_count) {
1003 delete [] fds;
1004 }
1005
1006 return err;
1007 }
1008
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1010 {
1011 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1012 const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1013 if (enoughData && enoughObjects) {
1014 restart_write:
1015 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1016
1017 // remember if it's a file descriptor
1018 if (val.type == BINDER_TYPE_FD) {
1019 if (!mAllowFds) {
1020 // fail before modifying our object index
1021 return FDS_NOT_ALLOWED;
1022 }
1023 mHasFds = mFdsKnown = true;
1024 }
1025
1026 // Need to write meta-data?
1027 if (nullMetaData || val.binder != 0) {
1028 mObjects[mObjectsSize] = mDataPos;
1029 acquire_object(ProcessState::self(), val, this);
1030 mObjectsSize++;
1031 }
1032
1033 return finishWrite(sizeof(flat_binder_object));
1034 }
1035
1036 if (!enoughData) {
1037 const status_t err = growData(sizeof(val));
1038 if (err != NO_ERROR) return err;
1039 }
1040 if (!enoughObjects) {
1041 size_t newSize = ((mObjectsSize+2)*3)/2;
1042 if (newSize < mObjectsSize) return NO_MEMORY; // overflow
1043 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1044 if (objects == NULL) return NO_MEMORY;
1045 mObjects = objects;
1046 mObjectsCapacity = newSize;
1047 }
1048
1049 goto restart_write;
1050 }
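
// Note: writeObject() records the offset of every flat_binder_object in
// mObjects; these offsets are handed to the binder driver so it can translate
// binders and file descriptors, and readObject() below validates read
// positions against the same list.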
1051
status_t Parcel::writeNoException()
1053 {
1054 return writeInt32(0);
1055 }
1056
void Parcel::remove(size_t /*start*/, size_t /*amt*/)
1058 {
1059 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
1060 }
1061
status_t Parcel::read(void* outData, size_t len) const
1063 {
1064 if (len > INT32_MAX) {
1065 // don't accept size_t values which may have come from an
1066 // inadvertent conversion from a negative int.
1067 return BAD_VALUE;
1068 }
1069
1070 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1071 && len <= pad_size(len)) {
1072 memcpy(outData, mData+mDataPos, len);
1073 mDataPos += pad_size(len);
1074 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1075 return NO_ERROR;
1076 }
1077 return NOT_ENOUGH_DATA;
1078 }
1079
const void* Parcel::readInplace(size_t len) const
1081 {
1082 if (len > INT32_MAX) {
1083 // don't accept size_t values which may have come from an
1084 // inadvertent conversion from a negative int.
1085 return NULL;
1086 }
1087
1088 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1089 && len <= pad_size(len)) {
1090 const void* data = mData+mDataPos;
1091 mDataPos += pad_size(len);
1092 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1093 return data;
1094 }
1095 return NULL;
1096 }
1097
1098 template<class T>
status_t Parcel::readAligned(T *pArg) const {
1100 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1101
1102 if ((mDataPos+sizeof(T)) <= mDataSize) {
1103 const void* data = mData+mDataPos;
1104 mDataPos += sizeof(T);
1105 *pArg = *reinterpret_cast<const T*>(data);
1106 return NO_ERROR;
1107 } else {
1108 return NOT_ENOUGH_DATA;
1109 }
1110 }
1111
1112 template<class T>
T Parcel::readAligned() const {
1114 T result;
1115 if (readAligned(&result) != NO_ERROR) {
1116 result = 0;
1117 }
1118
1119 return result;
1120 }
1121
1122 template<class T>
status_t Parcel::writeAligned(T val) {
1124 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1125
1126 if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1127 restart_write:
1128 *reinterpret_cast<T*>(mData+mDataPos) = val;
1129 return finishWrite(sizeof(val));
1130 }
1131
1132 status_t err = growData(sizeof(val));
1133 if (err == NO_ERROR) goto restart_write;
1134 return err;
1135 }
1136
status_t Parcel::readInt32(int32_t *pArg) const
1138 {
1139 return readAligned(pArg);
1140 }
1141
int32_t Parcel::readInt32() const
1143 {
1144 return readAligned<int32_t>();
1145 }
1146
status_t Parcel::readUint32(uint32_t *pArg) const
1148 {
1149 return readAligned(pArg);
1150 }
1151
uint32_t Parcel::readUint32() const
1153 {
1154 return readAligned<uint32_t>();
1155 }
1156
status_t Parcel::readInt64(int64_t *pArg) const
1158 {
1159 return readAligned(pArg);
1160 }
1161
1162
int64_t Parcel::readInt64() const
1164 {
1165 return readAligned<int64_t>();
1166 }
1167
status_t Parcel::readUint64(uint64_t *pArg) const
1169 {
1170 return readAligned(pArg);
1171 }
1172
uint64_t Parcel::readUint64() const
1174 {
1175 return readAligned<uint64_t>();
1176 }
1177
status_t Parcel::readPointer(uintptr_t *pArg) const
1179 {
1180 status_t ret;
1181 binder_uintptr_t ptr;
1182 ret = readAligned(&ptr);
1183 if (!ret)
1184 *pArg = ptr;
1185 return ret;
1186 }
1187
uintptr_t Parcel::readPointer() const
1189 {
1190 return readAligned<binder_uintptr_t>();
1191 }
1192
1193
status_t Parcel::readFloat(float *pArg) const
1195 {
1196 return readAligned(pArg);
1197 }
1198
1199
float Parcel::readFloat() const
1201 {
1202 return readAligned<float>();
1203 }
1204
1205 #if defined(__mips__) && defined(__mips_hard_float)
1206
status_t Parcel::readDouble(double *pArg) const
1208 {
1209 union {
1210 double d;
1211 unsigned long long ll;
1212 } u;
1213 u.d = 0;
1214 status_t status;
1215 status = readAligned(&u.ll);
1216 *pArg = u.d;
1217 return status;
1218 }
1219
double Parcel::readDouble() const
1221 {
1222 union {
1223 double d;
1224 unsigned long long ll;
1225 } u;
1226 u.ll = readAligned<unsigned long long>();
1227 return u.d;
1228 }
1229
1230 #else
1231
status_t Parcel::readDouble(double *pArg) const
1233 {
1234 return readAligned(pArg);
1235 }
1236
double Parcel::readDouble() const
1238 {
1239 return readAligned<double>();
1240 }
1241
1242 #endif
1243
status_t Parcel::readIntPtr(intptr_t *pArg) const
1245 {
1246 return readAligned(pArg);
1247 }
1248
1249
intptr_t Parcel::readIntPtr() const
1251 {
1252 return readAligned<intptr_t>();
1253 }
1254
1255
const char* Parcel::readCString() const
1257 {
1258 const size_t avail = mDataSize-mDataPos;
1259 if (avail > 0) {
1260 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1261 // is the string's trailing NUL within the parcel's valid bounds?
1262 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1263 if (eos) {
1264 const size_t len = eos - str;
1265 mDataPos += pad_size(len+1);
1266 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1267 return str;
1268 }
1269 }
1270 return NULL;
1271 }
1272
String8 Parcel::readString8() const
1274 {
1275 int32_t size = readInt32();
1276 // watch for potential int overflow adding 1 for trailing NUL
1277 if (size > 0 && size < INT32_MAX) {
1278 const char* str = (const char*)readInplace(size+1);
1279 if (str) return String8(str, size);
1280 }
1281 return String8();
1282 }
1283
String16 Parcel::readString16() const
1285 {
1286 size_t len;
1287 const char16_t* str = readString16Inplace(&len);
1288 if (str) return String16(str, len);
1289 ALOGE("Reading a NULL string not supported here.");
1290 return String16();
1291 }
1292
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1294 {
1295 int32_t size = readInt32();
1296 // watch for potential int overflow from size+1
1297 if (size >= 0 && size < INT32_MAX) {
1298 *outLen = size;
1299 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1300 if (str != NULL) {
1301 return str;
1302 }
1303 }
1304 *outLen = 0;
1305 return NULL;
1306 }
1307
sp<IBinder> Parcel::readStrongBinder() const
1309 {
1310 sp<IBinder> val;
1311 unflatten_binder(ProcessState::self(), *this, &val);
1312 return val;
1313 }
1314
wp<IBinder> Parcel::readWeakBinder() const
1316 {
1317 wp<IBinder> val;
1318 unflatten_binder(ProcessState::self(), *this, &val);
1319 return val;
1320 }
1321
int32_t Parcel::readExceptionCode() const
1323 {
1324 int32_t exception_code = readAligned<int32_t>();
1325 if (exception_code == EX_HAS_REPLY_HEADER) {
1326 int32_t header_start = dataPosition();
1327 int32_t header_size = readAligned<int32_t>();
        // Skip over fat response headers. Not used (or propagated) in
1329 // native code
1330 setDataPosition(header_start + header_size);
1331 // And fat response headers are currently only used when there are no
1332 // exceptions, so return no error:
1333 return 0;
1334 }
1335 return exception_code;
1336 }
1337
native_handle* Parcel::readNativeHandle() const
1339 {
1340 int numFds, numInts;
1341 status_t err;
1342 err = readInt32(&numFds);
1343 if (err != NO_ERROR) return 0;
1344 err = readInt32(&numInts);
1345 if (err != NO_ERROR) return 0;
1346
1347 native_handle* h = native_handle_create(numFds, numInts);
1348 if (!h) {
1349 return 0;
1350 }
1351
1352 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
1353 h->data[i] = dup(readFileDescriptor());
1354 if (h->data[i] < 0) err = BAD_VALUE;
1355 }
1356 err = read(h->data + numFds, sizeof(int)*numInts);
1357 if (err != NO_ERROR) {
1358 native_handle_close(h);
1359 native_handle_delete(h);
1360 h = 0;
1361 }
1362 return h;
1363 }
1364
1365
int Parcel::readFileDescriptor() const
1367 {
1368 const flat_binder_object* flat = readObject(true);
1369 if (flat) {
1370 switch (flat->type) {
1371 case BINDER_TYPE_FD:
1372 //ALOGI("Returning file descriptor %ld from parcel %p", flat->handle, this);
1373 return flat->handle;
1374 }
1375 }
1376 return BAD_TYPE;
1377 }
1378
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
1380 {
1381 int32_t blobType;
1382 status_t status = readInt32(&blobType);
1383 if (status) return status;
1384
1385 if (blobType == BLOB_INPLACE) {
1386 ALOGV("readBlob: read in place");
1387 const void* ptr = readInplace(len);
1388 if (!ptr) return BAD_VALUE;
1389
1390 outBlob->init(-1, const_cast<void*>(ptr), len, false);
1391 return NO_ERROR;
1392 }
1393
1394 ALOGV("readBlob: read from ashmem");
1395 bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
1396 int fd = readFileDescriptor();
1397 if (fd == int(BAD_TYPE)) return BAD_VALUE;
1398
1399 void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
1400 MAP_SHARED, fd, 0);
1401 if (ptr == MAP_FAILED) return NO_MEMORY;
1402
1403 outBlob->init(fd, ptr, len, isMutable);
1404 return NO_ERROR;
1405 }
1406
status_t Parcel::read(FlattenableHelperInterface& val) const
1408 {
1409 // size
1410 const size_t len = this->readInt32();
1411 const size_t fd_count = this->readInt32();
1412
1413 if (len > INT32_MAX) {
1414 // don't accept size_t values which may have come from an
1415 // inadvertent conversion from a negative int.
1416 return BAD_VALUE;
1417 }
1418
1419 // payload
1420 void const* const buf = this->readInplace(pad_size(len));
1421 if (buf == NULL)
1422 return BAD_VALUE;
1423
1424 int* fds = NULL;
1425 if (fd_count) {
1426 fds = new int[fd_count];
1427 }
1428
1429 status_t err = NO_ERROR;
1430 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1431 fds[i] = dup(this->readFileDescriptor());
1432 if (fds[i] < 0) {
1433 err = BAD_VALUE;
1434 ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
1435 i, fds[i], fd_count, strerror(errno));
1436 }
1437 }
1438
1439 if (err == NO_ERROR) {
1440 err = val.unflatten(buf, len, fds, fd_count);
1441 }
1442
1443 if (fd_count) {
1444 delete [] fds;
1445 }
1446
1447 return err;
1448 }
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
1450 {
1451 const size_t DPOS = mDataPos;
1452 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
1453 const flat_binder_object* obj
1454 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
1455 mDataPos = DPOS + sizeof(flat_binder_object);
1456 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
1457 // When transferring a NULL object, we don't write it into
1458 // the object list, so we don't want to check for it when
1459 // reading.
1460 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1461 return obj;
1462 }
1463
1464 // Ensure that this object is valid...
1465 binder_size_t* const OBJS = mObjects;
1466 const size_t N = mObjectsSize;
1467 size_t opos = mNextObjectHint;
1468
1469 if (N > 0) {
1470 ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
1471 this, DPOS, opos);
1472
1473 // Start at the current hint position, looking for an object at
1474 // the current data position.
1475 if (opos < N) {
1476 while (opos < (N-1) && OBJS[opos] < DPOS) {
1477 opos++;
1478 }
1479 } else {
1480 opos = N-1;
1481 }
1482 if (OBJS[opos] == DPOS) {
1483 // Found it!
1484 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
1485 this, DPOS, opos);
1486 mNextObjectHint = opos+1;
1487 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1488 return obj;
1489 }
1490
1491 // Look backwards for it...
1492 while (opos > 0 && OBJS[opos] > DPOS) {
1493 opos--;
1494 }
1495 if (OBJS[opos] == DPOS) {
1496 // Found it!
1497 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
1498 this, DPOS, opos);
1499 mNextObjectHint = opos+1;
1500 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1501 return obj;
1502 }
1503 }
1504 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
1505 this, DPOS);
1506 }
1507 return NULL;
1508 }
1509
void Parcel::closeFileDescriptors()
1511 {
1512 size_t i = mObjectsSize;
1513 if (i > 0) {
1514 //ALOGI("Closing file descriptors for %zu objects...", i);
1515 }
1516 while (i > 0) {
1517 i--;
1518 const flat_binder_object* flat
1519 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
1520 if (flat->type == BINDER_TYPE_FD) {
1521 //ALOGI("Closing fd: %ld", flat->handle);
1522 close(flat->handle);
1523 }
1524 }
1525 }
1526
uintptr_t Parcel::ipcData() const
1528 {
1529 return reinterpret_cast<uintptr_t>(mData);
1530 }
1531
size_t Parcel::ipcDataSize() const
1533 {
1534 return (mDataSize > mDataPos ? mDataSize : mDataPos);
1535 }
1536
uintptr_t Parcel::ipcObjects() const
1538 {
1539 return reinterpret_cast<uintptr_t>(mObjects);
1540 }
1541
size_t Parcel::ipcObjectsCount() const
1543 {
1544 return mObjectsSize;
1545 }
1546
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
1548 const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
1549 {
1550 binder_size_t minOffset = 0;
1551 freeDataNoInit();
1552 mError = NO_ERROR;
1553 mData = const_cast<uint8_t*>(data);
1554 mDataSize = mDataCapacity = dataSize;
1555 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
1556 mDataPos = 0;
1557 ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
1558 mObjects = const_cast<binder_size_t*>(objects);
1559 mObjectsSize = mObjectsCapacity = objectsCount;
1560 mNextObjectHint = 0;
1561 mOwner = relFunc;
1562 mOwnerCookie = relCookie;
1563 for (size_t i = 0; i < mObjectsSize; i++) {
1564 binder_size_t offset = mObjects[i];
1565 if (offset < minOffset) {
1566 ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
1567 __func__, (uint64_t)offset, (uint64_t)minOffset);
1568 mObjectsSize = 0;
1569 break;
1570 }
1571 minOffset = offset + sizeof(flat_binder_object);
1572 }
1573 scanForFds();
1574 }
1575
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
1577 {
1578 to << "Parcel(";
1579
1580 if (errorCheck() != NO_ERROR) {
1581 const status_t err = errorCheck();
1582 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
1583 } else if (dataSize() > 0) {
1584 const uint8_t* DATA = data();
1585 to << indent << HexDump(DATA, dataSize()) << dedent;
1586 const binder_size_t* OBJS = objects();
1587 const size_t N = objectsCount();
1588 for (size_t i=0; i<N; i++) {
1589 const flat_binder_object* flat
1590 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
1591 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
1592 << TypeCode(flat->type & 0x7f7f7f00)
1593 << " = " << flat->binder;
1594 }
1595 } else {
1596 to << "NULL";
1597 }
1598
1599 to << ")";
1600 }
1601
void Parcel::releaseObjects()
1603 {
1604 const sp<ProcessState> proc(ProcessState::self());
1605 size_t i = mObjectsSize;
1606 uint8_t* const data = mData;
1607 binder_size_t* const objects = mObjects;
1608 while (i > 0) {
1609 i--;
1610 const flat_binder_object* flat
1611 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
1612 release_object(proc, *flat, this);
1613 }
1614 }
1615
void Parcel::acquireObjects()
1617 {
1618 const sp<ProcessState> proc(ProcessState::self());
1619 size_t i = mObjectsSize;
1620 uint8_t* const data = mData;
1621 binder_size_t* const objects = mObjects;
1622 while (i > 0) {
1623 i--;
1624 const flat_binder_object* flat
1625 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
1626 acquire_object(proc, *flat, this);
1627 }
1628 }
1629
void Parcel::freeData()
1631 {
1632 freeDataNoInit();
1633 initState();
1634 }
1635
void Parcel::freeDataNoInit()
1637 {
1638 if (mOwner) {
1639 LOG_ALLOC("Parcel %p: freeing other owner data", this);
1640 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
1641 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
1642 } else {
1643 LOG_ALLOC("Parcel %p: freeing allocated data", this);
1644 releaseObjects();
1645 if (mData) {
1646 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
1647 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
1648 gParcelGlobalAllocSize -= mDataCapacity;
1649 gParcelGlobalAllocCount--;
1650 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
1651 free(mData);
1652 }
1653 if (mObjects) free(mObjects);
1654 }
1655 }
1656
status_t Parcel::growData(size_t len)
1658 {
1659 if (len > INT32_MAX) {
1660 // don't accept size_t values which may have come from an
1661 // inadvertent conversion from a negative int.
1662 return BAD_VALUE;
1663 }
1664
1665 size_t newSize = ((mDataSize+len)*3)/2;
1666 return (newSize <= mDataSize)
1667 ? (status_t) NO_MEMORY
1668 : continueWrite(newSize);
1669 }
1670
status_t Parcel::restartWrite(size_t desired)
1672 {
1673 if (desired > INT32_MAX) {
1674 // don't accept size_t values which may have come from an
1675 // inadvertent conversion from a negative int.
1676 return BAD_VALUE;
1677 }
1678
1679 if (mOwner) {
1680 freeData();
1681 return continueWrite(desired);
1682 }
1683
1684 uint8_t* data = (uint8_t*)realloc(mData, desired);
1685 if (!data && desired > mDataCapacity) {
1686 mError = NO_MEMORY;
1687 return NO_MEMORY;
1688 }
1689
1690 releaseObjects();
1691
1692 if (data) {
1693 LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
1694 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
1695 gParcelGlobalAllocSize += desired;
1696 gParcelGlobalAllocSize -= mDataCapacity;
1697 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
1698 mData = data;
1699 mDataCapacity = desired;
1700 }
1701
1702 mDataSize = mDataPos = 0;
1703 ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
1704 ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
1705
1706 free(mObjects);
1707 mObjects = NULL;
1708 mObjectsSize = mObjectsCapacity = 0;
1709 mNextObjectHint = 0;
1710 mHasFds = false;
1711 mFdsKnown = true;
1712 mAllowFds = true;
1713
1714 return NO_ERROR;
1715 }
1716
status_t Parcel::continueWrite(size_t desired)
1718 {
1719 if (desired > INT32_MAX) {
1720 // don't accept size_t values which may have come from an
1721 // inadvertent conversion from a negative int.
1722 return BAD_VALUE;
1723 }
1724
1725 // If shrinking, first adjust for any objects that appear
1726 // after the new data size.
1727 size_t objectsSize = mObjectsSize;
1728 if (desired < mDataSize) {
1729 if (desired == 0) {
1730 objectsSize = 0;
1731 } else {
1732 while (objectsSize > 0) {
1733 if (mObjects[objectsSize-1] < desired)
1734 break;
1735 objectsSize--;
1736 }
1737 }
1738 }
1739
1740 if (mOwner) {
1741 // If the size is going to zero, just release the owner's data.
1742 if (desired == 0) {
1743 freeData();
1744 return NO_ERROR;
1745 }
1746
1747 // If there is a different owner, we need to take
        // possession.
1749 uint8_t* data = (uint8_t*)malloc(desired);
1750 if (!data) {
1751 mError = NO_MEMORY;
1752 return NO_MEMORY;
1753 }
1754 binder_size_t* objects = NULL;
1755
1756 if (objectsSize) {
1757 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
1758 if (!objects) {
1759 free(data);
1760
1761 mError = NO_MEMORY;
1762 return NO_MEMORY;
1763 }
1764
1765 // Little hack to only acquire references on objects
1766 // we will be keeping.
1767 size_t oldObjectsSize = mObjectsSize;
1768 mObjectsSize = objectsSize;
1769 acquireObjects();
1770 mObjectsSize = oldObjectsSize;
1771 }
1772
1773 if (mData) {
1774 memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
1775 }
1776 if (objects && mObjects) {
1777 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
1778 }
1779 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
1780 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
1781 mOwner = NULL;
1782
1783 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
1784 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
1785 gParcelGlobalAllocSize += desired;
1786 gParcelGlobalAllocCount++;
1787 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
1788
1789 mData = data;
1790 mObjects = objects;
1791 mDataSize = (mDataSize < desired) ? mDataSize : desired;
1792 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
1793 mDataCapacity = desired;
1794 mObjectsSize = mObjectsCapacity = objectsSize;
1795 mNextObjectHint = 0;
1796
1797 } else if (mData) {
1798 if (objectsSize < mObjectsSize) {
1799 // Need to release refs on any objects we are dropping.
1800 const sp<ProcessState> proc(ProcessState::self());
1801 for (size_t i=objectsSize; i<mObjectsSize; i++) {
1802 const flat_binder_object* flat
1803 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
1804 if (flat->type == BINDER_TYPE_FD) {
1805 // will need to rescan because we may have lopped off the only FDs
1806 mFdsKnown = false;
1807 }
1808 release_object(proc, *flat, this);
1809 }
1810 binder_size_t* objects =
1811 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
1812 if (objects) {
1813 mObjects = objects;
1814 }
1815 mObjectsSize = objectsSize;
1816 mNextObjectHint = 0;
1817 }
1818
1819 // We own the data, so we can just do a realloc().
1820 if (desired > mDataCapacity) {
1821 uint8_t* data = (uint8_t*)realloc(mData, desired);
1822 if (data) {
1823 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
1824 desired);
1825 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
1826 gParcelGlobalAllocSize += desired;
1827 gParcelGlobalAllocSize -= mDataCapacity;
1828 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
1829 mData = data;
1830 mDataCapacity = desired;
1831 } else if (desired > mDataCapacity) {
1832 mError = NO_MEMORY;
1833 return NO_MEMORY;
1834 }
1835 } else {
1836 if (mDataSize > desired) {
1837 mDataSize = desired;
1838 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
1839 }
1840 if (mDataPos > desired) {
1841 mDataPos = desired;
1842 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
1843 }
1844 }
1845
1846 } else {
1847 // This is the first data. Easy!
1848 uint8_t* data = (uint8_t*)malloc(desired);
1849 if (!data) {
1850 mError = NO_MEMORY;
1851 return NO_MEMORY;
1852 }
1853
        if (!(mDataCapacity == 0 && mObjects == NULL
1855 && mObjectsCapacity == 0)) {
1856 ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
1857 }
1858
1859 LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
1860 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
1861 gParcelGlobalAllocSize += desired;
1862 gParcelGlobalAllocCount++;
1863 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
1864
1865 mData = data;
1866 mDataSize = mDataPos = 0;
1867 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
1868 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
1869 mDataCapacity = desired;
1870 }
1871
1872 return NO_ERROR;
1873 }
1874
void Parcel::initState()
1876 {
1877 LOG_ALLOC("Parcel %p: initState", this);
1878 mError = NO_ERROR;
1879 mData = 0;
1880 mDataSize = 0;
1881 mDataCapacity = 0;
1882 mDataPos = 0;
1883 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
1884 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
1885 mObjects = NULL;
1886 mObjectsSize = 0;
1887 mObjectsCapacity = 0;
1888 mNextObjectHint = 0;
1889 mHasFds = false;
1890 mFdsKnown = true;
1891 mAllowFds = true;
1892 mOwner = NULL;
1893 mBlobAshmemSize = 0;
1894 }
1895
void Parcel::scanForFds() const
1897 {
1898 bool hasFds = false;
1899 for (size_t i=0; i<mObjectsSize; i++) {
1900 const flat_binder_object* flat
1901 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
1902 if (flat->type == BINDER_TYPE_FD) {
1903 hasFds = true;
1904 break;
1905 }
1906 }
1907 mHasFds = hasFds;
1908 mFdsKnown = true;
1909 }
1910
size_t Parcel::getBlobAshmemSize() const
1912 {
1913 return mBlobAshmemSize;
1914 }
1915
1916 // --- Parcel::Blob ---
1917
Parcel::Blob::Blob() :
1919 mFd(-1), mData(NULL), mSize(0), mMutable(false) {
1920 }
1921
Parcel::Blob::~Blob() {
1923 release();
1924 }
1925
void Parcel::Blob::release() {
1927 if (mFd != -1 && mData) {
1928 ::munmap(mData, mSize);
1929 }
1930 clear();
1931 }
1932
void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
1934 mFd = fd;
1935 mData = data;
1936 mSize = size;
1937 mMutable = isMutable;
1938 }
1939
void Parcel::Blob::clear() {
1941 mFd = -1;
1942 mData = NULL;
1943 mSize = 0;
1944 mMutable = false;
1945 }
1946
1947 }; // namespace android
1948