1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <pthread.h>
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <sys/mman.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <sys/resource.h>
31 #include <unistd.h>
32
33 #include <binder/Binder.h>
34 #include <binder/BpBinder.h>
35 #include <binder/IPCThreadState.h>
36 #include <binder/Parcel.h>
37 #include <binder/ProcessState.h>
38 #include <binder/Status.h>
39 #include <binder/TextOutput.h>
40
41 #include <cutils/ashmem.h>
42 #include <utils/Debug.h>
43 #include <utils/Flattenable.h>
44 #include <utils/Log.h>
45 #include <utils/misc.h>
46 #include <utils/String8.h>
47 #include <utils/String16.h>
48
49 #include <private/binder/binder_module.h>
50 #include <private/binder/Static.h>
51
52 #ifndef INT32_MAX
53 #define INT32_MAX ((int32_t)(2147483647))
54 #endif
55
56 #define LOG_REFS(...)
57 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
58 #define LOG_ALLOC(...)
59 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
60
61 // ---------------------------------------------------------------------------
62
63 // This macro should never be used at runtime, as a too large value
64 // of s could cause an integer overflow. Instead, you should always
65 // use the wrapper function pad_size()
66 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
67
68 static size_t pad_size(size_t s) {
69 if (s > (SIZE_T_MAX - 3)) {
70 abort();
71 }
72 return PAD_SIZE_UNSAFE(s);
73 }
74
75 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
76 #define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
77
78 // XXX This can be made public if we want to provide
79 // support for typed data.
80 struct small_flat_data
81 {
82 uint32_t type;
83 uint32_t data;
84 };
85
86 namespace android {
87
88 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
89 static size_t gParcelGlobalAllocSize = 0;
90 static size_t gParcelGlobalAllocCount = 0;
91
92 static size_t gMaxFds = 0;
93
94 // Maximum size of a blob to transfer in-place.
95 static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
96
97 enum {
98 BLOB_INPLACE = 0,
99 BLOB_ASHMEM_IMMUTABLE = 1,
100 BLOB_ASHMEM_MUTABLE = 2,
101 };
102
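// Helper: returns the device number of /dev/ashmem, computed once and cached
// under a local mutex. acquire_object()/release_object() below use it to decide
// whether a BINDER_TYPE_FD object refers to an ashmem region whose size should
// be tracked for this Parcel.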
103 static dev_t ashmem_rdev()
104 {
105 static dev_t __ashmem_rdev;
106 static pthread_mutex_t __ashmem_rdev_lock = PTHREAD_MUTEX_INITIALIZER;
107
108 pthread_mutex_lock(&__ashmem_rdev_lock);
109
110 dev_t rdev = __ashmem_rdev;
111 if (!rdev) {
112 int fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDONLY));
113 if (fd >= 0) {
114 struct stat st;
115
116 int ret = TEMP_FAILURE_RETRY(fstat(fd, &st));
117 close(fd);
118 if ((ret >= 0) && S_ISCHR(st.st_mode)) {
119 rdev = __ashmem_rdev = st.st_rdev;
120 }
121 }
122 }
123
124 pthread_mutex_unlock(&__ashmem_rdev_lock);
125
126 return rdev;
127 }
128
129 void acquire_object(const sp<ProcessState>& proc,
130 const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
131 {
132 switch (obj.type) {
133 case BINDER_TYPE_BINDER:
134 if (obj.binder) {
135 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
136 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
137 }
138 return;
139 case BINDER_TYPE_WEAK_BINDER:
140 if (obj.binder)
141 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
142 return;
143 case BINDER_TYPE_HANDLE: {
144 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
145 if (b != NULL) {
146 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
147 b->incStrong(who);
148 }
149 return;
150 }
151 case BINDER_TYPE_WEAK_HANDLE: {
152 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
153 if (b != NULL) b.get_refs()->incWeak(who);
154 return;
155 }
156 case BINDER_TYPE_FD: {
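// A non-zero cookie marks a descriptor this Parcel owns (see writeFileDescriptor);
// only owned ashmem fds are counted in outAshmemSize.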
157 if ((obj.cookie != 0) && (outAshmemSize != NULL)) {
158 struct stat st;
159 int ret = fstat(obj.handle, &st);
160 if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
161 // If we own an ashmem fd, keep track of how much memory it refers to.
162 int size = ashmem_get_size_region(obj.handle);
163 if (size > 0) {
164 *outAshmemSize += size;
165 }
166 }
167 }
168 return;
169 }
170 }
171
172 ALOGD("Invalid object type 0x%08x", obj.type);
173 }
174
175 void acquire_object(const sp<ProcessState>& proc,
176 const flat_binder_object& obj, const void* who)
177 {
178 acquire_object(proc, obj, who, NULL);
179 }
180
181 static void release_object(const sp<ProcessState>& proc,
182 const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
183 {
184 switch (obj.type) {
185 case BINDER_TYPE_BINDER:
186 if (obj.binder) {
187 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
188 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
189 }
190 return;
191 case BINDER_TYPE_WEAK_BINDER:
192 if (obj.binder)
193 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
194 return;
195 case BINDER_TYPE_HANDLE: {
196 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
197 if (b != NULL) {
198 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
199 b->decStrong(who);
200 }
201 return;
202 }
203 case BINDER_TYPE_WEAK_HANDLE: {
204 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
205 if (b != NULL) b.get_refs()->decWeak(who);
206 return;
207 }
208 case BINDER_TYPE_FD: {
209 if (obj.cookie != 0) { // owned
210 if (outAshmemSize != NULL) {
211 struct stat st;
212 int ret = fstat(obj.handle, &st);
213 if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
214 int size = ashmem_get_size_region(obj.handle);
215 if (size > 0) {
216 *outAshmemSize -= size;
217 }
218 }
219 }
220
221 close(obj.handle);
222 }
223 return;
224 }
225 }
226
227 ALOGE("Invalid object type 0x%08x", obj.type);
228 }
229
230 void release_object(const sp<ProcessState>& proc,
231 const flat_binder_object& obj, const void* who)
232 {
233 release_object(proc, obj, who, NULL);
234 }
235
236 inline static status_t finish_flatten_binder(
237 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
238 {
239 return out->writeObject(flat, false);
240 }
241
242 status_t flatten_binder(const sp<ProcessState>& /*proc*/,
243 const sp<IBinder>& binder, Parcel* out)
244 {
245 flat_binder_object obj;
246
247 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
248 if (binder != NULL) {
249 IBinder *local = binder->localBinder();
250 if (!local) {
251 BpBinder *proxy = binder->remoteBinder();
252 if (proxy == NULL) {
253 ALOGE("null proxy");
254 }
255 const int32_t handle = proxy ? proxy->handle() : 0;
256 obj.type = BINDER_TYPE_HANDLE;
257 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
258 obj.handle = handle;
259 obj.cookie = 0;
260 } else {
261 obj.type = BINDER_TYPE_BINDER;
262 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
263 obj.cookie = reinterpret_cast<uintptr_t>(local);
264 }
265 } else {
266 obj.type = BINDER_TYPE_BINDER;
267 obj.binder = 0;
268 obj.cookie = 0;
269 }
270
271 return finish_flatten_binder(binder, obj, out);
272 }
273
274 status_t flatten_binder(const sp<ProcessState>& /*proc*/,
275 const wp<IBinder>& binder, Parcel* out)
276 {
277 flat_binder_object obj;
278
279 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
280 if (binder != NULL) {
281 sp<IBinder> real = binder.promote();
282 if (real != NULL) {
283 IBinder *local = real->localBinder();
284 if (!local) {
285 BpBinder *proxy = real->remoteBinder();
286 if (proxy == NULL) {
287 ALOGE("null proxy");
288 }
289 const int32_t handle = proxy ? proxy->handle() : 0;
290 obj.type = BINDER_TYPE_WEAK_HANDLE;
291 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
292 obj.handle = handle;
293 obj.cookie = 0;
294 } else {
295 obj.type = BINDER_TYPE_WEAK_BINDER;
296 obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
297 obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
298 }
299 return finish_flatten_binder(real, obj, out);
300 }
301
302 // XXX How to deal? In order to flatten the given binder,
303 // we need to probe it for information, which requires a primary
304 // reference... but we don't have one.
305 //
306 // The OpenBinder implementation uses a dynamic_cast<> here,
307 // but we can't do that with the different reference counting
308 // implementation we are using.
309 ALOGE("Unable to unflatten Binder weak reference!");
310 obj.type = BINDER_TYPE_BINDER;
311 obj.binder = 0;
312 obj.cookie = 0;
313 return finish_flatten_binder(NULL, obj, out);
314
315 } else {
316 obj.type = BINDER_TYPE_BINDER;
317 obj.binder = 0;
318 obj.cookie = 0;
319 return finish_flatten_binder(NULL, obj, out);
320 }
321 }
322
323 inline static status_t finish_unflatten_binder(
324 BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
325 const Parcel& /*in*/)
326 {
327 return NO_ERROR;
328 }
329
330 status_t unflatten_binder(const sp<ProcessState>& proc,
331 const Parcel& in, sp<IBinder>* out)
332 {
333 const flat_binder_object* flat = in.readObject(false);
334
335 if (flat) {
336 switch (flat->type) {
337 case BINDER_TYPE_BINDER:
338 *out = reinterpret_cast<IBinder*>(flat->cookie);
339 return finish_unflatten_binder(NULL, *flat, in);
340 case BINDER_TYPE_HANDLE:
341 *out = proc->getStrongProxyForHandle(flat->handle);
342 return finish_unflatten_binder(
343 static_cast<BpBinder*>(out->get()), *flat, in);
344 }
345 }
346 return BAD_TYPE;
347 }
348
349 status_t unflatten_binder(const sp<ProcessState>& proc,
350 const Parcel& in, wp<IBinder>* out)
351 {
352 const flat_binder_object* flat = in.readObject(false);
353
354 if (flat) {
355 switch (flat->type) {
356 case BINDER_TYPE_BINDER:
357 *out = reinterpret_cast<IBinder*>(flat->cookie);
358 return finish_unflatten_binder(NULL, *flat, in);
359 case BINDER_TYPE_WEAK_BINDER:
360 if (flat->binder != 0) {
361 out->set_object_and_refs(
362 reinterpret_cast<IBinder*>(flat->cookie),
363 reinterpret_cast<RefBase::weakref_type*>(flat->binder));
364 } else {
365 *out = NULL;
366 }
367 return finish_unflatten_binder(NULL, *flat, in);
368 case BINDER_TYPE_HANDLE:
369 case BINDER_TYPE_WEAK_HANDLE:
370 *out = proc->getWeakProxyForHandle(flat->handle);
371 return finish_unflatten_binder(
372 static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
373 }
374 }
375 return BAD_TYPE;
376 }
377
378 // ---------------------------------------------------------------------------
379
380 Parcel::Parcel()
381 {
382 LOG_ALLOC("Parcel %p: constructing", this);
383 initState();
384 }
385
386 Parcel::~Parcel()
387 {
388 freeDataNoInit();
389 LOG_ALLOC("Parcel %p: destroyed", this);
390 }
391
392 size_t Parcel::getGlobalAllocSize() {
393 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
394 size_t size = gParcelGlobalAllocSize;
395 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
396 return size;
397 }
398
399 size_t Parcel::getGlobalAllocCount() {
400 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
401 size_t count = gParcelGlobalAllocCount;
402 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
403 return count;
404 }
405
406 const uint8_t* Parcel::data() const
407 {
408 return mData;
409 }
410
411 size_t Parcel::dataSize() const
412 {
413 return (mDataSize > mDataPos ? mDataSize : mDataPos);
414 }
415
416 size_t Parcel::dataAvail() const
417 {
418 size_t result = dataSize() - dataPosition();
419 if (result > INT32_MAX) {
420 abort();
421 }
422 return result;
423 }
424
425 size_t Parcel::dataPosition() const
426 {
427 return mDataPos;
428 }
429
430 size_t Parcel::dataCapacity() const
431 {
432 return mDataCapacity;
433 }
434
435 status_t Parcel::setDataSize(size_t size)
436 {
437 if (size > INT32_MAX) {
438 // don't accept size_t values which may have come from an
439 // inadvertent conversion from a negative int.
440 return BAD_VALUE;
441 }
442
443 status_t err;
444 err = continueWrite(size);
445 if (err == NO_ERROR) {
446 mDataSize = size;
447 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
448 }
449 return err;
450 }
451
452 void Parcel::setDataPosition(size_t pos) const
453 {
454 if (pos > INT32_MAX) {
455 // don't accept size_t values which may have come from an
456 // inadvertent conversion from a negative int.
457 abort();
458 }
459
460 mDataPos = pos;
461 mNextObjectHint = 0;
462 }
463
464 status_t Parcel::setDataCapacity(size_t size)
465 {
466 if (size > INT32_MAX) {
467 // don't accept size_t values which may have come from an
468 // inadvertent conversion from a negative int.
469 return BAD_VALUE;
470 }
471
472 if (size > mDataCapacity) return continueWrite(size);
473 return NO_ERROR;
474 }
475
476 status_t Parcel::setData(const uint8_t* buffer, size_t len)
477 {
478 if (len > INT32_MAX) {
479 // don't accept size_t values which may have come from an
480 // inadvertent conversion from a negative int.
481 return BAD_VALUE;
482 }
483
484 status_t err = restartWrite(len);
485 if (err == NO_ERROR) {
486 memcpy(const_cast<uint8_t*>(data()), buffer, len);
487 mDataSize = len;
488 mFdsKnown = false;
489 }
490 return err;
491 }
492
493 status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
494 {
495 const sp<ProcessState> proc(ProcessState::self());
496 status_t err;
497 const uint8_t *data = parcel->mData;
498 const binder_size_t *objects = parcel->mObjects;
499 size_t size = parcel->mObjectsSize;
500 int startPos = mDataPos;
501 int firstIndex = -1, lastIndex = -2;
502
503 if (len == 0) {
504 return NO_ERROR;
505 }
506
507 if (len > INT32_MAX) {
508 // don't accept size_t values which may have come from an
509 // inadvertent conversion from a negative int.
510 return BAD_VALUE;
511 }
512
513 // range checks against the source parcel size
514 if ((offset > parcel->mDataSize)
515 || (len > parcel->mDataSize)
516 || (offset + len > parcel->mDataSize)) {
517 return BAD_VALUE;
518 }
519
520 // Count objects in range
521 for (int i = 0; i < (int) size; i++) {
522 size_t off = objects[i];
523 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
524 if (firstIndex == -1) {
525 firstIndex = i;
526 }
527 lastIndex = i;
528 }
529 }
530 int numObjects = lastIndex - firstIndex + 1;
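// If no flattened objects fell inside [offset, offset+len), firstIndex/lastIndex keep
// their -1/-2 sentinels, numObjects stays 0, and the object fix-up below is skipped.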
531
532 if ((mDataSize+len) > mDataCapacity) {
533 // grow data
534 err = growData(len);
535 if (err != NO_ERROR) {
536 return err;
537 }
538 }
539
540 // append data
541 memcpy(mData + mDataPos, data + offset, len);
542 mDataPos += len;
543 mDataSize += len;
544
545 err = NO_ERROR;
546
547 if (numObjects > 0) {
548 // grow objects
549 if (mObjectsCapacity < mObjectsSize + numObjects) {
550 size_t newSize = ((mObjectsSize + numObjects)*3)/2;
551 if (newSize < mObjectsSize) return NO_MEMORY; // overflow
552 binder_size_t *objects =
553 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
554 if (objects == (binder_size_t*)0) {
555 return NO_MEMORY;
556 }
557 mObjects = objects;
558 mObjectsCapacity = newSize;
559 }
560
561 // append and acquire objects
562 int idx = mObjectsSize;
563 for (int i = firstIndex; i <= lastIndex; i++) {
564 size_t off = objects[i] - offset + startPos;
565 mObjects[idx++] = off;
566 mObjectsSize++;
567
568 flat_binder_object* flat
569 = reinterpret_cast<flat_binder_object*>(mData + off);
570 acquire_object(proc, *flat, this, &mOpenAshmemSize);
571
572 if (flat->type == BINDER_TYPE_FD) {
573 // If this is a file descriptor, we need to dup it so the
574 // new Parcel now owns its own fd, and can declare that we
575 // officially know we have fds.
576 flat->handle = dup(flat->handle);
577 flat->cookie = 1;
578 mHasFds = mFdsKnown = true;
579 if (!mAllowFds) {
580 err = FDS_NOT_ALLOWED;
581 }
582 }
583 }
584 }
585
586 return err;
587 }
588
589 bool Parcel::allowFds() const
590 {
591 return mAllowFds;
592 }
593
594 bool Parcel::pushAllowFds(bool allowFds)
595 {
596 const bool origValue = mAllowFds;
597 if (!allowFds) {
598 mAllowFds = false;
599 }
600 return origValue;
601 }
602
603 void Parcel::restoreAllowFds(bool lastValue)
604 {
605 mAllowFds = lastValue;
606 }
607
608 bool Parcel::hasFileDescriptors() const
609 {
610 if (!mFdsKnown) {
611 scanForFds();
612 }
613 return mHasFds;
614 }
615
616 // Write RPC headers. (previously just the interface token)
617 status_t Parcel::writeInterfaceToken(const String16& interface)
618 {
619 writeInt32(IPCThreadState::self()->getStrictModePolicy() |
620 STRICT_MODE_PENALTY_GATHER);
621 // currently the interface identification token is just its name as a string
622 return writeString16(interface);
623 }
624
625 bool Parcel::checkInterface(IBinder* binder) const
626 {
627 return enforceInterface(binder->getInterfaceDescriptor());
628 }
629
630 bool Parcel::enforceInterface(const String16& interface,
631 IPCThreadState* threadState) const
632 {
633 int32_t strictPolicy = readInt32();
634 if (threadState == NULL) {
635 threadState = IPCThreadState::self();
636 }
637 if ((threadState->getLastTransactionBinderFlags() &
638 IBinder::FLAG_ONEWAY) != 0) {
639 // For one-way calls, the callee is running entirely
640 // disconnected from the caller, so disable StrictMode entirely.
641 // Not only does disk/network usage not impact the caller, but
642 // there's no way to communicate back any violations anyway.
643 threadState->setStrictModePolicy(0);
644 } else {
645 threadState->setStrictModePolicy(strictPolicy);
646 }
647 const String16 str(readString16());
648 if (str == interface) {
649 return true;
650 } else {
651 ALOGW("**** enforceInterface() expected '%s' but read '%s'",
652 String8(interface).string(), String8(str).string());
653 return false;
654 }
655 }
656
657 const binder_size_t* Parcel::objects() const
658 {
659 return mObjects;
660 }
661
662 size_t Parcel::objectsCount() const
663 {
664 return mObjectsSize;
665 }
666
667 status_t Parcel::errorCheck() const
668 {
669 return mError;
670 }
671
672 void Parcel::setError(status_t err)
673 {
674 mError = err;
675 }
676
677 status_t Parcel::finishWrite(size_t len)
678 {
679 if (len > INT32_MAX) {
680 // don't accept size_t values which may have come from an
681 // inadvertent conversion from a negative int.
682 return BAD_VALUE;
683 }
684
685 //printf("Finish write of %d\n", len);
686 mDataPos += len;
687 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
688 if (mDataPos > mDataSize) {
689 mDataSize = mDataPos;
690 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
691 }
692 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
693 return NO_ERROR;
694 }
695
696 status_t Parcel::writeUnpadded(const void* data, size_t len)
697 {
698 if (len > INT32_MAX) {
699 // don't accept size_t values which may have come from an
700 // inadvertent conversion from a negative int.
701 return BAD_VALUE;
702 }
703
704 size_t end = mDataPos + len;
705 if (end < mDataPos) {
706 // integer overflow
707 return BAD_VALUE;
708 }
709
710 if (end <= mDataCapacity) {
711 restart_write:
712 memcpy(mData+mDataPos, data, len);
713 return finishWrite(len);
714 }
715
716 status_t err = growData(len);
717 if (err == NO_ERROR) goto restart_write;
718 return err;
719 }
720
721 status_t Parcel::write(const void* data, size_t len)
722 {
723 if (len > INT32_MAX) {
724 // don't accept size_t values which may have come from an
725 // inadvertent conversion from a negative int.
726 return BAD_VALUE;
727 }
728
729 void* const d = writeInplace(len);
730 if (d) {
731 memcpy(d, data, len);
732 return NO_ERROR;
733 }
734 return mError;
735 }
736
737 void* Parcel::writeInplace(size_t len)
738 {
739 if (len > INT32_MAX) {
740 // don't accept size_t values which may have come from an
741 // inadvertent conversion from a negative int.
742 return NULL;
743 }
744
745 const size_t padded = pad_size(len);
746
747 // sanity check for integer overflow
748 if (mDataPos+padded < mDataPos) {
749 return NULL;
750 }
751
752 if ((mDataPos+padded) <= mDataCapacity) {
753 restart_write:
754 //printf("Writing %ld bytes, padded to %ld\n", len, padded);
755 uint8_t* const data = mData+mDataPos;
756
757 // Need to pad at end?
758 if (padded != len) {
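// Zero the pad bytes in the final word so uninitialized memory is never sent to
// the remote process; the mask keeps only the bytes that belong to 'len'.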
759 #if BYTE_ORDER == BIG_ENDIAN
760 static const uint32_t mask[4] = {
761 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
762 };
763 #endif
764 #if BYTE_ORDER == LITTLE_ENDIAN
765 static const uint32_t mask[4] = {
766 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
767 };
768 #endif
769 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
770 // *reinterpret_cast<void**>(data+padded-4));
771 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
772 }
773
774 finishWrite(padded);
775 return data;
776 }
777
778 status_t err = growData(padded);
779 if (err == NO_ERROR) goto restart_write;
780 return NULL;
781 }
782
783 status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
784 const uint8_t* strData = (uint8_t*)str.data();
785 const size_t strLen= str.length();
786 const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
787 if (utf16Len < 0 || utf16Len> std::numeric_limits<int32_t>::max()) {
788 return BAD_VALUE;
789 }
790
791 status_t err = writeInt32(utf16Len);
792 if (err) {
793 return err;
794 }
795
796 // Allocate enough bytes to hold our converted string and its terminating NULL.
797 void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
798 if (!dst) {
799 return NO_MEMORY;
800 }
801
802 utf8_to_utf16(strData, strLen, (char16_t*)dst);
803
804 return NO_ERROR;
805 }
806
807 status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
808 if (!str) {
809 return writeInt32(-1);
810 }
811 return writeUtf8AsUtf16(*str);
812 }
813
814 namespace {
815
816 template<typename T>
817 status_t writeByteVectorInternal(Parcel* parcel, const std::vector<T>& val)
818 {
819 status_t status;
820 if (val.size() > std::numeric_limits<int32_t>::max()) {
821 status = BAD_VALUE;
822 return status;
823 }
824
825 status = parcel->writeInt32(val.size());
826 if (status != OK) {
827 return status;
828 }
829
830 void* data = parcel->writeInplace(val.size());
831 if (!data) {
832 status = BAD_VALUE;
833 return status;
834 }
835
836 memcpy(data, val.data(), val.size());
837 return status;
838 }
839
840 template<typename T>
841 status_t writeByteVectorInternalPtr(Parcel* parcel,
842 const std::unique_ptr<std::vector<T>>& val)
843 {
844 if (!val) {
845 return parcel->writeInt32(-1);
846 }
847
848 return writeByteVectorInternal(parcel, *val);
849 }
850
851 } // namespace
852
853 status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
854 return writeByteVectorInternal(this, val);
855 }
856
857 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
858 {
859 return writeByteVectorInternalPtr(this, val);
860 }
861
862 status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
863 return writeByteVectorInternal(this, val);
864 }
865
866 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
867 {
868 return writeByteVectorInternalPtr(this, val);
869 }
870
871 status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
872 {
873 return writeTypedVector(val, &Parcel::writeInt32);
874 }
875
876 status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
877 {
878 return writeNullableTypedVector(val, &Parcel::writeInt32);
879 }
880
881 status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
882 {
883 return writeTypedVector(val, &Parcel::writeInt64);
884 }
885
886 status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
887 {
888 return writeNullableTypedVector(val, &Parcel::writeInt64);
889 }
890
891 status_t Parcel::writeFloatVector(const std::vector<float>& val)
892 {
893 return writeTypedVector(val, &Parcel::writeFloat);
894 }
895
896 status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
897 {
898 return writeNullableTypedVector(val, &Parcel::writeFloat);
899 }
900
901 status_t Parcel::writeDoubleVector(const std::vector<double>& val)
902 {
903 return writeTypedVector(val, &Parcel::writeDouble);
904 }
905
906 status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
907 {
908 return writeNullableTypedVector(val, &Parcel::writeDouble);
909 }
910
911 status_t Parcel::writeBoolVector(const std::vector<bool>& val)
912 {
913 return writeTypedVector(val, &Parcel::writeBool);
914 }
915
916 status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
917 {
918 return writeNullableTypedVector(val, &Parcel::writeBool);
919 }
920
921 status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
922 {
923 return writeTypedVector(val, &Parcel::writeChar);
924 }
925
926 status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
927 {
928 return writeNullableTypedVector(val, &Parcel::writeChar);
929 }
930
931 status_t Parcel::writeString16Vector(const std::vector<String16>& val)
932 {
933 return writeTypedVector(val, &Parcel::writeString16);
934 }
935
936 status_t Parcel::writeString16Vector(
937 const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
938 {
939 return writeNullableTypedVector(val, &Parcel::writeString16);
940 }
941
942 status_t Parcel::writeUtf8VectorAsUtf16Vector(
943 const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
944 return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
945 }
946
947 status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
948 return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
949 }
950
951 status_t Parcel::writeInt32(int32_t val)
952 {
953 return writeAligned(val);
954 }
955
956 status_t Parcel::writeUint32(uint32_t val)
957 {
958 return writeAligned(val);
959 }
960
961 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
962 if (len > INT32_MAX) {
963 // don't accept size_t values which may have come from an
964 // inadvertent conversion from a negative int.
965 return BAD_VALUE;
966 }
967
968 if (!val) {
969 return writeInt32(-1);
970 }
971 status_t ret = writeInt32(static_cast<uint32_t>(len));
972 if (ret == NO_ERROR) {
973 ret = write(val, len * sizeof(*val));
974 }
975 return ret;
976 }
977 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
978 if (len > INT32_MAX) {
979 // don't accept size_t values which may have come from an
980 // inadvertent conversion from a negative int.
981 return BAD_VALUE;
982 }
983
984 if (!val) {
985 return writeInt32(-1);
986 }
987 status_t ret = writeInt32(static_cast<uint32_t>(len));
988 if (ret == NO_ERROR) {
989 ret = write(val, len * sizeof(*val));
990 }
991 return ret;
992 }
993
994 status_t Parcel::writeBool(bool val)
995 {
996 return writeInt32(int32_t(val));
997 }
998
999 status_t Parcel::writeChar(char16_t val)
1000 {
1001 return writeInt32(int32_t(val));
1002 }
1003
1004 status_t Parcel::writeByte(int8_t val)
1005 {
1006 return writeInt32(int32_t(val));
1007 }
1008
1009 status_t Parcel::writeInt64(int64_t val)
1010 {
1011 return writeAligned(val);
1012 }
1013
1014 status_t Parcel::writeUint64(uint64_t val)
1015 {
1016 return writeAligned(val);
1017 }
1018
1019 status_t Parcel::writePointer(uintptr_t val)
1020 {
1021 return writeAligned<binder_uintptr_t>(val);
1022 }
1023
1024 status_t Parcel::writeFloat(float val)
1025 {
1026 return writeAligned(val);
1027 }
1028
1029 #if defined(__mips__) && defined(__mips_hard_float)
1030
1031 status_t Parcel::writeDouble(double val)
1032 {
1033 union {
1034 double d;
1035 unsigned long long ll;
1036 } u;
1037 u.d = val;
1038 return writeAligned(u.ll);
1039 }
1040
1041 #else
1042
1043 status_t Parcel::writeDouble(double val)
1044 {
1045 return writeAligned(val);
1046 }
1047
1048 #endif
1049
1050 status_t Parcel::writeCString(const char* str)
1051 {
1052 return write(str, strlen(str)+1);
1053 }
1054
1055 status_t Parcel::writeString8(const String8& str)
1056 {
1057 status_t err = writeInt32(str.bytes());
1058 // only write string if its length is more than zero characters,
1059 // as readString8 will only read if the length field is non-zero.
1060 // this is slightly different from how writeString16 works.
1061 if (str.bytes() > 0 && err == NO_ERROR) {
1062 err = write(str.string(), str.bytes()+1);
1063 }
1064 return err;
1065 }
1066
1067 status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
1068 {
1069 if (!str) {
1070 return writeInt32(-1);
1071 }
1072
1073 return writeString16(*str);
1074 }
1075
1076 status_t Parcel::writeString16(const String16& str)
1077 {
1078 return writeString16(str.string(), str.size());
1079 }
1080
1081 status_t Parcel::writeString16(const char16_t* str, size_t len)
1082 {
1083 if (str == NULL) return writeInt32(-1);
1084
1085 status_t err = writeInt32(len);
1086 if (err == NO_ERROR) {
1087 len *= sizeof(char16_t);
1088 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1089 if (data) {
1090 memcpy(data, str, len);
1091 *reinterpret_cast<char16_t*>(data+len) = 0;
1092 return NO_ERROR;
1093 }
1094 err = mError;
1095 }
1096 return err;
1097 }
1098
1099 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1100 {
1101 return flatten_binder(ProcessState::self(), val, this);
1102 }
1103
1104 status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
1105 {
1106 return writeTypedVector(val, &Parcel::writeStrongBinder);
1107 }
1108
1109 status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
1110 {
1111 return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1112 }
1113
1114 status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
1115 return readNullableTypedVector(val, &Parcel::readStrongBinder);
1116 }
1117
1118 status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
1119 return readTypedVector(val, &Parcel::readStrongBinder);
1120 }
1121
1122 status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
1123 {
1124 return flatten_binder(ProcessState::self(), val, this);
1125 }
1126
1127 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1128 if (!parcelable) {
1129 return writeInt32(0);
1130 }
1131
1132 return writeParcelable(*parcelable);
1133 }
1134
1135 status_t Parcel::writeParcelable(const Parcelable& parcelable) {
1136 status_t status = writeInt32(1); // parcelable is not null.
1137 if (status != OK) {
1138 return status;
1139 }
1140 return parcelable.writeToParcel(this);
1141 }
1142
1143 status_t Parcel::writeNativeHandle(const native_handle* handle)
1144 {
1145 if (!handle || handle->version != sizeof(native_handle))
1146 return BAD_TYPE;
1147
1148 status_t err;
1149 err = writeInt32(handle->numFds);
1150 if (err != NO_ERROR) return err;
1151
1152 err = writeInt32(handle->numInts);
1153 if (err != NO_ERROR) return err;
1154
1155 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1156 err = writeDupFileDescriptor(handle->data[i]);
1157
1158 if (err != NO_ERROR) {
1159 ALOGD("write native handle, write dup fd failed");
1160 return err;
1161 }
1162 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1163 return err;
1164 }
1165
1166 status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
1167 {
1168 flat_binder_object obj;
1169 obj.type = BINDER_TYPE_FD;
1170 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
1171 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
1172 obj.handle = fd;
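// A cookie of 1 records that this Parcel owns the fd and must close it when the
// object is released (see release_object above).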
1173 obj.cookie = takeOwnership ? 1 : 0;
1174 return writeObject(obj, true);
1175 }
1176
1177 status_t Parcel::writeDupFileDescriptor(int fd)
1178 {
1179 int dupFd = dup(fd);
1180 if (dupFd < 0) {
1181 return -errno;
1182 }
1183 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1184 if (err != OK) {
1185 close(dupFd);
1186 }
1187 return err;
1188 }
1189
1190 status_t Parcel::writeUniqueFileDescriptor(const ScopedFd& fd) {
1191 return writeDupFileDescriptor(fd.get());
1192 }
1193
1194 status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<ScopedFd>& val) {
1195 return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1196 }
1197
1198 status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<ScopedFd>>& val) {
1199 return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1200 }
1201
1202 status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
1203 {
1204 if (len > INT32_MAX) {
1205 // don't accept size_t values which may have come from an
1206 // inadvertent conversion from a negative int.
1207 return BAD_VALUE;
1208 }
1209
1210 status_t status;
1211 if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
1212 ALOGV("writeBlob: write in place");
1213 status = writeInt32(BLOB_INPLACE);
1214 if (status) return status;
1215
1216 void* ptr = writeInplace(len);
1217 if (!ptr) return NO_MEMORY;
1218
1219 outBlob->init(-1, ptr, len, false);
1220 return NO_ERROR;
1221 }
1222
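// Blobs larger than BLOB_INPLACE_LIMIT (when fds are allowed) travel out-of-line:
// allocate an ashmem region, map it so the caller can fill it through outBlob, and
// transfer only the file descriptor in the parcel.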
1223 ALOGV("writeBlob: write to ashmem");
1224 int fd = ashmem_create_region("Parcel Blob", len);
1225 if (fd < 0) return NO_MEMORY;
1226
1227 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
1228 if (result < 0) {
1229 status = result;
1230 } else {
1231 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1232 if (ptr == MAP_FAILED) {
1233 status = -errno;
1234 } else {
1235 if (!mutableCopy) {
1236 result = ashmem_set_prot_region(fd, PROT_READ);
1237 }
1238 if (result < 0) {
1239 status = result;
1240 } else {
1241 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
1242 if (!status) {
1243 status = writeFileDescriptor(fd, true /*takeOwnership*/);
1244 if (!status) {
1245 outBlob->init(fd, ptr, len, mutableCopy);
1246 return NO_ERROR;
1247 }
1248 }
1249 }
1250 }
1251 ::munmap(ptr, len);
1252 }
1253 ::close(fd);
1254 return status;
1255 }
1256
1257 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1258 {
1259 // Must match up with what's done in writeBlob.
1260 if (!mAllowFds) return FDS_NOT_ALLOWED;
1261 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1262 if (status) return status;
1263 return writeDupFileDescriptor(fd);
1264 }
1265
1266 status_t Parcel::write(const FlattenableHelperInterface& val)
1267 {
1268 status_t err;
1269
1270 // size if needed
1271 const size_t len = val.getFlattenedSize();
1272 const size_t fd_count = val.getFdCount();
1273
1274 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
1275 // don't accept size_t values which may have come from an
1276 // inadvertent conversion from a negative int.
1277 return BAD_VALUE;
1278 }
1279
1280 err = this->writeInt32(len);
1281 if (err) return err;
1282
1283 err = this->writeInt32(fd_count);
1284 if (err) return err;
1285
1286 // payload
1287 void* const buf = this->writeInplace(pad_size(len));
1288 if (buf == NULL)
1289 return BAD_VALUE;
1290
1291 int* fds = NULL;
1292 if (fd_count) {
1293 fds = new (std::nothrow) int[fd_count];
1294 if (fds == nullptr) {
1295 ALOGE("write: failed to allocate requested %zu fds", fd_count);
1296 return BAD_VALUE;
1297 }
1298 }
1299
1300 err = val.flatten(buf, len, fds, fd_count);
1301 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1302 err = this->writeDupFileDescriptor( fds[i] );
1303 }
1304
1305 if (fd_count) {
1306 delete [] fds;
1307 }
1308
1309 return err;
1310 }
1311
1312 status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1313 {
1314 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1315 const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1316 if (enoughData && enoughObjects) {
1317 restart_write:
1318 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1319
1320 // remember if it's a file descriptor
1321 if (val.type == BINDER_TYPE_FD) {
1322 if (!mAllowFds) {
1323 // fail before modifying our object index
1324 return FDS_NOT_ALLOWED;
1325 }
1326 mHasFds = mFdsKnown = true;
1327 }
1328
1329 // Need to write meta-data?
1330 if (nullMetaData || val.binder != 0) {
1331 mObjects[mObjectsSize] = mDataPos;
1332 acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
1333 mObjectsSize++;
1334 }
1335
1336 return finishWrite(sizeof(flat_binder_object));
1337 }
1338
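// Not enough room: grow the data buffer and/or the object index as needed, then
// retry the write above via restart_write.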
1339 if (!enoughData) {
1340 const status_t err = growData(sizeof(val));
1341 if (err != NO_ERROR) return err;
1342 }
1343 if (!enoughObjects) {
1344 size_t newSize = ((mObjectsSize+2)*3)/2;
1345 if (newSize < mObjectsSize) return NO_MEMORY; // overflow
1346 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1347 if (objects == NULL) return NO_MEMORY;
1348 mObjects = objects;
1349 mObjectsCapacity = newSize;
1350 }
1351
1352 goto restart_write;
1353 }
1354
1355 status_t Parcel::writeNoException()
1356 {
1357 binder::Status status;
1358 return status.writeToParcel(this);
1359 }
1360
1361 void Parcel::remove(size_t /*start*/, size_t /*amt*/)
1362 {
1363 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
1364 }
1365
1366 status_t Parcel::read(void* outData, size_t len) const
1367 {
1368 if (len > INT32_MAX) {
1369 // don't accept size_t values which may have come from an
1370 // inadvertent conversion from a negative int.
1371 return BAD_VALUE;
1372 }
1373
1374 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1375 && len <= pad_size(len)) {
1376 memcpy(outData, mData+mDataPos, len);
1377 mDataPos += pad_size(len);
1378 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1379 return NO_ERROR;
1380 }
1381 return NOT_ENOUGH_DATA;
1382 }
1383
1384 const void* Parcel::readInplace(size_t len) const
1385 {
1386 if (len > INT32_MAX) {
1387 // don't accept size_t values which may have come from an
1388 // inadvertent conversion from a negative int.
1389 return NULL;
1390 }
1391
1392 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1393 && len <= pad_size(len)) {
1394 const void* data = mData+mDataPos;
1395 mDataPos += pad_size(len);
1396 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1397 return data;
1398 }
1399 return NULL;
1400 }
1401
1402 template<class T>
1403 status_t Parcel::readAligned(T *pArg) const {
1404 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
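// Only types whose size is already a multiple of 4 may be read this way, so no
// explicit padding has to be skipped afterwards.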
1405
1406 if ((mDataPos+sizeof(T)) <= mDataSize) {
1407 const void* data = mData+mDataPos;
1408 mDataPos += sizeof(T);
1409 *pArg = *reinterpret_cast<const T*>(data);
1410 return NO_ERROR;
1411 } else {
1412 return NOT_ENOUGH_DATA;
1413 }
1414 }
1415
1416 template<class T>
1417 T Parcel::readAligned() const {
1418 T result;
1419 if (readAligned(&result) != NO_ERROR) {
1420 result = 0;
1421 }
1422
1423 return result;
1424 }
1425
1426 template<class T>
1427 status_t Parcel::writeAligned(T val) {
1428 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1429
1430 if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1431 restart_write:
1432 *reinterpret_cast<T*>(mData+mDataPos) = val;
1433 return finishWrite(sizeof(val));
1434 }
1435
1436 status_t err = growData(sizeof(val));
1437 if (err == NO_ERROR) goto restart_write;
1438 return err;
1439 }
1440
1441 namespace {
1442
1443 template<typename T>
1444 status_t readByteVectorInternal(const Parcel* parcel,
1445 std::vector<T>* val) {
1446 val->clear();
1447
1448 int32_t size;
1449 status_t status = parcel->readInt32(&size);
1450
1451 if (status != OK) {
1452 return status;
1453 }
1454
1455 if (size < 0) {
1456 status = UNEXPECTED_NULL;
1457 return status;
1458 }
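// Reject lengths larger than the bytes remaining in the parcel so a corrupt
// length field cannot trigger a huge allocation below.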
1459 if (size_t(size) > parcel->dataAvail()) {
1460 status = BAD_VALUE;
1461 return status;
1462 }
1463
1464 const void* data = parcel->readInplace(size);
1465 if (!data) {
1466 status = BAD_VALUE;
1467 return status;
1468 }
1469 val->resize(size);
1470 memcpy(val->data(), data, size);
1471
1472 return status;
1473 }
1474
1475 template<typename T>
1476 status_t readByteVectorInternalPtr(
1477 const Parcel* parcel,
1478 std::unique_ptr<std::vector<T>>* val) {
1479 const int32_t start = parcel->dataPosition();
1480 int32_t size;
1481 status_t status = parcel->readInt32(&size);
1482 val->reset();
1483
1484 if (status != OK || size < 0) {
1485 return status;
1486 }
1487
1488 parcel->setDataPosition(start);
1489 val->reset(new (std::nothrow) std::vector<T>());
1490
1491 status = readByteVectorInternal(parcel, val->get());
1492
1493 if (status != OK) {
1494 val->reset();
1495 }
1496
1497 return status;
1498 }
1499
1500 } // namespace
1501
1502 status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1503 return readByteVectorInternal(this, val);
1504 }
1505
1506 status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1507 return readByteVectorInternal(this, val);
1508 }
1509
1510 status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1511 return readByteVectorInternalPtr(this, val);
1512 }
1513
1514 status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1515 return readByteVectorInternalPtr(this, val);
1516 }
1517
1518 status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1519 return readNullableTypedVector(val, &Parcel::readInt32);
1520 }
1521
1522 status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1523 return readTypedVector(val, &Parcel::readInt32);
1524 }
1525
1526 status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1527 return readNullableTypedVector(val, &Parcel::readInt64);
1528 }
1529
1530 status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1531 return readTypedVector(val, &Parcel::readInt64);
1532 }
1533
1534 status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1535 return readNullableTypedVector(val, &Parcel::readFloat);
1536 }
1537
1538 status_t Parcel::readFloatVector(std::vector<float>* val) const {
1539 return readTypedVector(val, &Parcel::readFloat);
1540 }
1541
1542 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1543 return readNullableTypedVector(val, &Parcel::readDouble);
1544 }
1545
1546 status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1547 return readTypedVector(val, &Parcel::readDouble);
1548 }
1549
1550 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1551 const int32_t start = dataPosition();
1552 int32_t size;
1553 status_t status = readInt32(&size);
1554 val->reset();
1555
1556 if (status != OK || size < 0) {
1557 return status;
1558 }
1559
1560 setDataPosition(start);
1561 val->reset(new (std::nothrow) std::vector<bool>());
1562
1563 status = readBoolVector(val->get());
1564
1565 if (status != OK) {
1566 val->reset();
1567 }
1568
1569 return status;
1570 }
1571
1572 status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1573 int32_t size;
1574 status_t status = readInt32(&size);
1575
1576 if (status != OK) {
1577 return status;
1578 }
1579
1580 if (size < 0) {
1581 return UNEXPECTED_NULL;
1582 }
1583
1584 val->resize(size);
1585
1586 /* C++ bool handling means a vector of bools isn't necessarily addressable
1587 * (we might use individual bits)
1588 */
1589 bool data;
1590 for (int32_t i = 0; i < size; ++i) {
1591 status = readBool(&data);
1592 (*val)[i] = data;
1593
1594 if (status != OK) {
1595 return status;
1596 }
1597 }
1598
1599 return OK;
1600 }
1601
1602 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1603 return readNullableTypedVector(val, &Parcel::readChar);
1604 }
1605
1606 status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1607 return readTypedVector(val, &Parcel::readChar);
1608 }
1609
1610 status_t Parcel::readString16Vector(
1611 std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1612 return readNullableTypedVector(val, &Parcel::readString16);
1613 }
1614
1615 status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1616 return readTypedVector(val, &Parcel::readString16);
1617 }
1618
1619 status_t Parcel::readUtf8VectorFromUtf16Vector(
1620 std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1621 return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1622 }
1623
1624 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1625 return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1626 }
1627
1628 status_t Parcel::readInt32(int32_t *pArg) const
1629 {
1630 return readAligned(pArg);
1631 }
1632
1633 int32_t Parcel::readInt32() const
1634 {
1635 return readAligned<int32_t>();
1636 }
1637
1638 status_t Parcel::readUint32(uint32_t *pArg) const
1639 {
1640 return readAligned(pArg);
1641 }
1642
1643 uint32_t Parcel::readUint32() const
1644 {
1645 return readAligned<uint32_t>();
1646 }
1647
1648 status_t Parcel::readInt64(int64_t *pArg) const
1649 {
1650 return readAligned(pArg);
1651 }
1652
1653
1654 int64_t Parcel::readInt64() const
1655 {
1656 return readAligned<int64_t>();
1657 }
1658
1659 status_t Parcel::readUint64(uint64_t *pArg) const
1660 {
1661 return readAligned(pArg);
1662 }
1663
1664 uint64_t Parcel::readUint64() const
1665 {
1666 return readAligned<uint64_t>();
1667 }
1668
1669 status_t Parcel::readPointer(uintptr_t *pArg) const
1670 {
1671 status_t ret;
1672 binder_uintptr_t ptr;
1673 ret = readAligned(&ptr);
1674 if (!ret)
1675 *pArg = ptr;
1676 return ret;
1677 }
1678
1679 uintptr_t Parcel::readPointer() const
1680 {
1681 return readAligned<binder_uintptr_t>();
1682 }
1683
1684
1685 status_t Parcel::readFloat(float *pArg) const
1686 {
1687 return readAligned(pArg);
1688 }
1689
1690
1691 float Parcel::readFloat() const
1692 {
1693 return readAligned<float>();
1694 }
1695
1696 #if defined(__mips__) && defined(__mips_hard_float)
1697
1698 status_t Parcel::readDouble(double *pArg) const
1699 {
1700 union {
1701 double d;
1702 unsigned long long ll;
1703 } u;
1704 u.d = 0;
1705 status_t status;
1706 status = readAligned(&u.ll);
1707 *pArg = u.d;
1708 return status;
1709 }
1710
1711 double Parcel::readDouble() const
1712 {
1713 union {
1714 double d;
1715 unsigned long long ll;
1716 } u;
1717 u.ll = readAligned<unsigned long long>();
1718 return u.d;
1719 }
1720
1721 #else
1722
1723 status_t Parcel::readDouble(double *pArg) const
1724 {
1725 return readAligned(pArg);
1726 }
1727
1728 double Parcel::readDouble() const
1729 {
1730 return readAligned<double>();
1731 }
1732
1733 #endif
1734
1735 status_t Parcel::readIntPtr(intptr_t *pArg) const
1736 {
1737 return readAligned(pArg);
1738 }
1739
1740
1741 intptr_t Parcel::readIntPtr() const
1742 {
1743 return readAligned<intptr_t>();
1744 }
1745
1746 status_t Parcel::readBool(bool *pArg) const
1747 {
1748 int32_t tmp;
1749 status_t ret = readInt32(&tmp);
1750 *pArg = (tmp != 0);
1751 return ret;
1752 }
1753
1754 bool Parcel::readBool() const
1755 {
1756 return readInt32() != 0;
1757 }
1758
1759 status_t Parcel::readChar(char16_t *pArg) const
1760 {
1761 int32_t tmp;
1762 status_t ret = readInt32(&tmp);
1763 *pArg = char16_t(tmp);
1764 return ret;
1765 }
1766
1767 char16_t Parcel::readChar() const
1768 {
1769 return char16_t(readInt32());
1770 }
1771
1772 status_t Parcel::readByte(int8_t *pArg) const
1773 {
1774 int32_t tmp;
1775 status_t ret = readInt32(&tmp);
1776 *pArg = int8_t(tmp);
1777 return ret;
1778 }
1779
1780 int8_t Parcel::readByte() const
1781 {
1782 return int8_t(readInt32());
1783 }
1784
1785 status_t Parcel::readUtf8FromUtf16(std::string* str) const {
1786 size_t utf16Size = 0;
1787 const char16_t* src = readString16Inplace(&utf16Size);
1788 if (!src) {
1789 return UNEXPECTED_NULL;
1790 }
1791
1792 // Save ourselves the trouble, we're done.
1793 if (utf16Size == 0u) {
1794 str->clear();
1795 return NO_ERROR;
1796 }
1797
1798 ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size);
1799 if (utf8Size < 0) {
1800 return BAD_VALUE;
1801 }
1802 // Note that while it is probably safe to assume string::resize keeps a
1803 // spare byte around for the trailing null, we're going to be explicit.
1804 str->resize(utf8Size + 1);
1805 utf16_to_utf8(src, utf16Size, &((*str)[0]));
1806 str->resize(utf8Size);
1807 return NO_ERROR;
1808 }
1809
1810 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
1811 const int32_t start = dataPosition();
1812 int32_t size;
1813 status_t status = readInt32(&size);
1814 str->reset();
1815
1816 if (status != OK || size < 0) {
1817 return status;
1818 }
1819
1820 setDataPosition(start);
1821 str->reset(new (std::nothrow) std::string());
1822 return readUtf8FromUtf16(str->get());
1823 }
1824
1825 const char* Parcel::readCString() const
1826 {
1827 const size_t avail = mDataSize-mDataPos;
1828 if (avail > 0) {
1829 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1830 // is the string's trailing NUL within the parcel's valid bounds?
1831 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1832 if (eos) {
1833 const size_t len = eos - str;
1834 mDataPos += pad_size(len+1);
1835 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1836 return str;
1837 }
1838 }
1839 return NULL;
1840 }
1841
1842 String8 Parcel::readString8() const
1843 {
1844 int32_t size = readInt32();
1845 // watch for potential int overflow adding 1 for trailing NUL
1846 if (size > 0 && size < INT32_MAX) {
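// size+1 also consumes the NUL terminator that writeString8() appended.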
1847 const char* str = (const char*)readInplace(size+1);
1848 if (str) return String8(str, size);
1849 }
1850 return String8();
1851 }
1852
1853 String16 Parcel::readString16() const
1854 {
1855 size_t len;
1856 const char16_t* str = readString16Inplace(&len);
1857 if (str) return String16(str, len);
1858 ALOGE("Reading a NULL string not supported here.");
1859 return String16();
1860 }
1861
1862 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
1863 {
1864 const int32_t start = dataPosition();
1865 int32_t size;
1866 status_t status = readInt32(&size);
1867 pArg->reset();
1868
1869 if (status != OK || size < 0) {
1870 return status;
1871 }
1872
1873 setDataPosition(start);
1874 pArg->reset(new (std::nothrow) String16());
1875
1876 status = readString16(pArg->get());
1877
1878 if (status != OK) {
1879 pArg->reset();
1880 }
1881
1882 return status;
1883 }
1884
1885 status_t Parcel::readString16(String16* pArg) const
1886 {
1887 size_t len;
1888 const char16_t* str = readString16Inplace(&len);
1889 if (str) {
1890 pArg->setTo(str, len);
1891 return 0;
1892 } else {
1893 *pArg = String16();
1894 return UNEXPECTED_NULL;
1895 }
1896 }
1897
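// Wire format shared by the String16 readers above: an int32 character count
// followed by (count + 1) char16_t values, including the trailing NUL. On
// success *outLen receives the count and the returned pointer aliases the
// parcel's buffer; on failure *outLen is zeroed and NULL is returned.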
1898 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1899 {
1900 int32_t size = readInt32();
1901 // watch for potential int overflow from size+1
1902 if (size >= 0 && size < INT32_MAX) {
1903 *outLen = size;
1904 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1905 if (str != NULL) {
1906 return str;
1907 }
1908 }
1909 *outLen = 0;
1910 return NULL;
1911 }
1912
1913 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
1914 {
1915 return unflatten_binder(ProcessState::self(), *this, val);
1916 }
1917
1918 sp<IBinder> Parcel::readStrongBinder() const
1919 {
1920 sp<IBinder> val;
1921 readStrongBinder(&val);
1922 return val;
1923 }
1924
1925 wp<IBinder> Parcel::readWeakBinder() const
1926 {
1927 wp<IBinder> val;
1928 unflatten_binder(ProcessState::self(), *this, &val);
1929 return val;
1930 }
1931
1932 status_t Parcel::readParcelable(Parcelable* parcelable) const {
1933 int32_t have_parcelable = 0;
1934 status_t status = readInt32(&have_parcelable);
1935 if (status != OK) {
1936 return status;
1937 }
1938 if (!have_parcelable) {
1939 return UNEXPECTED_NULL;
1940 }
1941 return parcelable->readFromParcel(this);
1942 }
1943
1944 int32_t Parcel::readExceptionCode() const
1945 {
1946 binder::Status status;
1947 status.readFromParcel(*this);
1948 return status.exceptionCode();
1949 }
1950
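// Rebuilds a native_handle from the parcel: an int32 fd count, an int32 int
// count, then that many file descriptors (dup()ed here, so the returned
// handle owns its copies) followed by the raw ints. Any failure tears down
// the partially built handle and returns NULL; the caller is expected to
// release a returned handle with native_handle_close()/native_handle_delete().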
1951 native_handle* Parcel::readNativeHandle() const
1952 {
1953 int numFds, numInts;
1954 status_t err;
1955 err = readInt32(&numFds);
1956 if (err != NO_ERROR) return 0;
1957 err = readInt32(&numInts);
1958 if (err != NO_ERROR) return 0;
1959
1960 native_handle* h = native_handle_create(numFds, numInts);
1961 if (!h) {
1962 return 0;
1963 }
1964
1965 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
1966 h->data[i] = dup(readFileDescriptor());
1967 if (h->data[i] < 0) {
1968 for (int j = 0; j < i; j++) {
1969 close(h->data[j]);
1970 }
1971 native_handle_delete(h);
1972 return 0;
1973 }
1974 }
1975 err = read(h->data + numFds, sizeof(int)*numInts);
1976 if (err != NO_ERROR) {
1977 native_handle_close(h);
1978 native_handle_delete(h);
1979 h = 0;
1980 }
1981 return h;
1982 }
1983
1984
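// Returns the raw file descriptor stored in the next flat_binder_object, or
// BAD_TYPE if that object is not an fd. The descriptor remains owned by the
// parcel; callers that need to keep it must dup() it, as readNativeHandle()
// above and readUniqueFileDescriptor() below do.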
1985 int Parcel::readFileDescriptor() const
1986 {
1987 const flat_binder_object* flat = readObject(true);
1988
1989 if (flat && flat->type == BINDER_TYPE_FD) {
1990 return flat->handle;
1991 }
1992
1993 return BAD_TYPE;
1994 }
1995
1996 status_t Parcel::readUniqueFileDescriptor(ScopedFd* val) const
1997 {
1998 int got = readFileDescriptor();
1999
2000 if (got == BAD_TYPE) {
2001 return BAD_TYPE;
2002 }
2003
2004 val->reset(dup(got));
2005
2006 if (val->get() < 0) {
2007 return BAD_VALUE;
2008 }
2009
2010 return OK;
2011 }
2012
2013
2014 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<ScopedFd>>* val) const {
2015 return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2016 }
2017
2018 status_t Parcel::readUniqueFileDescriptorVector(std::vector<ScopedFd>* val) const {
2019 return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2020 }
2021
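// Reads a blob written by the matching writeBlob() call: small payloads are
// stored in-place in the parcel (BLOB_INPLACE), larger ones arrive as an
// ashmem file descriptor that is mmap()ed here, read-only unless the blob was
// sent as BLOB_ASHMEM_MUTABLE.
//
// Illustrative usage (a sketch; 'reply' and 'length' are placeholders):
//   Parcel::ReadableBlob blob;
//   if (reply.readBlob(length, &blob) == NO_ERROR) {
//       // consume the blob's bytes, then unmap/clear it:
//       blob.release();
//   }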
2022 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2023 {
2024 int32_t blobType;
2025 status_t status = readInt32(&blobType);
2026 if (status) return status;
2027
2028 if (blobType == BLOB_INPLACE) {
2029 ALOGV("readBlob: read in place");
2030 const void* ptr = readInplace(len);
2031 if (!ptr) return BAD_VALUE;
2032
2033 outBlob->init(-1, const_cast<void*>(ptr), len, false);
2034 return NO_ERROR;
2035 }
2036
2037 ALOGV("readBlob: read from ashmem");
2038 bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2039 int fd = readFileDescriptor();
2040 if (fd == int(BAD_TYPE)) return BAD_VALUE;
2041
2042 void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2043 MAP_SHARED, fd, 0);
2044 if (ptr == MAP_FAILED) return NO_MEMORY;
2045
2046 outBlob->init(fd, ptr, len, isMutable);
2047 return NO_ERROR;
2048 }
2049
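// Counterpart of write(const FlattenableHelperInterface&): reads the
// flattened byte length and fd count, rejects obviously bogus values, then
// hands the in-place payload plus dup()ed descriptors to the type's
// unflatten().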
2050 status_t Parcel::read(FlattenableHelperInterface& val) const
2051 {
2052 // size
2053 const size_t len = this->readInt32();
2054 const size_t fd_count = this->readInt32();
2055
2056 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2057 // don't accept size_t values which may have come from an
2058 // inadvertent conversion from a negative int.
2059 return BAD_VALUE;
2060 }
2061
2062 // payload
2063 void const* const buf = this->readInplace(pad_size(len));
2064 if (buf == NULL)
2065 return BAD_VALUE;
2066
2067 int* fds = NULL;
2068 if (fd_count) {
2069 fds = new (std::nothrow) int[fd_count];
2070 if (fds == nullptr) {
2071 ALOGE("read: failed to allocate requested %zu fds", fd_count);
2072 return BAD_VALUE;
2073 }
2074 }
2075
2076 status_t err = NO_ERROR;
2077 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2078 fds[i] = dup(this->readFileDescriptor());
2079 if (fds[i] < 0) {
2080 err = BAD_VALUE;
2081 ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2082 i, fds[i], fd_count, strerror(errno));
2083 }
2084 }
2085
2086 if (err == NO_ERROR) {
2087 err = val.unflatten(buf, len, fds, fd_count);
2088 }
2089
2090 if (fd_count) {
2091 delete [] fds;
2092 }
2093
2094 return err;
2095 }
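
// Returns the flat_binder_object at the current read position, validated
// against the parcel's object-offset table. mNextObjectHint remembers where
// the previous lookup landed so sequential reads only scan forward; a
// backward scan covers out-of-order reads. When nullMetaData is false, an
// object whose binder and cookie are both zero (the encoding of a null
// object, which is never entered in the table) is returned without a lookup.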
2096 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2097 {
2098 const size_t DPOS = mDataPos;
2099 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2100 const flat_binder_object* obj
2101 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2102 mDataPos = DPOS + sizeof(flat_binder_object);
2103 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2104 // When transferring a NULL object, we don't write it into
2105 // the object list, so we don't want to check for it when
2106 // reading.
2107 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2108 return obj;
2109 }
2110
2111 // Ensure that this object is valid...
2112 binder_size_t* const OBJS = mObjects;
2113 const size_t N = mObjectsSize;
2114 size_t opos = mNextObjectHint;
2115
2116 if (N > 0) {
2117 ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2118 this, DPOS, opos);
2119
2120 // Start at the current hint position, looking for an object at
2121 // the current data position.
2122 if (opos < N) {
2123 while (opos < (N-1) && OBJS[opos] < DPOS) {
2124 opos++;
2125 }
2126 } else {
2127 opos = N-1;
2128 }
2129 if (OBJS[opos] == DPOS) {
2130 // Found it!
2131 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2132 this, DPOS, opos);
2133 mNextObjectHint = opos+1;
2134 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2135 return obj;
2136 }
2137
2138 // Look backwards for it...
2139 while (opos > 0 && OBJS[opos] > DPOS) {
2140 opos--;
2141 }
2142 if (OBJS[opos] == DPOS) {
2143 // Found it!
2144 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2145 this, DPOS, opos);
2146 mNextObjectHint = opos+1;
2147 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2148 return obj;
2149 }
2150 }
2151 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2152 this, DPOS);
2153 }
2154 return NULL;
2155 }
2156
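// Closes every file descriptor recorded in the object table; the table and
// the data buffer themselves are left untouched.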
2157 void Parcel::closeFileDescriptors()
2158 {
2159 size_t i = mObjectsSize;
2160 if (i > 0) {
2161 //ALOGI("Closing file descriptors for %zu objects...", i);
2162 }
2163 while (i > 0) {
2164 i--;
2165 const flat_binder_object* flat
2166 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2167 if (flat->type == BINDER_TYPE_FD) {
2168 //ALOGI("Closing fd: %ld", flat->handle);
2169 close(flat->handle);
2170 }
2171 }
2172 }
2173
2174 uintptr_t Parcel::ipcData() const
2175 {
2176 return reinterpret_cast<uintptr_t>(mData);
2177 }
2178
2179 size_t Parcel::ipcDataSize() const
2180 {
2181 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2182 }
2183
2184 uintptr_t Parcel::ipcObjects() const
2185 {
2186 return reinterpret_cast<uintptr_t>(mObjects);
2187 }
2188
2189 size_t Parcel::ipcObjectsCount() const
2190 {
2191 return mObjectsSize;
2192 }
2193
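// Adopts a buffer owned by the IPC layer instead of copying it: the parcel
// keeps the caller's pointers and later invokes relFunc (with relCookie) to
// release them. Object offsets must be increasing and non-overlapping; if any
// offset violates that, the entire object table is discarded before the fd
// flag is rescanned.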
2194 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2195 const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2196 {
2197 binder_size_t minOffset = 0;
2198 freeDataNoInit();
2199 mError = NO_ERROR;
2200 mData = const_cast<uint8_t*>(data);
2201 mDataSize = mDataCapacity = dataSize;
2202 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2203 mDataPos = 0;
2204 ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2205 mObjects = const_cast<binder_size_t*>(objects);
2206 mObjectsSize = mObjectsCapacity = objectsCount;
2207 mNextObjectHint = 0;
2208 mOwner = relFunc;
2209 mOwnerCookie = relCookie;
2210 for (size_t i = 0; i < mObjectsSize; i++) {
2211 binder_size_t offset = mObjects[i];
2212 if (offset < minOffset) {
2213 ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2214 __func__, (uint64_t)offset, (uint64_t)minOffset);
2215 mObjectsSize = 0;
2216 break;
2217 }
2218 minOffset = offset + sizeof(flat_binder_object);
2219 }
2220 scanForFds();
2221 }
2222
2223 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2224 {
2225 to << "Parcel(";
2226
2227 if (errorCheck() != NO_ERROR) {
2228 const status_t err = errorCheck();
2229 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2230 } else if (dataSize() > 0) {
2231 const uint8_t* DATA = data();
2232 to << indent << HexDump(DATA, dataSize()) << dedent;
2233 const binder_size_t* OBJS = objects();
2234 const size_t N = objectsCount();
2235 for (size_t i=0; i<N; i++) {
2236 const flat_binder_object* flat
2237 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2238 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2239 << TypeCode(flat->type & 0x7f7f7f00)
2240 << " = " << flat->binder;
2241 }
2242 } else {
2243 to << "NULL";
2244 }
2245
2246 to << ")";
2247 }
2248
2249 void Parcel::releaseObjects()
2250 {
2251 const sp<ProcessState> proc(ProcessState::self());
2252 size_t i = mObjectsSize;
2253 uint8_t* const data = mData;
2254 binder_size_t* const objects = mObjects;
2255 while (i > 0) {
2256 i--;
2257 const flat_binder_object* flat
2258 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2259 release_object(proc, *flat, this, &mOpenAshmemSize);
2260 }
2261 }
2262
2263 void Parcel::acquireObjects()
2264 {
2265 const sp<ProcessState> proc(ProcessState::self());
2266 size_t i = mObjectsSize;
2267 uint8_t* const data = mData;
2268 binder_size_t* const objects = mObjects;
2269 while (i > 0) {
2270 i--;
2271 const flat_binder_object* flat
2272 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2273 acquire_object(proc, *flat, this, &mOpenAshmemSize);
2274 }
2275 }
2276
2277 void Parcel::freeData()
2278 {
2279 freeDataNoInit();
2280 initState();
2281 }
2282
2283 void Parcel::freeDataNoInit()
2284 {
2285 if (mOwner) {
2286 LOG_ALLOC("Parcel %p: freeing other owner data", this);
2287 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2288 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2289 } else {
2290 LOG_ALLOC("Parcel %p: freeing allocated data", this);
2291 releaseObjects();
2292 if (mData) {
2293 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2294 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2295 if (mDataCapacity <= gParcelGlobalAllocSize) {
2296 gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2297 } else {
2298 gParcelGlobalAllocSize = 0;
2299 }
2300 if (gParcelGlobalAllocCount > 0) {
2301 gParcelGlobalAllocCount--;
2302 }
2303 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2304 free(mData);
2305 }
2306 if (mObjects) free(mObjects);
2307 }
2308 }
2309
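// Grows the backing buffer to roughly 1.5x the space needed for the pending
// write; the size arithmetic is checked so that overflow fails with NO_MEMORY
// instead of silently shrinking the buffer.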
2310 status_t Parcel::growData(size_t len)
2311 {
2312 if (len > INT32_MAX) {
2313 // don't accept size_t values which may have come from an
2314 // inadvertent conversion from a negative int.
2315 return BAD_VALUE;
2316 }
2317
2318 size_t newSize = ((mDataSize+len)*3)/2;
2319 return (newSize <= mDataSize)
2320 ? (status_t) NO_MEMORY
2321 : continueWrite(newSize);
2322 }
2323
2324 status_t Parcel::restartWrite(size_t desired)
2325 {
2326 if (desired > INT32_MAX) {
2327 // don't accept size_t values which may have come from an
2328 // inadvertent conversion from a negative int.
2329 return BAD_VALUE;
2330 }
2331
2332 if (mOwner) {
2333 freeData();
2334 return continueWrite(desired);
2335 }
2336
2337 uint8_t* data = (uint8_t*)realloc(mData, desired);
2338 if (!data && desired > mDataCapacity) {
2339 mError = NO_MEMORY;
2340 return NO_MEMORY;
2341 }
2342
2343 releaseObjects();
2344
2345 if (data) {
2346 LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2347 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2348 gParcelGlobalAllocSize += desired;
2349 gParcelGlobalAllocSize -= mDataCapacity;
2350 if (!mData) {
2351 gParcelGlobalAllocCount++;
2352 }
2353 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2354 mData = data;
2355 mDataCapacity = desired;
2356 }
2357
2358 mDataSize = mDataPos = 0;
2359 ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2360 ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2361
2362 free(mObjects);
2363 mObjects = NULL;
2364 mObjectsSize = mObjectsCapacity = 0;
2365 mNextObjectHint = 0;
2366 mHasFds = false;
2367 mFdsKnown = true;
2368 mAllowFds = true;
2369
2370 return NO_ERROR;
2371 }
2372
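// Resizes the parcel to 'desired' bytes while preserving as much existing
// data as fits. Three cases: data adopted via ipcSetDataReference() is copied
// into a freshly owned buffer (releasing the previous owner), owned data is
// realloc()ed (dropping, and releasing references on, any objects that fall
// past the new size), and an empty parcel simply gets its first allocation.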
2373 status_t Parcel::continueWrite(size_t desired)
2374 {
2375 if (desired > INT32_MAX) {
2376 // don't accept size_t values which may have come from an
2377 // inadvertent conversion from a negative int.
2378 return BAD_VALUE;
2379 }
2380
2381 // If shrinking, first adjust for any objects that appear
2382 // after the new data size.
2383 size_t objectsSize = mObjectsSize;
2384 if (desired < mDataSize) {
2385 if (desired == 0) {
2386 objectsSize = 0;
2387 } else {
2388 while (objectsSize > 0) {
2389 if (mObjects[objectsSize-1] < desired)
2390 break;
2391 objectsSize--;
2392 }
2393 }
2394 }
2395
2396 if (mOwner) {
2397 // If the size is going to zero, just release the owner's data.
2398 if (desired == 0) {
2399 freeData();
2400 return NO_ERROR;
2401 }
2402
2403 // If there is a different owner, we need to take
2404 // possession.
2405 uint8_t* data = (uint8_t*)malloc(desired);
2406 if (!data) {
2407 mError = NO_MEMORY;
2408 return NO_MEMORY;
2409 }
2410 binder_size_t* objects = NULL;
2411
2412 if (objectsSize) {
2413 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2414 if (!objects) {
2415 free(data);
2416
2417 mError = NO_MEMORY;
2418 return NO_MEMORY;
2419 }
2420
2421 // Little hack to only acquire references on objects
2422 // we will be keeping.
2423 size_t oldObjectsSize = mObjectsSize;
2424 mObjectsSize = objectsSize;
2425 acquireObjects();
2426 mObjectsSize = oldObjectsSize;
2427 }
2428
2429 if (mData) {
2430 memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2431 }
2432 if (objects && mObjects) {
2433 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2434 }
2435 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2436 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2437 mOwner = NULL;
2438
2439 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2440 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2441 gParcelGlobalAllocSize += desired;
2442 gParcelGlobalAllocCount++;
2443 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2444
2445 mData = data;
2446 mObjects = objects;
2447 mDataSize = (mDataSize < desired) ? mDataSize : desired;
2448 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2449 mDataCapacity = desired;
2450 mObjectsSize = mObjectsCapacity = objectsSize;
2451 mNextObjectHint = 0;
2452
2453 } else if (mData) {
2454 if (objectsSize < mObjectsSize) {
2455 // Need to release refs on any objects we are dropping.
2456 const sp<ProcessState> proc(ProcessState::self());
2457 for (size_t i=objectsSize; i<mObjectsSize; i++) {
2458 const flat_binder_object* flat
2459 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2460 if (flat->type == BINDER_TYPE_FD) {
2461 // will need to rescan because we may have lopped off the only FDs
2462 mFdsKnown = false;
2463 }
2464 release_object(proc, *flat, this, &mOpenAshmemSize);
2465 }
2466 binder_size_t* objects =
2467 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2468 if (objects) {
2469 mObjects = objects;
2470 }
2471 mObjectsSize = objectsSize;
2472 mNextObjectHint = 0;
2473 }
2474
2475 // We own the data, so we can just do a realloc().
2476 if (desired > mDataCapacity) {
2477 uint8_t* data = (uint8_t*)realloc(mData, desired);
2478 if (data) {
2479 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2480 desired);
2481 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2482 gParcelGlobalAllocSize += desired;
2483 gParcelGlobalAllocSize -= mDataCapacity;
2484 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2485 mData = data;
2486 mDataCapacity = desired;
2487 } else if (desired > mDataCapacity) {
2488 mError = NO_MEMORY;
2489 return NO_MEMORY;
2490 }
2491 } else {
2492 if (mDataSize > desired) {
2493 mDataSize = desired;
2494 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2495 }
2496 if (mDataPos > desired) {
2497 mDataPos = desired;
2498 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2499 }
2500 }
2501
2502 } else {
2503 // This is the first data. Easy!
2504 uint8_t* data = (uint8_t*)malloc(desired);
2505 if (!data) {
2506 mError = NO_MEMORY;
2507 return NO_MEMORY;
2508 }
2509
2510 if(!(mDataCapacity == 0 && mObjects == NULL
2511 && mObjectsCapacity == 0)) {
2512 ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2513 }
2514
2515 LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2516 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2517 gParcelGlobalAllocSize += desired;
2518 gParcelGlobalAllocCount++;
2519 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2520
2521 mData = data;
2522 mDataSize = mDataPos = 0;
2523 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2524 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2525 mDataCapacity = desired;
2526 }
2527
2528 return NO_ERROR;
2529 }
2530
2531 void Parcel::initState()
2532 {
2533 LOG_ALLOC("Parcel %p: initState", this);
2534 mError = NO_ERROR;
2535 mData = 0;
2536 mDataSize = 0;
2537 mDataCapacity = 0;
2538 mDataPos = 0;
2539 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2540 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2541 mObjects = NULL;
2542 mObjectsSize = 0;
2543 mObjectsCapacity = 0;
2544 mNextObjectHint = 0;
2545 mHasFds = false;
2546 mFdsKnown = true;
2547 mAllowFds = true;
2548 mOwner = NULL;
2549 mOpenAshmemSize = 0;
2550
2551 // Racing initializations are benign: every thread computes and writes the same gMaxFds value.
2552 if (gMaxFds == 0) {
2553 struct rlimit result;
2554 if (!getrlimit(RLIMIT_NOFILE, &result)) {
2555 gMaxFds = (size_t)result.rlim_cur;
2556 //ALOGI("parcel fd limit set to %zu", gMaxFds);
2557 } else {
2558 ALOGW("Unable to getrlimit: %s", strerror(errno));
2559 gMaxFds = 1024;
2560 }
2561 }
2562 }
2563
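// Recomputes mHasFds by scanning the object table for BINDER_TYPE_FD entries;
// ipcSetDataReference() above calls this after adopting a new object table.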
2564 void Parcel::scanForFds() const
2565 {
2566 bool hasFds = false;
2567 for (size_t i=0; i<mObjectsSize; i++) {
2568 const flat_binder_object* flat
2569 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2570 if (flat->type == BINDER_TYPE_FD) {
2571 hasFds = true;
2572 break;
2573 }
2574 }
2575 mHasFds = hasFds;
2576 mFdsKnown = true;
2577 }
2578
2579 size_t Parcel::getBlobAshmemSize() const
2580 {
2581 // This used to return the size of all blobs that were written to ashmem, now we're returning
2582 // the ashmem currently referenced by this Parcel, which should be equivalent.
2583 // TODO: Remove method once ABI can be changed.
2584 return mOpenAshmemSize;
2585 }
2586
2587 size_t Parcel::getOpenAshmemSize() const
2588 {
2589 return mOpenAshmemSize;
2590 }
2591
2592 // --- Parcel::Blob ---
2593
2594 Parcel::Blob::Blob() :
2595 mFd(-1), mData(NULL), mSize(0), mMutable(false) {
2596 }
2597
2598 Parcel::Blob::~Blob() {
2599 release();
2600 }
2601
2602 void Parcel::Blob::release() {
2603 if (mFd != -1 && mData) {
2604 ::munmap(mData, mSize);
2605 }
2606 clear();
2607 }
2608
2609 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2610 mFd = fd;
2611 mData = data;
2612 mSize = size;
2613 mMutable = isMutable;
2614 }
2615
2616 void Parcel::Blob::clear() {
2617 mFd = -1;
2618 mData = NULL;
2619 mSize = 0;
2620 mMutable = false;
2621 }
2622
2623 }; // namespace android
2624