/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Parcel"
//#define LOG_NDEBUG 0

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <unistd.h>

#include <algorithm>
#include <limits>

#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include <binder/ProcessState.h>
#include <binder/Stability.h>
#include <binder/Status.h>
#include <binder/TextOutput.h>

#include <cutils/ashmem.h>
#include <utils/Debug.h>
#include <utils/Flattenable.h>
#include <utils/Log.h>
#include <utils/misc.h>
#include <utils/String8.h>
#include <utils/String16.h>

#include <private/binder/binder_module.h>
#include "Static.h"

#define LOG_REFS(...)
//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOG_ALLOC(...)
//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)

// ---------------------------------------------------------------------------

// This macro should never be used at runtime, as a too large value
// of s could cause an integer overflow. Instead, you should always
// use the wrapper function pad_size()
#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)

static size_t pad_size(size_t s) {
    if (s > (std::numeric_limits<size_t>::max() - 3)) {
        LOG_ALWAYS_FATAL("pad size too big %zu", s);
    }
    return PAD_SIZE_UNSAFE(s);
}

// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
#define STRICT_MODE_PENALTY_GATHER (1 << 31)

namespace android {

// many prebuilts compile Parcel onto the stack, so its size must not change
static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);

static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
static size_t gParcelGlobalAllocSize = 0;
static size_t gParcelGlobalAllocCount = 0;

static size_t gMaxFds = 0;

// Maximum size of a blob to transfer in-place.
static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;

enum {
    BLOB_INPLACE = 0,
    BLOB_ASHMEM_IMMUTABLE = 1,
    BLOB_ASHMEM_MUTABLE = 2,
};

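// acquire_object() / release_object() take or drop the references implied by a
// flattened object stored in a Parcel: strong references for local binders
// (BINDER_TYPE_BINDER) and proxies (BINDER_TYPE_HANDLE); for BINDER_TYPE_FD they
// track ashmem usage and, on release, close fds this Parcel owns.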
static void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            if ((obj.cookie != 0) && (outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
                // If we own an ashmem fd, keep track of how much memory it refers to.
                int size = ashmem_get_size_region(obj.handle);
                if (size > 0) {
                    *outAshmemSize += size;
                }
            }
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.hdr.type);
}

static void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            if (obj.cookie != 0) { // owned
                if ((outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
                    int size = ashmem_get_size_region(obj.handle);
                    if (size > 0) {
                        // ashmem size might have changed since last time it was accounted for, e.g.
                        // in acquire_object(). Value of *outAshmemSize is not critical since we are
                        // releasing the object anyway. Check for integer overflow condition.
                        *outAshmemSize -= std::min(*outAshmemSize, static_cast<size_t>(size));
                    }
                }

                close(obj.handle);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.hdr.type);
}

status_t Parcel::finishFlattenBinder(
    const sp<IBinder>& binder, const flat_binder_object& flat)
{
    status_t status = writeObject(flat, false);
    if (status != OK) return status;

    internal::Stability::tryMarkCompilationUnit(binder.get());
    return writeInt32(internal::Stability::get(binder.get()));
}

status_t Parcel::finishUnflattenBinder(
    const sp<IBinder>& binder, sp<IBinder>* out) const
{
    int32_t stability;
    status_t status = readInt32(&stability);
    if (status != OK) return status;

    status = internal::Stability::set(binder.get(), stability, true /*log*/);
    if (status != OK) return status;

    *out = binder;
    return OK;
}

status_t Parcel::flattenBinder(const sp<IBinder>& binder)
{
    flat_binder_object obj;

    if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
        /* minimum priority for all nodes is nice 0 */
        obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
    } else {
        /* minimum priority for all nodes is MAX_NICE(19) */
        obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    }

    if (binder != nullptr) {
        BBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finishFlattenBinder(binder, obj);
}

status_t Parcel::unflattenBinder(sp<IBinder>* out) const
{
    const flat_binder_object* flat = readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER: {
                sp<IBinder> binder = reinterpret_cast<IBinder*>(flat->cookie);
                return finishUnflattenBinder(binder, out);
            }
            case BINDER_TYPE_HANDLE: {
                sp<IBinder> binder =
                    ProcessState::self()->getStrongProxyForHandle(flat->handle);
                return finishUnflattenBinder(binder, out);
            }
        }
    }
    return BAD_TYPE;
}

// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}

size_t Parcel::getGlobalAllocSize() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t size = gParcelGlobalAllocSize;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return size;
}

size_t Parcel::getGlobalAllocCount() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t count = gParcelGlobalAllocCount;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return count;
}

const uint8_t* Parcel::data() const
{
    return mData;
}

size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

size_t Parcel::dataAvail() const
{
    size_t result = dataSize() - dataPosition();
    if (result > INT32_MAX) {
        LOG_ALWAYS_FATAL("result too big: %zu", result);
    }
    return result;
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

status_t Parcel::setDataSize(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
    }
    return err;
}

void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        LOG_ALWAYS_FATAL("pos too big: %zu", pos);
    }

    mDataPos = pos;
    mNextObjectHint = 0;
    mObjectsSorted = false;
}

status_t Parcel::setDataCapacity(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err = restartWrite(len);
    if (err == NO_ERROR) {
        memcpy(const_cast<uint8_t*>(data()), buffer, len);
        mDataSize = len;
        mFdsKnown = false;
    }
    return err;
}

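// Copy the byte range [offset, offset+len) from another Parcel into this one.
// Flattened objects that fall entirely within the range are re-acquired, and
// file descriptors are dup()ed so this Parcel owns its own copies.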
status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
{
    status_t err;
    const uint8_t *data = parcel->mData;
    const binder_size_t *objects = parcel->mObjects;
    size_t size = parcel->mObjectsSize;
    int startPos = mDataPos;
    int firstIndex = -1, lastIndex = -2;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    // Count objects in range
    for (int i = 0; i < (int) size; i++) {
        size_t off = objects[i];
        if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
            if (firstIndex == -1) {
                firstIndex = i;
            }
            lastIndex = i;
        }
    }
    int numObjects = lastIndex - firstIndex + 1;

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (numObjects > 0) {
        const sp<ProcessState> proc(ProcessState::self());
        // grow objects
        if (mObjectsCapacity < mObjectsSize + numObjects) {
            if ((size_t) numObjects > SIZE_MAX - mObjectsSize) return NO_MEMORY; // overflow
            if (mObjectsSize + numObjects > SIZE_MAX / 3) return NO_MEMORY; // overflow
            size_t newSize = ((mObjectsSize + numObjects)*3)/2;
            if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
            binder_size_t *objects =
                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
            if (objects == (binder_size_t*)nullptr) {
                return NO_MEMORY;
            }
            mObjects = objects;
            mObjectsCapacity = newSize;
        }

        // append and acquire objects
        int idx = mObjectsSize;
        for (int i = firstIndex; i <= lastIndex; i++) {
            size_t off = objects[i] - offset + startPos;
            mObjects[idx++] = off;
            mObjectsSize++;

            flat_binder_object* flat
                = reinterpret_cast<flat_binder_object*>(mData + off);
            acquire_object(proc, *flat, this, &mOpenAshmemSize);

            if (flat->hdr.type == BINDER_TYPE_FD) {
                // If this is a file descriptor, we need to dup it so the
                // new Parcel now owns its own fd, and can declare that we
                // officially know we have fds.
                flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
                flat->cookie = 1;
                mHasFds = mFdsKnown = true;
                if (!mAllowFds) {
                    err = FDS_NOT_ALLOWED;
                }
            }
        }
    }

    return err;
}

int Parcel::compareData(const Parcel& other) {
    size_t size = dataSize();
    if (size != other.dataSize()) {
        return size < other.dataSize() ? -1 : 1;
    }
    return memcmp(data(), other.data(), size);
}

bool Parcel::allowFds() const
{
    return mAllowFds;
}

bool Parcel::pushAllowFds(bool allowFds)
{
    const bool origValue = mAllowFds;
    if (!allowFds) {
        mAllowFds = false;
    }
    return origValue;
}

void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}

bool Parcel::hasFileDescriptors() const
{
    if (!mFdsKnown) {
        scanForFds();
    }
    return mHasFds;
}

void Parcel::updateWorkSourceRequestHeaderPosition() const {
    // Only update the request headers once. We only want to point
    // to the first headers read/written.
    if (!mRequestHeaderPresent) {
        mWorkSourceRequestHeaderPosition = dataPosition();
        mRequestHeaderPresent = true;
    }
}

#if defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
#else
constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
#endif

// Write RPC headers.  (previously just the interface token)
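// Header layout written here (and read back by enforceInterface() below):
//   int32    strict mode policy (with STRICT_MODE_PENALTY_GATHER set)
//   int32    work source uid, or kUnsetWorkSource
//   int32    kHeader marker
//   String16 interface descriptor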
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    const IPCThreadState* threadState = IPCThreadState::self();
    writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
    updateWorkSourceRequestHeaderPosition();
    writeInt32(threadState->shouldPropagateWorkSource() ?
            threadState->getCallingWorkSourceUid() : IPCThreadState::kUnsetWorkSource);
    writeInt32(kHeader);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}

bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
{
    if (!mRequestHeaderPresent) {
        return false;
    }

    const size_t initialPosition = dataPosition();
    setDataPosition(mWorkSourceRequestHeaderPosition);
    status_t err = writeInt32(uid);
    setDataPosition(initialPosition);
    return err == NO_ERROR;
}

uid_t Parcel::readCallingWorkSourceUid() const
{
    if (!mRequestHeaderPresent) {
        return IPCThreadState::kUnsetWorkSource;
    }

    const size_t initialPosition = dataPosition();
    setDataPosition(mWorkSourceRequestHeaderPosition);
    uid_t uid = readInt32();
    setDataPosition(initialPosition);
    return uid;
}

bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    return enforceInterface(interface.string(), interface.size(), threadState);
}

bool Parcel::enforceInterface(const char16_t* interface,
                              size_t len,
                              IPCThreadState* threadState) const
{
    // StrictModePolicy.
    int32_t strictPolicy = readInt32();
    if (threadState == nullptr) {
        threadState = IPCThreadState::self();
    }
    if ((threadState->getLastTransactionBinderFlags() &
         IBinder::FLAG_ONEWAY) != 0) {
      // For one-way calls, the callee is running entirely
      // disconnected from the caller, so disable StrictMode entirely.
      // Not only does disk/network usage not impact the caller, but
      // there's no way to communicate back any violations anyway.
      threadState->setStrictModePolicy(0);
    } else {
      threadState->setStrictModePolicy(strictPolicy);
    }
    // WorkSource.
    updateWorkSourceRequestHeaderPosition();
    int32_t workSource = readInt32();
    threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
    // vendor header
    int32_t header = readInt32();
    if (header != kHeader) {
        ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader, header);
        return false;
    }
    // Interface descriptor.
    size_t parcel_interface_len;
    const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
    if (len == parcel_interface_len &&
            (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              String8(interface, len).string(),
              String8(parcel_interface, parcel_interface_len).string());
        return false;
    }
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t end = mDataPos + len;
    if (end < mDataPos) {
        // integer overflow
        return BAD_VALUE;
    }

    if (end <= mDataCapacity) {
restart_write:
        memcpy(mData+mDataPos, data, len);
        return finishWrite(len);
    }

    status_t err = growData(len);
    if (err == NO_ERROR) goto restart_write;
    return err;
}

status_t Parcel::write(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    void* const d = writeInplace(len);
    if (d) {
        memcpy(d, data, len);
        return NO_ERROR;
    }
    return mError;
}

void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // sanity check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end?
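        // Zero the pad bytes in the final word so stale buffer contents are not
        // carried along with the written data.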
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}

status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
    const uint8_t* strData = (uint8_t*)str.data();
    const size_t strLen= str.length();
    const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
    if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
        return BAD_VALUE;
    }

    status_t err = writeInt32(utf16Len);
    if (err) {
        return err;
    }

    // Allocate enough bytes to hold our converted string and its terminating NULL.
    void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
    if (!dst) {
        return NO_MEMORY;
    }

    utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);

    return NO_ERROR;
}

status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
  if (!str) {
    return writeInt32(-1);
  }
  return writeUtf8AsUtf16(*str);
}

status_t Parcel::writeByteVectorInternal(const int8_t* data, size_t size) {
    if (size > std::numeric_limits<int32_t>::max()) {
        return BAD_VALUE;
    }

    status_t status = writeInt32(size);
    if (status != OK) {
        return status;
    }

    return write(data, size);
}

status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
    return writeByteVectorInternal(val.data(), val.size());
}

status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
{
    if (!val) return writeInt32(-1);
    return writeByteVectorInternal(val->data(), val->size());
}

status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
    return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val.data()), val.size());
}

status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
{
    if (!val) return writeInt32(-1);
    return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val->data()), val->size());
}

status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
{
    return writeTypedVector(val, &Parcel::writeInt32);
}

status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeInt32);
}

status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
{
    return writeTypedVector(val, &Parcel::writeInt64);
}

status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeInt64);
}

status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val)
{
    return writeTypedVector(val, &Parcel::writeUint64);
}

status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeUint64);
}

status_t Parcel::writeFloatVector(const std::vector<float>& val)
{
    return writeTypedVector(val, &Parcel::writeFloat);
}

status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeFloat);
}

status_t Parcel::writeDoubleVector(const std::vector<double>& val)
{
    return writeTypedVector(val, &Parcel::writeDouble);
}

status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeDouble);
}

status_t Parcel::writeBoolVector(const std::vector<bool>& val)
{
    return writeTypedVector(val, &Parcel::writeBool);
}

status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeBool);
}

status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
{
    return writeTypedVector(val, &Parcel::writeChar);
}

status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeChar);
}

status_t Parcel::writeString16Vector(const std::vector<String16>& val)
{
    return writeTypedVector(val, &Parcel::writeString16);
}

status_t Parcel::writeString16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeString16);
}

status_t Parcel::writeUtf8VectorAsUtf16Vector(
                        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
    return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
}

status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
    return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
}

status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeInt32(-1);
    }
    status_t ret = writeInt32(static_cast<uint32_t>(len));
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeInt32(-1);
    }
    status_t ret = writeInt32(static_cast<uint32_t>(len));
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}

status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}

#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif

status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

status_t Parcel::writeString8(const String8& str)
{
    return writeString8(str.string(), str.size());
}

status_t Parcel::writeString8(const char* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
{
    if (!str) {
        return writeInt32(-1);
    }

    return writeString16(*str);
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}

status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flattenBinder(val);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
{
    return writeTypedVector(val, &Parcel::writeStrongBinder);
}

status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
    return readNullableTypedVector(val, &Parcel::readNullableStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
    return readTypedVector(val, &Parcel::readStrongBinder);
}

status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
    if (!parcelable) {
        return writeInt32(0);
    }

    return writeParcelable(*parcelable);
}

status_t Parcel::writeParcelable(const Parcelable& parcelable) {
    status_t status = writeInt32(1);  // parcelable is not null.
    if (status != OK) {
        return status;
    }
    return parcelable.writeToParcel(this);
}

status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}

status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
{
    flat_binder_object obj;
    obj.hdr.type = BINDER_TYPE_FD;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
}

status_t Parcel::writeDupFileDescriptor(int fd)
{
    int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err != OK) {
        close(dupFd);
    }
    return err;
}

status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
{
    writeInt32(0);
    return writeFileDescriptor(fd, takeOwnership);
}

status_t Parcel::writeDupParcelFileDescriptor(int fd)
{
    int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err != OK) {
        close(dupFd);
    }
    return err;
}

status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) {
    return writeDupFileDescriptor(fd.get());
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) {
    return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) {
    return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
}

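// Blobs up to BLOB_INPLACE_LIMIT (or of any size when fds are not allowed) are
// written directly into the Parcel; larger blobs are copied into an anonymous
// ashmem region and passed across as a file descriptor.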
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}

status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
{
    // Must match up with what's done in writeBlob.
    if (!mAllowFds) return FDS_NOT_ALLOWED;
    status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
    if (status) return status;
    return writeDupFileDescriptor(fd);
}

status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    err = this->writeInt32(len);
    if (err) return err;

    err = this->writeInt32(fd_count);
    if (err) return err;

    // payload
    void* const buf = this->writeInplace(len);
    if (buf == nullptr)
        return BAD_VALUE;

    int* fds = nullptr;
    if (fd_count) {
        fds = new (std::nothrow) int[fd_count];
        if (fds == nullptr) {
            ALOGE("write: failed to allocate requested %zu fds", fd_count);
            return BAD_VALUE;
        }
    }

    err = val.flatten(buf, len, fds, fd_count);
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        err = this->writeDupFileDescriptor( fds[i] );
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}

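// Write a flat_binder_object into the data buffer; when it carries meta-data
// (nullMetaData or a non-null binder), also record its offset in mObjects and
// acquire the reference it represents.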
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        if (mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
        if ((mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}

status_t Parcel::writeNoException()
{
    binder::Status status;
    return status.writeToParcel(this);
}

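// Ensure that a plain-data read ending at upperBound does not overlap any
// flattened binder object; mObjects is lazily sorted so the check can walk
// forward from mNextObjectHint.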
status_t Parcel::validateReadData(size_t upperBound) const
{
    // Don't allow non-object reads on object data
    if (mObjectsSorted || mObjectsSize <= 1) {
data_sorted:
        // Expect to check only against the next object
        if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = mNextObjectHint;
            do {
                if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    ALOGE("Attempt to read from protected data in Parcel %p", this);
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
            mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }
    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = mObjects + mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    mNextObjectHint = 0;
    mObjectsSorted = true;
    goto data_sorted;
}

status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        if (mObjectsSize > 0) {
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
                return err;
            }
        }
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        if (mObjectsSize > 0) {
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
                return nullptr;
            }
        }

        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return nullptr;
}

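// readAligned()/writeAligned() handle primitive types whose size is already a
// multiple of 4, so no extra padding is needed (enforced by the compile-time
// assert below).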
1428 template<class T>
readAligned(T * pArg) const1429 status_t Parcel::readAligned(T *pArg) const {
1430     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1431 
1432     if ((mDataPos+sizeof(T)) <= mDataSize) {
1433         if (mObjectsSize > 0) {
1434             status_t err = validateReadData(mDataPos + sizeof(T));
1435             if(err != NO_ERROR) {
1436                 // Still increment the data position by the expected length
1437                 mDataPos += sizeof(T);
1438                 return err;
1439             }
1440         }
1441 
1442         const void* data = mData+mDataPos;
1443         mDataPos += sizeof(T);
1444         *pArg =  *reinterpret_cast<const T*>(data);
1445         return NO_ERROR;
1446     } else {
1447         return NOT_ENOUGH_DATA;
1448     }
1449 }
1450 
1451 template<class T>
readAligned() const1452 T Parcel::readAligned() const {
1453     T result;
1454     if (readAligned(&result) != NO_ERROR) {
1455         result = 0;
1456     }
1457 
1458     return result;
1459 }
1460 
1461 template<class T>
writeAligned(T val)1462 status_t Parcel::writeAligned(T val) {
1463     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1464 
1465     if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1466 restart_write:
1467         *reinterpret_cast<T*>(mData+mDataPos) = val;
1468         return finishWrite(sizeof(val));
1469     }
1470 
1471     status_t err = growData(sizeof(val));
1472     if (err == NO_ERROR) goto restart_write;
1473     return err;
1474 }
1475 
readByteVector(std::vector<int8_t> * val) const1476 status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1477     size_t size;
1478     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1479     return readByteVectorInternal(val, size);
1480 }
1481 
readByteVector(std::vector<uint8_t> * val) const1482 status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1483     size_t size;
1484     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1485     return readByteVectorInternal(val, size);
1486 }
1487 
readByteVector(std::unique_ptr<std::vector<int8_t>> * val) const1488 status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1489     size_t size;
1490     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1491     if (val->get() == nullptr) {
1492         // reserveOutVector does not create the out vector if size is < 0.
1493         // This occurs when writing a null byte vector.
1494         return OK;
1495     }
1496     return readByteVectorInternal(val->get(), size);
1497 }
1498 
readByteVector(std::unique_ptr<std::vector<uint8_t>> * val) const1499 status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1500     size_t size;
1501     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1502     if (val->get() == nullptr) {
1503         // reserveOutVector does not create the out vector if size is < 0.
1504         // This occurs when writing a null byte vector.
1505         return OK;
1506     }
1507     return readByteVectorInternal(val->get(), size);
1508 }
1509 
readInt32Vector(std::unique_ptr<std::vector<int32_t>> * val) const1510 status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1511     return readNullableTypedVector(val, &Parcel::readInt32);
1512 }
1513 
readInt32Vector(std::vector<int32_t> * val) const1514 status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1515     return readTypedVector(val, &Parcel::readInt32);
1516 }
1517 
readInt64Vector(std::unique_ptr<std::vector<int64_t>> * val) const1518 status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1519     return readNullableTypedVector(val, &Parcel::readInt64);
1520 }
1521 
readInt64Vector(std::vector<int64_t> * val) const1522 status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1523     return readTypedVector(val, &Parcel::readInt64);
1524 }
1525 
readUint64Vector(std::unique_ptr<std::vector<uint64_t>> * val) const1526 status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const {
1527     return readNullableTypedVector(val, &Parcel::readUint64);
1528 }
1529 
readUint64Vector(std::vector<uint64_t> * val) const1530 status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const {
1531     return readTypedVector(val, &Parcel::readUint64);
1532 }
1533 
readFloatVector(std::unique_ptr<std::vector<float>> * val) const1534 status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1535     return readNullableTypedVector(val, &Parcel::readFloat);
1536 }
1537 
1538 status_t Parcel::readFloatVector(std::vector<float>* val) const {
1539     return readTypedVector(val, &Parcel::readFloat);
1540 }
1541 
1542 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1543     return readNullableTypedVector(val, &Parcel::readDouble);
1544 }
1545 
1546 status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1547     return readTypedVector(val, &Parcel::readDouble);
1548 }
1549 
1550 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1551     const int32_t start = dataPosition();
1552     int32_t size;
1553     status_t status = readInt32(&size);
1554     val->reset();
1555 
1556     if (status != OK || size < 0) {
1557         return status;
1558     }
1559 
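    // A non-negative size means the vector is present; rewind to the length field so the
    // non-nullable overload below can consume it again.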
1560     setDataPosition(start);
1561     val->reset(new (std::nothrow) std::vector<bool>());
1562 
1563     status = readBoolVector(val->get());
1564 
1565     if (status != OK) {
1566         val->reset();
1567     }
1568 
1569     return status;
1570 }
1571 
1572 status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1573     int32_t size;
1574     status_t status = readInt32(&size);
1575 
1576     if (status != OK) {
1577         return status;
1578     }
1579 
1580     if (size < 0) {
1581         return UNEXPECTED_NULL;
1582     }
1583 
1584     val->resize(size);
1585 
1586     /* C++ bool handling means a vector of bools isn't necessarily addressable
1587      * (we might use individual bits)
1588      */
1589     bool data;
1590     for (int32_t i = 0; i < size; ++i) {
1591         status = readBool(&data);
1592         (*val)[i] = data;
1593 
1594         if (status != OK) {
1595             return status;
1596         }
1597     }
1598 
1599     return OK;
1600 }
1601 
1602 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1603     return readNullableTypedVector(val, &Parcel::readChar);
1604 }
1605 
1606 status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1607     return readTypedVector(val, &Parcel::readChar);
1608 }
1609 
1610 status_t Parcel::readString16Vector(
1611         std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1612     return readNullableTypedVector(val, &Parcel::readString16);
1613 }
1614 
1615 status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1616     return readTypedVector(val, &Parcel::readString16);
1617 }
1618 
1619 status_t Parcel::readUtf8VectorFromUtf16Vector(
1620         std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1621     return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1622 }
1623 
1624 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1625     return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1626 }
1627 
1628 status_t Parcel::readInt32(int32_t *pArg) const
1629 {
1630     return readAligned(pArg);
1631 }
1632 
1633 int32_t Parcel::readInt32() const
1634 {
1635     return readAligned<int32_t>();
1636 }
1637 
1638 status_t Parcel::readUint32(uint32_t *pArg) const
1639 {
1640     return readAligned(pArg);
1641 }
1642 
1643 uint32_t Parcel::readUint32() const
1644 {
1645     return readAligned<uint32_t>();
1646 }
1647 
1648 status_t Parcel::readInt64(int64_t *pArg) const
1649 {
1650     return readAligned(pArg);
1651 }
1652 
1653 
1654 int64_t Parcel::readInt64() const
1655 {
1656     return readAligned<int64_t>();
1657 }
1658 
1659 status_t Parcel::readUint64(uint64_t *pArg) const
1660 {
1661     return readAligned(pArg);
1662 }
1663 
1664 uint64_t Parcel::readUint64() const
1665 {
1666     return readAligned<uint64_t>();
1667 }
1668 
1669 status_t Parcel::readPointer(uintptr_t *pArg) const
1670 {
1671     status_t ret;
1672     binder_uintptr_t ptr;
1673     ret = readAligned(&ptr);
1674     if (!ret)
1675         *pArg = ptr;
1676     return ret;
1677 }
1678 
1679 uintptr_t Parcel::readPointer() const
1680 {
1681     return readAligned<binder_uintptr_t>();
1682 }
1683 
1684 
1685 status_t Parcel::readFloat(float *pArg) const
1686 {
1687     return readAligned(pArg);
1688 }
1689 
1690 
1691 float Parcel::readFloat() const
1692 {
1693     return readAligned<float>();
1694 }
1695 
1696 #if defined(__mips__) && defined(__mips_hard_float)
1697 
1698 status_t Parcel::readDouble(double *pArg) const
1699 {
1700     union {
1701       double d;
1702       unsigned long long ll;
1703     } u;
1704     u.d = 0;
1705     status_t status;
1706     status = readAligned(&u.ll);
1707     *pArg = u.d;
1708     return status;
1709 }
1710 
1711 double Parcel::readDouble() const
1712 {
1713     union {
1714       double d;
1715       unsigned long long ll;
1716     } u;
1717     u.ll = readAligned<unsigned long long>();
1718     return u.d;
1719 }
1720 
1721 #else
1722 
1723 status_t Parcel::readDouble(double *pArg) const
1724 {
1725     return readAligned(pArg);
1726 }
1727 
1728 double Parcel::readDouble() const
1729 {
1730     return readAligned<double>();
1731 }
1732 
1733 #endif
1734 
1735 status_t Parcel::readIntPtr(intptr_t *pArg) const
1736 {
1737     return readAligned(pArg);
1738 }
1739 
1740 
1741 intptr_t Parcel::readIntPtr() const
1742 {
1743     return readAligned<intptr_t>();
1744 }
1745 
1746 status_t Parcel::readBool(bool *pArg) const
1747 {
1748     int32_t tmp = 0;
1749     status_t ret = readInt32(&tmp);
1750     *pArg = (tmp != 0);
1751     return ret;
1752 }
1753 
1754 bool Parcel::readBool() const
1755 {
1756     return readInt32() != 0;
1757 }
1758 
1759 status_t Parcel::readChar(char16_t *pArg) const
1760 {
1761     int32_t tmp = 0;
1762     status_t ret = readInt32(&tmp);
1763     *pArg = char16_t(tmp);
1764     return ret;
1765 }
1766 
1767 char16_t Parcel::readChar() const
1768 {
1769     return char16_t(readInt32());
1770 }
1771 
1772 status_t Parcel::readByte(int8_t *pArg) const
1773 {
1774     int32_t tmp = 0;
1775     status_t ret = readInt32(&tmp);
1776     *pArg = int8_t(tmp);
1777     return ret;
1778 }
1779 
1780 int8_t Parcel::readByte() const
1781 {
1782     return int8_t(readInt32());
1783 }
1784 
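// Strings are carried over binder as length-prefixed UTF-16. The reader below measures the
// required UTF-8 length first, then resizes the output string and converts into it in place.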
1785 status_t Parcel::readUtf8FromUtf16(std::string* str) const {
1786     size_t utf16Size = 0;
1787     const char16_t* src = readString16Inplace(&utf16Size);
1788     if (!src) {
1789         return UNEXPECTED_NULL;
1790     }
1791 
1792     // Save ourselves the trouble; we're done.
1793     if (utf16Size == 0u) {
1794         str->clear();
1795         return NO_ERROR;
1796     }
1797 
1798     // Allow for closing '\0'
1799     ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
1800     if (utf8Size < 1) {
1801         return BAD_VALUE;
1802     }
1803     // Note that while it is probably safe to assume string::resize keeps a
1804     // spare byte around for the trailing null, we still pass the size including the trailing null
1805     str->resize(utf8Size);
1806     utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
1807     str->resize(utf8Size - 1);
1808     return NO_ERROR;
1809 }
1810 
1811 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
1812     const int32_t start = dataPosition();
1813     int32_t size;
1814     status_t status = readInt32(&size);
1815     str->reset();
1816 
1817     if (status != OK || size < 0) {
1818         return status;
1819     }
1820 
1821     setDataPosition(start);
1822     str->reset(new (std::nothrow) std::string());
1823     return readUtf8FromUtf16(str->get());
1824 }
1825 
1826 const char* Parcel::readCString() const
1827 {
1828     if (mDataPos < mDataSize) {
1829         const size_t avail = mDataSize-mDataPos;
1830         const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1831         // is the string's trailing NUL within the parcel's valid bounds?
1832         const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1833         if (eos) {
1834             const size_t len = eos - str;
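            // Consume the string, its trailing NUL, and the padding up to the next
            // 4-byte boundary (parcel data is always written 4-byte aligned).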
1835             mDataPos += pad_size(len+1);
1836             ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1837             return str;
1838         }
1839     }
1840     return nullptr;
1841 }
1842 
1843 String8 Parcel::readString8() const
1844 {
1845     size_t len;
1846     const char* str = readString8Inplace(&len);
1847     if (str) return String8(str, len);
1848     ALOGE("Reading a NULL string not supported here.");
1849     return String8();
1850 }
1851 
1852 status_t Parcel::readString8(String8* pArg) const
1853 {
1854     size_t len;
1855     const char* str = readString8Inplace(&len);
1856     if (str) {
1857         pArg->setTo(str, len);
1858         return 0;
1859     } else {
1860         *pArg = String8();
1861         return UNEXPECTED_NULL;
1862     }
1863 }
1864 
1865 const char* Parcel::readString8Inplace(size_t* outLen) const
1866 {
1867     int32_t size = readInt32();
1868     // watch for potential int overflow from size+1
1869     if (size >= 0 && size < INT32_MAX) {
1870         *outLen = size;
1871         const char* str = (const char*)readInplace(size+1);
1872         if (str != nullptr) {
1873             return str;
1874         }
1875     }
1876     *outLen = 0;
1877     return nullptr;
1878 }
1879 
1880 String16 Parcel::readString16() const
1881 {
1882     size_t len;
1883     const char16_t* str = readString16Inplace(&len);
1884     if (str) return String16(str, len);
1885     ALOGE("Reading a NULL string not supported here.");
1886     return String16();
1887 }
1888 
1889 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
1890 {
1891     const int32_t start = dataPosition();
1892     int32_t size;
1893     status_t status = readInt32(&size);
1894     pArg->reset();
1895 
1896     if (status != OK || size < 0) {
1897         return status;
1898     }
1899 
1900     setDataPosition(start);
1901     pArg->reset(new (std::nothrow) String16());
1902 
1903     status = readString16(pArg->get());
1904 
1905     if (status != OK) {
1906         pArg->reset();
1907     }
1908 
1909     return status;
1910 }
1911 
1912 status_t Parcel::readString16(String16* pArg) const
1913 {
1914     size_t len;
1915     const char16_t* str = readString16Inplace(&len);
1916     if (str) {
1917         pArg->setTo(str, len);
1918         return 0;
1919     } else {
1920         *pArg = String16();
1921         return UNEXPECTED_NULL;
1922     }
1923 }
1924 
1925 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1926 {
1927     int32_t size = readInt32();
1928     // watch for potential int overflow from size+1
1929     if (size >= 0 && size < INT32_MAX) {
1930         *outLen = size;
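        // size+1 characters are read so the terminating NUL written after the payload is
        // consumed as well; the returned pointer aliases the Parcel's own buffer.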
1931         const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1932         if (str != nullptr) {
1933             return str;
1934         }
1935     }
1936     *outLen = 0;
1937     return nullptr;
1938 }
1939 
1940 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
1941 {
1942     status_t status = readNullableStrongBinder(val);
1943     if (status == OK && !val->get()) {
1944         status = UNEXPECTED_NULL;
1945     }
1946     return status;
1947 }
1948 
1949 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
1950 {
1951     return unflattenBinder(val);
1952 }
1953 
1954 sp<IBinder> Parcel::readStrongBinder() const
1955 {
1956     sp<IBinder> val;
1957     // Note that a lot of code in Android reads binders by hand with this
1958     // method, and that code has historically been ok with getting nullptr
1959     // back (while ignoring error codes).
1960     readNullableStrongBinder(&val);
1961     return val;
1962 }
1963 
1964 status_t Parcel::readParcelable(Parcelable* parcelable) const {
1965     int32_t have_parcelable = 0;
1966     status_t status = readInt32(&have_parcelable);
1967     if (status != OK) {
1968         return status;
1969     }
1970     if (!have_parcelable) {
1971         return UNEXPECTED_NULL;
1972     }
1973     return parcelable->readFromParcel(this);
1974 }
1975 
1976 int32_t Parcel::readExceptionCode() const
1977 {
1978     binder::Status status;
1979     status.readFromParcel(*this);
1980     return status.exceptionCode();
1981 }
1982 
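// Reconstructs a native_handle written as [numFds][numInts][fds...][ints...]. Every fd read
// from the parcel is duplicated with O_CLOEXEC, so the returned handle is owned by the caller,
// who is expected to release it with native_handle_close() and native_handle_delete().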
1983 native_handle* Parcel::readNativeHandle() const
1984 {
1985     int numFds, numInts;
1986     status_t err;
1987     err = readInt32(&numFds);
1988     if (err != NO_ERROR) return nullptr;
1989     err = readInt32(&numInts);
1990     if (err != NO_ERROR) return nullptr;
1991 
1992     native_handle* h = native_handle_create(numFds, numInts);
1993     if (!h) {
1994         return nullptr;
1995     }
1996 
1997     for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
1998         h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
1999         if (h->data[i] < 0) {
2000             for (int j = 0; j < i; j++) {
2001                 close(h->data[j]);
2002             }
2003             native_handle_delete(h);
2004             return nullptr;
2005         }
2006     }
2007     err = read(h->data + numFds, sizeof(int)*numInts);
2008     if (err != NO_ERROR) {
2009         native_handle_close(h);
2010         native_handle_delete(h);
2011         h = nullptr;
2012     }
2013     return h;
2014 }
2015 
2016 int Parcel::readFileDescriptor() const
2017 {
2018     const flat_binder_object* flat = readObject(true);
2019 
2020     if (flat && flat->hdr.type == BINDER_TYPE_FD) {
2021         return flat->handle;
2022     }
2023 
2024     return BAD_TYPE;
2025 }
2026 
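// Wire format (see the in-sync note below): an int32 flag indicating whether a communication fd
// follows, then the fd itself, then optionally the comm fd. When a comm fd is present, a DETACHED
// status is written back through it so the sending ParcelFileDescriptor can observe the detach.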
2027 int Parcel::readParcelFileDescriptor() const
2028 {
2029     int32_t hasComm = readInt32();
2030     int fd = readFileDescriptor();
2031     if (hasComm != 0) {
2032         // detach (owned by the binder driver)
2033         int comm = readFileDescriptor();
2034 
2035         // warning: this must be kept in sync with:
2036         // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
2037         enum ParcelFileDescriptorStatus {
2038             DETACHED = 2,
2039         };
2040 
2041 #if BYTE_ORDER == BIG_ENDIAN
2042         const int32_t message = ParcelFileDescriptorStatus::DETACHED;
2043 #endif
2044 #if BYTE_ORDER == LITTLE_ENDIAN
2045         const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
2046 #endif
2047 
2048         ssize_t written = TEMP_FAILURE_RETRY(
2049             ::write(comm, &message, sizeof(message)));
2050 
2051         if (written == -1 || written != sizeof(message)) {
2052             ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
2053                 written, strerror(errno));
2054             return BAD_TYPE;
2055         }
2056     }
2057     return fd;
2058 }
2059 
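// Duplicates the fd at the current read position into a caller-owned unique_fd. A minimal usage
// sketch (the surrounding transaction code is illustrative only):
//
//     base::unique_fd fd;
//     if (parcel.readUniqueFileDescriptor(&fd) != OK) {
//         // handle a missing or invalid descriptor
//     }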
2060 status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const
2061 {
2062     int got = readFileDescriptor();
2063 
2064     if (got == BAD_TYPE) {
2065         return BAD_TYPE;
2066     }
2067 
2068     val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2069 
2070     if (val->get() < 0) {
2071         return BAD_VALUE;
2072     }
2073 
2074     return OK;
2075 }
2076 
2077 status_t Parcel::readUniqueParcelFileDescriptor(base::unique_fd* val) const
2078 {
2079     int got = readParcelFileDescriptor();
2080 
2081     if (got == BAD_TYPE) {
2082         return BAD_TYPE;
2083     }
2084 
2085     val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2086 
2087     if (val->get() < 0) {
2088         return BAD_VALUE;
2089     }
2090 
2091     return OK;
2092 }
2093 
2094 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const {
2095     return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2096 }
2097 
2098 status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const {
2099     return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2100 }
2101 
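// Blobs are either stored inline in the parcel (BLOB_INPLACE) or, for larger payloads, carried in
// an ashmem region whose fd is written instead. For ashmem blobs the fd and its size are validated
// and the region is mmap()ed read-only or read-write according to the blob type the writer chose.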
2102 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2103 {
2104     int32_t blobType;
2105     status_t status = readInt32(&blobType);
2106     if (status) return status;
2107 
2108     if (blobType == BLOB_INPLACE) {
2109         ALOGV("readBlob: read in place");
2110         const void* ptr = readInplace(len);
2111         if (!ptr) return BAD_VALUE;
2112 
2113         outBlob->init(-1, const_cast<void*>(ptr), len, false);
2114         return NO_ERROR;
2115     }
2116 
2117     ALOGV("readBlob: read from ashmem");
2118     bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2119     int fd = readFileDescriptor();
2120     if (fd == int(BAD_TYPE)) return BAD_VALUE;
2121 
2122     if (!ashmem_valid(fd)) {
2123         ALOGE("invalid fd");
2124         return BAD_VALUE;
2125     }
2126     int size = ashmem_get_size_region(fd);
2127     if (size < 0 || size_t(size) < len) {
2128         ALOGE("request size %zu does not match fd size %d", len, size);
2129         return BAD_VALUE;
2130     }
2131     void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2132             MAP_SHARED, fd, 0);
2133     if (ptr == MAP_FAILED) return NO_MEMORY;
2134 
2135     outBlob->init(fd, ptr, len, isMutable);
2136     return NO_ERROR;
2137 }
2138 
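// Reads a Flattenable written as [length int32][fd count int32][padded payload][fds...]. The
// payload is consumed in place; each fd is duplicated with O_CLOEXEC before being handed to
// unflatten(), and already-duplicated fds are closed again if a later dup fails.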
2139 status_t Parcel::read(FlattenableHelperInterface& val) const
2140 {
2141     // size
2142     const size_t len = this->readInt32();
2143     const size_t fd_count = this->readInt32();
2144 
2145     if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2146         // don't accept size_t values which may have come from an
2147         // inadvertent conversion from a negative int.
2148         return BAD_VALUE;
2149     }
2150 
2151     // payload
2152     void const* const buf = this->readInplace(pad_size(len));
2153     if (buf == nullptr)
2154         return BAD_VALUE;
2155 
2156     int* fds = nullptr;
2157     if (fd_count) {
2158         fds = new (std::nothrow) int[fd_count];
2159         if (fds == nullptr) {
2160             ALOGE("read: failed to allocate requested %zu fds", fd_count);
2161             return BAD_VALUE;
2162         }
2163     }
2164 
2165     status_t err = NO_ERROR;
2166     for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2167         int fd = this->readFileDescriptor();
2168         if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2169             err = BAD_VALUE;
2170             ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2171                   i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2172             // Close all the file descriptors that were dup-ed.
2173             for (size_t j=0; j<i ;j++) {
2174                 close(fds[j]);
2175             }
2176         }
2177     }
2178 
2179     if (err == NO_ERROR) {
2180         err = val.unflatten(buf, len, fds, fd_count);
2181     }
2182 
2183     if (fd_count) {
2184         delete [] fds;
2185     }
2186 
2187     return err;
2188 }
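// Returns the flat_binder_object at the current data position, but only if that position is
// recorded in the parcel's object offset table (searched forward from mNextObjectHint, then
// backward). This keeps plain payload bytes from being misinterpreted as a binder object or fd.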
2189 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2190 {
2191     const size_t DPOS = mDataPos;
2192     if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2193         const flat_binder_object* obj
2194                 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2195         mDataPos = DPOS + sizeof(flat_binder_object);
2196         if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2197             // When transferring a NULL object, we don't write it into
2198             // the object list, so we don't want to check for it when
2199             // reading.
2200             ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2201             return obj;
2202         }
2203 
2204         // Ensure that this object is valid...
2205         binder_size_t* const OBJS = mObjects;
2206         const size_t N = mObjectsSize;
2207         size_t opos = mNextObjectHint;
2208 
2209         if (N > 0) {
2210             ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2211                  this, DPOS, opos);
2212 
2213             // Start at the current hint position, looking for an object at
2214             // the current data position.
2215             if (opos < N) {
2216                 while (opos < (N-1) && OBJS[opos] < DPOS) {
2217                     opos++;
2218                 }
2219             } else {
2220                 opos = N-1;
2221             }
2222             if (OBJS[opos] == DPOS) {
2223                 // Found it!
2224                 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2225                      this, DPOS, opos);
2226                 mNextObjectHint = opos+1;
2227                 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2228                 return obj;
2229             }
2230 
2231             // Look backwards for it...
2232             while (opos > 0 && OBJS[opos] > DPOS) {
2233                 opos--;
2234             }
2235             if (OBJS[opos] == DPOS) {
2236                 // Found it!
2237                 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2238                      this, DPOS, opos);
2239                 mNextObjectHint = opos+1;
2240                 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2241                 return obj;
2242             }
2243         }
2244         ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2245              this, DPOS);
2246     }
2247     return nullptr;
2248 }
2249 
2250 void Parcel::closeFileDescriptors()
2251 {
2252     size_t i = mObjectsSize;
2253     if (i > 0) {
2254         //ALOGI("Closing file descriptors for %zu objects...", i);
2255     }
2256     while (i > 0) {
2257         i--;
2258         const flat_binder_object* flat
2259             = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2260         if (flat->hdr.type == BINDER_TYPE_FD) {
2261             //ALOGI("Closing fd: %ld", flat->handle);
2262             close(flat->handle);
2263         }
2264     }
2265 }
2266 
2267 uintptr_t Parcel::ipcData() const
2268 {
2269     return reinterpret_cast<uintptr_t>(mData);
2270 }
2271 
2272 size_t Parcel::ipcDataSize() const
2273 {
2274     return (mDataSize > mDataPos ? mDataSize : mDataPos);
2275 }
2276 
2277 uintptr_t Parcel::ipcObjects() const
2278 {
2279     return reinterpret_cast<uintptr_t>(mObjects);
2280 }
2281 
2282 size_t Parcel::ipcObjectsCount() const
2283 {
2284     return mObjectsSize;
2285 }
2286 
2287 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2288     const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2289 {
2290     binder_size_t minOffset = 0;
2291     freeDataNoInit();
2292     mError = NO_ERROR;
2293     mData = const_cast<uint8_t*>(data);
2294     mDataSize = mDataCapacity = dataSize;
2295     //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2296     mDataPos = 0;
2297     ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2298     mObjects = const_cast<binder_size_t*>(objects);
2299     mObjectsSize = mObjectsCapacity = objectsCount;
2300     mNextObjectHint = 0;
2301     mObjectsSorted = false;
2302     mOwner = relFunc;
2303     mOwnerCookie = relCookie;
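    // Validate the object offsets supplied by the driver: they must not overlap, must appear in
    // increasing order, and must reference object types this process knows how to release;
    // otherwise the object table is discarded below.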
2304     for (size_t i = 0; i < mObjectsSize; i++) {
2305         binder_size_t offset = mObjects[i];
2306         if (offset < minOffset) {
2307             ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2308                   __func__, (uint64_t)offset, (uint64_t)minOffset);
2309             mObjectsSize = 0;
2310             break;
2311         }
2312         const flat_binder_object* flat
2313             = reinterpret_cast<const flat_binder_object*>(mData + offset);
2314         uint32_t type = flat->hdr.type;
2315         if (!(type == BINDER_TYPE_BINDER || type == BINDER_TYPE_HANDLE ||
2316               type == BINDER_TYPE_FD)) {
2317             // We should never receive other types (e.g. BINDER_TYPE_FDA) as long as we don't support
2318             // them in libbinder. If we do receive them, it probably means a kernel bug; try to
2319             // recover gracefully by clearing out the objects, and releasing the objects we do
2320             // know about.
2321             android_errorWriteLog(0x534e4554, "135930648");
2322             ALOGE("%s: unsupported type object (%" PRIu32 ") at offset %" PRIu64 "\n",
2323                   __func__, type, (uint64_t)offset);
2324             releaseObjects();
2325             mObjectsSize = 0;
2326             break;
2327         }
2328         minOffset = offset + sizeof(flat_binder_object);
2329     }
2330     scanForFds();
2331 }
2332 
2333 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2334 {
2335     to << "Parcel(";
2336 
2337     if (errorCheck() != NO_ERROR) {
2338         const status_t err = errorCheck();
2339         to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2340     } else if (dataSize() > 0) {
2341         const uint8_t* DATA = data();
2342         to << indent << HexDump(DATA, dataSize()) << dedent;
2343         const binder_size_t* OBJS = mObjects;
2344         const size_t N = objectsCount();
2345         for (size_t i=0; i<N; i++) {
2346             const flat_binder_object* flat
2347                 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2348             to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2349                 << TypeCode(flat->hdr.type & 0x7f7f7f00)
2350                 << " = " << flat->binder;
2351         }
2352     } else {
2353         to << "NULL";
2354     }
2355 
2356     to << ")";
2357 }
2358 
2359 void Parcel::releaseObjects()
2360 {
2361     size_t i = mObjectsSize;
2362     if (i == 0) {
2363         return;
2364     }
2365     sp<ProcessState> proc(ProcessState::self());
2366     uint8_t* const data = mData;
2367     binder_size_t* const objects = mObjects;
2368     while (i > 0) {
2369         i--;
2370         const flat_binder_object* flat
2371             = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2372         release_object(proc, *flat, this, &mOpenAshmemSize);
2373     }
2374 }
2375 
2376 void Parcel::acquireObjects()
2377 {
2378     size_t i = mObjectsSize;
2379     if (i == 0) {
2380         return;
2381     }
2382     const sp<ProcessState> proc(ProcessState::self());
2383     uint8_t* const data = mData;
2384     binder_size_t* const objects = mObjects;
2385     while (i > 0) {
2386         i--;
2387         const flat_binder_object* flat
2388             = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2389         acquire_object(proc, *flat, this, &mOpenAshmemSize);
2390     }
2391 }
2392 
2393 void Parcel::freeData()
2394 {
2395     freeDataNoInit();
2396     initState();
2397 }
2398 
2399 void Parcel::freeDataNoInit()
2400 {
2401     if (mOwner) {
2402         LOG_ALLOC("Parcel %p: freeing other owner data", this);
2403         //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2404         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2405     } else {
2406         LOG_ALLOC("Parcel %p: freeing allocated data", this);
2407         releaseObjects();
2408         if (mData) {
2409             LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2410             pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2411             if (mDataCapacity <= gParcelGlobalAllocSize) {
2412               gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2413             } else {
2414               gParcelGlobalAllocSize = 0;
2415             }
2416             if (gParcelGlobalAllocCount > 0) {
2417               gParcelGlobalAllocCount--;
2418             }
2419             pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2420             free(mData);
2421         }
2422         if (mObjects) free(mObjects);
2423     }
2424 }
2425 
2426 status_t Parcel::growData(size_t len)
2427 {
2428     if (len > INT32_MAX) {
2429         // don't accept size_t values which may have come from an
2430         // inadvertent conversion from a negative int.
2431         return BAD_VALUE;
2432     }
2433 
2434     if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
2435     if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
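    // Grow to roughly 1.5x of the required size so repeated small writes amortize the copies.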
2436     size_t newSize = ((mDataSize+len)*3)/2;
2437     return (newSize <= mDataSize)
2438             ? (status_t) NO_MEMORY
2439             : continueWrite(newSize);
2440 }
2441 
2442 status_t Parcel::restartWrite(size_t desired)
2443 {
2444     if (desired > INT32_MAX) {
2445         // don't accept size_t values which may have come from an
2446         // inadvertent conversion from a negative int.
2447         return BAD_VALUE;
2448     }
2449 
2450     if (mOwner) {
2451         freeData();
2452         return continueWrite(desired);
2453     }
2454 
2455     uint8_t* data = (uint8_t*)realloc(mData, desired);
2456     if (!data && desired > mDataCapacity) {
2457         mError = NO_MEMORY;
2458         return NO_MEMORY;
2459     }
2460 
2461     releaseObjects();
2462 
2463     if (data) {
2464         LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2465         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2466         gParcelGlobalAllocSize += desired;
2467         gParcelGlobalAllocSize -= mDataCapacity;
2468         if (!mData) {
2469             gParcelGlobalAllocCount++;
2470         }
2471         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2472         mData = data;
2473         mDataCapacity = desired;
2474     }
2475 
2476     mDataSize = mDataPos = 0;
2477     ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2478     ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2479 
2480     free(mObjects);
2481     mObjects = nullptr;
2482     mObjectsSize = mObjectsCapacity = 0;
2483     mNextObjectHint = 0;
2484     mObjectsSorted = false;
2485     mHasFds = false;
2486     mFdsKnown = true;
2487     mAllowFds = true;
2488 
2489     return NO_ERROR;
2490 }
2491 
2492 status_t Parcel::continueWrite(size_t desired)
2493 {
2494     if (desired > INT32_MAX) {
2495         // don't accept size_t values which may have come from an
2496         // inadvertent conversion from a negative int.
2497         return BAD_VALUE;
2498     }
2499 
2500     // If shrinking, first adjust for any objects that appear
2501     // after the new data size.
2502     size_t objectsSize = mObjectsSize;
2503     if (desired < mDataSize) {
2504         if (desired == 0) {
2505             objectsSize = 0;
2506         } else {
2507             while (objectsSize > 0) {
2508                 if (mObjects[objectsSize-1] < desired)
2509                     break;
2510                 objectsSize--;
2511             }
2512         }
2513     }
2514 
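    // Three cases follow: someone else owns the data (mOwner set), so copy what we keep into our
    // own allocation; we already own a buffer, so realloc or shrink it in place; or no buffer
    // exists yet and this is the parcel's first allocation.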
2515     if (mOwner) {
2516         // If the size is going to zero, just release the owner's data.
2517         if (desired == 0) {
2518             freeData();
2519             return NO_ERROR;
2520         }
2521 
2522         // If there is a different owner, we need to take
2523         // possession.
2524         uint8_t* data = (uint8_t*)malloc(desired);
2525         if (!data) {
2526             mError = NO_MEMORY;
2527             return NO_MEMORY;
2528         }
2529         binder_size_t* objects = nullptr;
2530 
2531         if (objectsSize) {
2532             objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2533             if (!objects) {
2534                 free(data);
2535 
2536                 mError = NO_MEMORY;
2537                 return NO_MEMORY;
2538             }
2539 
2540             // Little hack to only acquire references on objects
2541             // we will be keeping.
2542             size_t oldObjectsSize = mObjectsSize;
2543             mObjectsSize = objectsSize;
2544             acquireObjects();
2545             mObjectsSize = oldObjectsSize;
2546         }
2547 
2548         if (mData) {
2549             memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2550         }
2551         if (objects && mObjects) {
2552             memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2553         }
2554         //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2555         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2556         mOwner = nullptr;
2557 
2558         LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2559         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2560         gParcelGlobalAllocSize += desired;
2561         gParcelGlobalAllocCount++;
2562         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2563 
2564         mData = data;
2565         mObjects = objects;
2566         mDataSize = (mDataSize < desired) ? mDataSize : desired;
2567         ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2568         mDataCapacity = desired;
2569         mObjectsSize = mObjectsCapacity = objectsSize;
2570         mNextObjectHint = 0;
2571         mObjectsSorted = false;
2572 
2573     } else if (mData) {
2574         if (objectsSize < mObjectsSize) {
2575             // Need to release refs on any objects we are dropping.
2576             const sp<ProcessState> proc(ProcessState::self());
2577             for (size_t i=objectsSize; i<mObjectsSize; i++) {
2578                 const flat_binder_object* flat
2579                     = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2580                 if (flat->hdr.type == BINDER_TYPE_FD) {
2581                     // will need to rescan because we may have lopped off the only FDs
2582                     mFdsKnown = false;
2583                 }
2584                 release_object(proc, *flat, this, &mOpenAshmemSize);
2585             }
2586 
2587             if (objectsSize == 0) {
2588                 free(mObjects);
2589                 mObjects = nullptr;
2590                 mObjectsCapacity = 0;
2591             } else {
2592                 binder_size_t* objects =
2593                     (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2594                 if (objects) {
2595                     mObjects = objects;
2596                     mObjectsCapacity = objectsSize;
2597                 }
2598             }
2599             mObjectsSize = objectsSize;
2600             mNextObjectHint = 0;
2601             mObjectsSorted = false;
2602         }
2603 
2604         // We own the data, so we can just do a realloc().
2605         if (desired > mDataCapacity) {
2606             uint8_t* data = (uint8_t*)realloc(mData, desired);
2607             if (data) {
2608                 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2609                         desired);
2610                 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2611                 gParcelGlobalAllocSize += desired;
2612                 gParcelGlobalAllocSize -= mDataCapacity;
2613                 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2614                 mData = data;
2615                 mDataCapacity = desired;
2616             } else {
2617                 mError = NO_MEMORY;
2618                 return NO_MEMORY;
2619             }
2620         } else {
2621             if (mDataSize > desired) {
2622                 mDataSize = desired;
2623                 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2624             }
2625             if (mDataPos > desired) {
2626                 mDataPos = desired;
2627                 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2628             }
2629         }
2630 
2631     } else {
2632         // This is the first data.  Easy!
2633         uint8_t* data = (uint8_t*)malloc(desired);
2634         if (!data) {
2635             mError = NO_MEMORY;
2636             return NO_MEMORY;
2637         }
2638 
2639         if (!(mDataCapacity == 0 && mObjects == nullptr
2640              && mObjectsCapacity == 0)) {
2641             ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2642         }
2643 
2644         LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2645         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2646         gParcelGlobalAllocSize += desired;
2647         gParcelGlobalAllocCount++;
2648         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2649 
2650         mData = data;
2651         mDataSize = mDataPos = 0;
2652         ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2653         ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2654         mDataCapacity = desired;
2655     }
2656 
2657     return NO_ERROR;
2658 }
2659 
2660 void Parcel::initState()
2661 {
2662     LOG_ALLOC("Parcel %p: initState", this);
2663     mError = NO_ERROR;
2664     mData = nullptr;
2665     mDataSize = 0;
2666     mDataCapacity = 0;
2667     mDataPos = 0;
2668     ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2669     ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2670     mObjects = nullptr;
2671     mObjectsSize = 0;
2672     mObjectsCapacity = 0;
2673     mNextObjectHint = 0;
2674     mObjectsSorted = false;
2675     mHasFds = false;
2676     mFdsKnown = true;
2677     mAllowFds = true;
2678     mOwner = nullptr;
2679     mOpenAshmemSize = 0;
2680     mWorkSourceRequestHeaderPosition = 0;
2681     mRequestHeaderPresent = false;
2682 
2683     // racing multiple inits leads only to multiple identical writes, which is harmless
2684     if (gMaxFds == 0) {
2685         struct rlimit result;
2686         if (!getrlimit(RLIMIT_NOFILE, &result)) {
2687             gMaxFds = (size_t)result.rlim_cur;
2688             //ALOGI("parcel fd limit set to %zu", gMaxFds);
2689         } else {
2690             ALOGW("Unable to getrlimit: %s", strerror(errno));
2691             gMaxFds = 1024;
2692         }
2693     }
2694 }
2695 
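// Linear scan of the object table to recompute whether this parcel carries any file descriptors;
// the result is cached in mHasFds and mFdsKnown.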
2696 void Parcel::scanForFds() const
2697 {
2698     bool hasFds = false;
2699     for (size_t i=0; i<mObjectsSize; i++) {
2700         const flat_binder_object* flat
2701             = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2702         if (flat->hdr.type == BINDER_TYPE_FD) {
2703             hasFds = true;
2704             break;
2705         }
2706     }
2707     mHasFds = hasFds;
2708     mFdsKnown = true;
2709 }
2710 
2711 size_t Parcel::getBlobAshmemSize() const
2712 {
2713     // This used to return the size of all blobs that were written to ashmem; now it returns the
2714     // size of the ashmem currently referenced by this Parcel, which should be equivalent.
2715     // TODO: Remove method once ABI can be changed.
2716     return mOpenAshmemSize;
2717 }
2718 
2719 size_t Parcel::getOpenAshmemSize() const
2720 {
2721     return mOpenAshmemSize;
2722 }
2723 
2724 // --- Parcel::Blob ---
2725 
2726 Parcel::Blob::Blob() :
2727         mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
2728 }
2729 
2730 Parcel::Blob::~Blob() {
2731     release();
2732 }
2733 
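// Only ashmem-backed blobs (mFd != -1) hold a mapping that needs munmap(); in-place blobs point
// directly into the Parcel's buffer and are simply forgotten here.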
2734 void Parcel::Blob::release() {
2735     if (mFd != -1 && mData) {
2736         ::munmap(mData, mSize);
2737     }
2738     clear();
2739 }
2740 
2741 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2742     mFd = fd;
2743     mData = data;
2744     mSize = size;
2745     mMutable = isMutable;
2746 }
2747 
2748 void Parcel::Blob::clear() {
2749     mFd = -1;
2750     mData = nullptr;
2751     mSize = 0;
2752     mMutable = false;
2753 }
2754 
2755 } // namespace android
2756