1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "hw-Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <pthread.h>
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <sys/mman.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <sys/resource.h>
31 #include <unistd.h>
32
33 #include <hwbinder/Binder.h>
34 #include <hwbinder/BpHwBinder.h>
35 #include <hwbinder/IPCThreadState.h>
36 #include <hwbinder/Parcel.h>
37 #include <hwbinder/ProcessState.h>
38 #include <hwbinder/TextOutput.h>
39 #include <hwbinder/binder_kernel.h>
40
41 #include <cutils/ashmem.h>
42 #include <utils/Debug.h>
43 #include <utils/Log.h>
44 #include <utils/misc.h>
45 #include <utils/String8.h>
46 #include <utils/String16.h>
47
48 #include <private/binder/binder_module.h>
49 #include <hwbinder/Static.h>
50
51 #ifndef INT32_MAX
52 #define INT32_MAX ((int32_t)(2147483647))
53 #endif
54
55 #define LOG_REFS(...)
56 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
57 #define LOG_ALLOC(...)
58 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
59 #define LOG_BUFFER(...)
60 // #define LOG_BUFFER(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
61
62 // ---------------------------------------------------------------------------
63
64 // This macro should never be used at runtime, because too large a value
65 // of s could cause an integer overflow. Instead, you should always
66 // use the wrapper function pad_size(), which aborts on overflow.
67 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
68
69 static size_t pad_size(size_t s) {
70 if (s > (SIZE_T_MAX - 3)) {
71 abort();
72 }
73 return PAD_SIZE_UNSAFE(s);
74 }
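// For example, pad_size(1) == 4, pad_size(4) == 4, and pad_size(5) == 8:
// every primitive written below lands on a 4-byte boundary.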
75
76 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
77 #define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
78
79 // XXX This can be made public if we want to provide
80 // support for typed data.
81 struct small_flat_data
82 {
83 uint32_t type;
84 uint32_t data;
85 };
86
87 namespace android {
88 namespace hardware {
89
90 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
91 static size_t gParcelGlobalAllocSize = 0;
92 static size_t gParcelGlobalAllocCount = 0;
93
94 static size_t gMaxFds = 0;
95
96 static const size_t PARCEL_REF_CAP = 1024;
97
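// Takes the reference that a flattened binder object represents: a strong
// reference for local binders and remote handles, a weak reference for weak
// binders and weak handles. Called when such an object is written into a
// Parcel, and balanced by release_object() when the Parcel releases its data.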
98 void acquire_binder_object(const sp<ProcessState>& proc,
99 const flat_binder_object& obj, const void* who)
100 {
101 switch (obj.type) {
102 case BINDER_TYPE_BINDER:
103 if (obj.binder) {
104 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
105 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
106 }
107 return;
108 case BINDER_TYPE_WEAK_BINDER:
109 if (obj.binder)
110 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
111 return;
112 case BINDER_TYPE_HANDLE: {
113 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
114 if (b != NULL) {
115 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
116 b->incStrong(who);
117 }
118 return;
119 }
120 case BINDER_TYPE_WEAK_HANDLE: {
121 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
122 if (b != NULL) b.get_refs()->incWeak(who);
123 return;
124 }
125 }
126
127 ALOGD("Invalid object type 0x%08x", obj.type);
128 }
129
130 void acquire_object(const sp<ProcessState>& proc, const binder_object_header& obj,
131 const void *who) {
132 switch (obj.type) {
133 case BINDER_TYPE_BINDER:
134 case BINDER_TYPE_WEAK_BINDER:
135 case BINDER_TYPE_HANDLE:
136 case BINDER_TYPE_WEAK_HANDLE: {
137 const flat_binder_object& fbo = reinterpret_cast<const flat_binder_object&>(obj);
138 acquire_binder_object(proc, fbo, who);
139 break;
140 }
141 }
142 }
143
144 void release_object(const sp<ProcessState>& proc,
145 const flat_binder_object& obj, const void* who)
146 {
147 switch (obj.type) {
148 case BINDER_TYPE_BINDER:
149 if (obj.binder) {
150 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
151 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
152 }
153 return;
154 case BINDER_TYPE_WEAK_BINDER:
155 if (obj.binder)
156 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
157 return;
158 case BINDER_TYPE_HANDLE: {
159 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
160 if (b != NULL) {
161 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
162 b->decStrong(who);
163 }
164 return;
165 }
166 case BINDER_TYPE_WEAK_HANDLE: {
167 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
168 if (b != NULL) b.get_refs()->decWeak(who);
169 return;
170 }
171 case BINDER_TYPE_FD: {
172 if (obj.cookie != 0) { // owned
173 close(obj.handle);
174 }
175 return;
176 }
177 case BINDER_TYPE_PTR: {
178 // The relevant buffer is part of the transaction buffer and will be freed that way
179 return;
180 }
181 case BINDER_TYPE_FDA: {
182 // The enclosed file descriptors are closed in the kernel
183 return;
184 }
185 }
186
187 ALOGE("Invalid object type 0x%08x", obj.type);
188 }
189
190 inline static status_t finish_flatten_binder(
191 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
192 {
193 return out->writeObject(flat);
194 }
195
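// Flattens a strong binder reference into a flat_binder_object. A binder
// local to this process is written as BINDER_TYPE_BINDER, carrying its
// weakref table in 'binder' and the BHwBinder pointer in 'cookie'; a proxy
// to a remote binder is written as BINDER_TYPE_HANDLE, carrying only the
// driver-assigned handle. A null sp<> becomes a BINDER_TYPE_BINDER with both
// fields zeroed.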
196 status_t flatten_binder(const sp<ProcessState>& /*proc*/,
197 const sp<IBinder>& binder, Parcel* out)
198 {
199 flat_binder_object obj;
200
201 if (binder != NULL) {
202 BHwBinder *local = binder->localBinder();
203 if (!local) {
204 BpHwBinder *proxy = binder->remoteBinder();
205 if (proxy == NULL) {
206 ALOGE("null proxy");
207 }
208 const int32_t handle = proxy ? proxy->handle() : 0;
209 obj.type = BINDER_TYPE_HANDLE;
210 obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
211 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
212 obj.handle = handle;
213 obj.cookie = 0;
214 } else {
215 // Get policy and convert it
216 int policy = local->getMinSchedulingPolicy();
217 int priority = local->getMinSchedulingPriority();
218
219 obj.flags = priority & FLAT_BINDER_FLAG_PRIORITY_MASK;
220 obj.flags |= FLAT_BINDER_FLAG_ACCEPTS_FDS;
221 obj.flags |= (policy & 3) << FLAT_BINDER_FLAG_SCHEDPOLICY_SHIFT;
222 obj.type = BINDER_TYPE_BINDER;
223 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
224 obj.cookie = reinterpret_cast<uintptr_t>(local);
225 }
226 } else {
227 obj.type = BINDER_TYPE_BINDER;
228 obj.binder = 0;
229 obj.cookie = 0;
230 }
231
232 return finish_flatten_binder(binder, obj, out);
233 }
234
235 status_t flatten_binder(const sp<ProcessState>& /*proc*/,
236 const wp<IBinder>& binder, Parcel* out)
237 {
238 flat_binder_object obj;
239
240 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
241 if (binder != NULL) {
242 sp<IBinder> real = binder.promote();
243 if (real != NULL) {
244 IBinder *local = real->localBinder();
245 if (!local) {
246 BpHwBinder *proxy = real->remoteBinder();
247 if (proxy == NULL) {
248 ALOGE("null proxy");
249 }
250 const int32_t handle = proxy ? proxy->handle() : 0;
251 obj.type = BINDER_TYPE_WEAK_HANDLE;
252 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
253 obj.handle = handle;
254 obj.cookie = 0;
255 } else {
256 obj.type = BINDER_TYPE_WEAK_BINDER;
257 obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
258 obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
259 }
260 return finish_flatten_binder(real, obj, out);
261 }
262
263 // XXX How to deal? In order to flatten the given binder,
264 // we need to probe it for information, which requires a primary
265 // reference... but we don't have one.
266 //
267 // The OpenBinder implementation uses a dynamic_cast<> here,
268 // but we can't do that with the different reference counting
269 // implementation we are using.
270 ALOGE("Unable to unflatten Binder weak reference!");
271 obj.type = BINDER_TYPE_BINDER;
272 obj.binder = 0;
273 obj.cookie = 0;
274 return finish_flatten_binder(NULL, obj, out);
275
276 } else {
277 obj.type = BINDER_TYPE_BINDER;
278 obj.binder = 0;
279 obj.cookie = 0;
280 return finish_flatten_binder(NULL, obj, out);
281 }
282 }
283
284 inline static status_t finish_unflatten_binder(
285 BpHwBinder* /*proxy*/, const flat_binder_object& /*flat*/,
286 const Parcel& /*in*/)
287 {
288 return NO_ERROR;
289 }
290
291 status_t unflatten_binder(const sp<ProcessState>& proc,
292 const Parcel& in, sp<IBinder>* out)
293 {
294 const flat_binder_object* flat = in.readObject<flat_binder_object>();
295
296 if (flat) {
297 switch (flat->type) {
298 case BINDER_TYPE_BINDER:
299 *out = reinterpret_cast<IBinder*>(flat->cookie);
300 return finish_unflatten_binder(NULL, *flat, in);
301 case BINDER_TYPE_HANDLE:
302 *out = proc->getStrongProxyForHandle(flat->handle);
303 return finish_unflatten_binder(
304 static_cast<BpHwBinder*>(out->get()), *flat, in);
305 }
306 }
307 return BAD_TYPE;
308 }
309
310 status_t unflatten_binder(const sp<ProcessState>& proc,
311 const Parcel& in, wp<IBinder>* out)
312 {
313 const flat_binder_object* flat = in.readObject<flat_binder_object>();
314
315 if (flat) {
316 switch (flat->type) {
317 case BINDER_TYPE_BINDER:
318 *out = reinterpret_cast<IBinder*>(flat->cookie);
319 return finish_unflatten_binder(NULL, *flat, in);
320 case BINDER_TYPE_WEAK_BINDER:
321 if (flat->binder != 0) {
322 out->set_object_and_refs(
323 reinterpret_cast<IBinder*>(flat->cookie),
324 reinterpret_cast<RefBase::weakref_type*>(flat->binder));
325 } else {
326 *out = NULL;
327 }
328 return finish_unflatten_binder(NULL, *flat, in);
329 case BINDER_TYPE_HANDLE:
330 case BINDER_TYPE_WEAK_HANDLE:
331 *out = proc->getWeakProxyForHandle(flat->handle);
332 return finish_unflatten_binder(
333 static_cast<BpHwBinder*>(out->unsafe_get()), *flat, in);
334 }
335 }
336 return BAD_TYPE;
337 }
338
339 /*
340 * Return true iff:
341 * 1. obj is indeed a binder_buffer_object (type is BINDER_TYPE_PTR), and
342 * 2. obj does NOT have the flag BINDER_BUFFER_REF (it is not a reference, but
343 * an actual buffer.)
344 */
345 static inline bool isBuffer(const binder_buffer_object& obj) {
346 return obj.hdr.type == BINDER_TYPE_PTR
347 && (obj.flags & BINDER_BUFFER_REF) == 0;
348 }
349
350 // ---------------------------------------------------------------------------
351
352 Parcel::Parcel()
353 {
354 LOG_ALLOC("Parcel %p: constructing", this);
355 initState();
356 }
357
358 Parcel::~Parcel()
359 {
360 freeDataNoInit();
361 LOG_ALLOC("Parcel %p: destroyed", this);
362 }
363
364 size_t Parcel::getGlobalAllocSize() {
365 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
366 size_t size = gParcelGlobalAllocSize;
367 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
368 return size;
369 }
370
371 size_t Parcel::getGlobalAllocCount() {
372 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
373 size_t count = gParcelGlobalAllocCount;
374 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
375 return count;
376 }
377
378 const uint8_t* Parcel::data() const
379 {
380 return mData;
381 }
382
383 size_t Parcel::dataSize() const
384 {
385 return (mDataSize > mDataPos ? mDataSize : mDataPos);
386 }
387
388 size_t Parcel::dataAvail() const
389 {
390 size_t result = dataSize() - dataPosition();
391 if (result > INT32_MAX) {
392 abort();
393 }
394 return result;
395 }
396
397 size_t Parcel::dataPosition() const
398 {
399 return mDataPos;
400 }
401
402 size_t Parcel::dataCapacity() const
403 {
404 return mDataCapacity;
405 }
406
407 status_t Parcel::setDataSize(size_t size)
408 {
409 if (size > INT32_MAX) {
410 // don't accept size_t values which may have come from an
411 // inadvertent conversion from a negative int.
412 return BAD_VALUE;
413 }
414
415 status_t err;
416 err = continueWrite(size);
417 if (err == NO_ERROR) {
418 mDataSize = size;
419 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
420 }
421 return err;
422 }
423
424 void Parcel::setDataPosition(size_t pos) const
425 {
426 if (pos > INT32_MAX) {
427 // don't accept size_t values which may have come from an
428 // inadvertent conversion from a negative int.
429 abort();
430 }
431
432 mDataPos = pos;
433 mNextObjectHint = 0;
434 }
435
436 status_t Parcel::setDataCapacity(size_t size)
437 {
438 if (size > INT32_MAX) {
439 // don't accept size_t values which may have come from an
440 // inadvertent conversion from a negative int.
441 return BAD_VALUE;
442 }
443
444 if (size > mDataCapacity) return continueWrite(size);
445 return NO_ERROR;
446 }
447
448 status_t Parcel::setData(const uint8_t* buffer, size_t len)
449 {
450 if (len > INT32_MAX) {
451 // don't accept size_t values which may have come from an
452 // inadvertent conversion from a negative int.
453 return BAD_VALUE;
454 }
455
456 status_t err = restartWrite(len);
457 if (err == NO_ERROR) {
458 memcpy(const_cast<uint8_t*>(data()), buffer, len);
459 mDataSize = len;
460 mFdsKnown = false;
461 }
462 return err;
463 }
464
465 // Write RPC headers. (previously just the interface token)
466 status_t Parcel::writeInterfaceToken(const char* interface)
467 {
468 // currently the interface identification token is just its name as a string
469 return writeCString(interface);
470 }
471
472 bool Parcel::enforceInterface(const char* interface) const
473 {
474 const char* str = readCString();
475 if (str != nullptr && strcmp(str, interface) == 0) {
476 return true;
477 } else {
478 ALOGW("**** enforceInterface() expected '%s' but read '%s'",
479 interface, (str != nullptr ? str : "<null>"));
480 return false;
481 }
482 }
483
484 const binder_size_t* Parcel::objects() const
485 {
486 return mObjects;
487 }
488
489 size_t Parcel::objectsCount() const
490 {
491 return mObjectsSize;
492 }
493
494 status_t Parcel::errorCheck() const
495 {
496 return mError;
497 }
498
499 void Parcel::setError(status_t err)
500 {
501 mError = err;
502 }
503
504 status_t Parcel::finishWrite(size_t len)
505 {
506 if (len > INT32_MAX) {
507 // don't accept size_t values which may have come from an
508 // inadvertent conversion from a negative int.
509 return BAD_VALUE;
510 }
511
512 //printf("Finish write of %d\n", len);
513 mDataPos += len;
514 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
515 if (mDataPos > mDataSize) {
516 mDataSize = mDataPos;
517 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
518 }
519 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
520 return NO_ERROR;
521 }
522
523 status_t Parcel::writeUnpadded(const void* data, size_t len)
524 {
525 if (len > INT32_MAX) {
526 // don't accept size_t values which may have come from an
527 // inadvertent conversion from a negative int.
528 return BAD_VALUE;
529 }
530
531 size_t end = mDataPos + len;
532 if (end < mDataPos) {
533 // integer overflow
534 return BAD_VALUE;
535 }
536
537 if (end <= mDataCapacity) {
538 restart_write:
539 memcpy(mData+mDataPos, data, len);
540 return finishWrite(len);
541 }
542
543 status_t err = growData(len);
544 if (err == NO_ERROR) goto restart_write;
545 return err;
546 }
547
548 status_t Parcel::write(const void* data, size_t len)
549 {
550 if (len > INT32_MAX) {
551 // don't accept size_t values which may have come from an
552 // inadvertent conversion from a negative int.
553 return BAD_VALUE;
554 }
555
556 void* const d = writeInplace(len);
557 if (d) {
558 memcpy(d, data, len);
559 return NO_ERROR;
560 }
561 return mError;
562 }
563
564 void* Parcel::writeInplace(size_t len)
565 {
566 if (len > INT32_MAX) {
567 // don't accept size_t values which may have come from an
568 // inadvertent conversion from a negative int.
569 return NULL;
570 }
571
572 const size_t padded = pad_size(len);
573
574 // sanity check for integer overflow
575 if (mDataPos+padded < mDataPos) {
576 return NULL;
577 }
578
579 if ((mDataPos+padded) <= mDataCapacity) {
580 restart_write:
581 //printf("Writing %ld bytes, padded to %ld\n", len, padded);
582 uint8_t* const data = mData+mDataPos;
583
584 // Need to pad at end?
585 if (padded != len) {
586 #if BYTE_ORDER == BIG_ENDIAN
587 static const uint32_t mask[4] = {
588 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
589 };
590 #endif
591 #if BYTE_ORDER == LITTLE_ENDIAN
592 static const uint32_t mask[4] = {
593 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
594 };
595 #endif
596 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
597 // *reinterpret_cast<void**>(data+padded-4));
598 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
599 }
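            // e.g. for len == 6, padded == 8 and mask[2] == 0x0000ffff on
            // little-endian, so the two trailing pad bytes of the final word
            // are zeroed instead of leaking whatever was previously in the
            // heap buffer.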
600
601 finishWrite(padded);
602 return data;
603 }
604
605 status_t err = growData(padded);
606 if (err == NO_ERROR) goto restart_write;
607 return NULL;
608 }
609
610 status_t Parcel::writeInt8(int8_t val)
611 {
612 return write(&val, sizeof(val));
613 }
614
615 status_t Parcel::writeUint8(uint8_t val)
616 {
617 return write(&val, sizeof(val));
618 }
619
620 status_t Parcel::writeInt16(int16_t val)
621 {
622 return write(&val, sizeof(val));
623 }
624
625 status_t Parcel::writeUint16(uint16_t val)
626 {
627 return write(&val, sizeof(val));
628 }
629
630 status_t Parcel::writeInt32(int32_t val)
631 {
632 return writeAligned(val);
633 }
634
635 status_t Parcel::writeUint32(uint32_t val)
636 {
637 return writeAligned(val);
638 }
639
640 status_t Parcel::writeBool(bool val)
641 {
642 return writeInt8(int8_t(val));
643 }
644 status_t Parcel::writeInt64(int64_t val)
645 {
646 return writeAligned(val);
647 }
648
649 status_t Parcel::writeUint64(uint64_t val)
650 {
651 return writeAligned(val);
652 }
653
654 status_t Parcel::writePointer(uintptr_t val)
655 {
656 return writeAligned<binder_uintptr_t>(val);
657 }
658
659 status_t Parcel::writeFloat(float val)
660 {
661 return writeAligned(val);
662 }
663
664 #if defined(__mips__) && defined(__mips_hard_float)
665
666 status_t Parcel::writeDouble(double val)
667 {
668 union {
669 double d;
670 unsigned long long ll;
671 } u;
672 u.d = val;
673 return writeAligned(u.ll);
674 }
675
676 #else
677
678 status_t Parcel::writeDouble(double val)
679 {
680 return writeAligned(val);
681 }
682
683 #endif
684
685 status_t Parcel::writeCString(const char* str)
686 {
687 return write(str, strlen(str)+1);
688 }
689 status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
690 {
691 if (!str) {
692 return writeInt32(-1);
693 }
694
695 return writeString16(*str);
696 }
697
698 status_t Parcel::writeString16(const String16& str)
699 {
700 return writeString16(str.string(), str.size());
701 }
702
703 status_t Parcel::writeString16(const char16_t* str, size_t len)
704 {
705 if (str == NULL) return writeInt32(-1);
706
707 status_t err = writeInt32(len);
708 if (err == NO_ERROR) {
709 len *= sizeof(char16_t);
710 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
711 if (data) {
712 memcpy(data, str, len);
713 *reinterpret_cast<char16_t*>(data+len) = 0;
714 return NO_ERROR;
715 }
716 err = mError;
717 }
718 return err;
719 }
720 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
721 {
722 return flatten_binder(ProcessState::self(), val, this);
723 }
724
725 status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
726 {
727 return flatten_binder(ProcessState::self(), val, this);
728 }
729
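// Writes a kernel object (flat_binder_object, binder_fd_object,
// binder_buffer_object or binder_fd_array_object) at the current data
// position and records its offset in mObjects so the binder driver can
// translate it; null binders and null buffers are written but not recorded.
// If the data buffer or the object offset array is too small, whichever is
// full is grown and the write is retried.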
730 template <typename T>
731 status_t Parcel::writeObject(const T& val)
732 {
733 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
734 const bool enoughObjects = mObjectsSize < mObjectsCapacity;
735 if (enoughData && enoughObjects) {
736 restart_write:
737 *reinterpret_cast<T*>(mData+mDataPos) = val;
738
739 const binder_object_header* hdr = reinterpret_cast<binder_object_header*>(mData+mDataPos);
740 switch (hdr->type) {
741 case BINDER_TYPE_BINDER:
742 case BINDER_TYPE_WEAK_BINDER:
743 case BINDER_TYPE_HANDLE:
744 case BINDER_TYPE_WEAK_HANDLE: {
745 const flat_binder_object *fbo = reinterpret_cast<const flat_binder_object*>(hdr);
746 if (fbo->binder != 0) {
747 mObjects[mObjectsSize++] = mDataPos;
748 acquire_binder_object(ProcessState::self(), *fbo, this);
749 }
750 break;
751 }
752 case BINDER_TYPE_FD: {
753 const binder_fd_object *fd_obj = reinterpret_cast<const binder_fd_object*>(hdr);
754 // remember if it's a file descriptor
755 if (!mAllowFds) {
756 // fail before modifying our object index
757 return FDS_NOT_ALLOWED;
758 }
759 mHasFds = mFdsKnown = true;
760 mObjects[mObjectsSize++] = mDataPos;
761 break;
762 }
763 case BINDER_TYPE_FDA:
764 mObjects[mObjectsSize++] = mDataPos;
765 break;
766 case BINDER_TYPE_PTR: {
767 const binder_buffer_object *buffer_obj = reinterpret_cast<
768 const binder_buffer_object*>(hdr);
769 if ((void *)buffer_obj->buffer != nullptr) {
770 mObjects[mObjectsSize++] = mDataPos;
771 }
772 break;
773 }
774 default: {
775 ALOGE("writeObject: unknown type %d", hdr->type);
776 break;
777 }
778 }
779 return finishWrite(sizeof(val));
780 }
781
782 if (!enoughData) {
783 const status_t err = growData(sizeof(val));
784 if (err != NO_ERROR) return err;
785 }
786 if (!enoughObjects) {
787 size_t newSize = ((mObjectsSize+2)*3)/2;
788 if (newSize * sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY; // overflow
789 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
790 if (objects == NULL) return NO_MEMORY;
791 mObjects = objects;
792 mObjectsCapacity = newSize;
793 }
794
795 goto restart_write;
796 }
797
798 template status_t Parcel::writeObject<flat_binder_object>(const flat_binder_object& val);
799 template status_t Parcel::writeObject<binder_fd_object>(const binder_fd_object& val);
800 template status_t Parcel::writeObject<binder_buffer_object>(const binder_buffer_object& val);
801 template status_t Parcel::writeObject<binder_fd_array_object>(const binder_fd_array_object& val);
802
803
804 // TODO merge duplicated code in writeEmbeddedBuffer, writeEmbeddedReference, and writeEmbeddedNullReference
805 // TODO merge duplicated code in writeBuffer, writeReference, and writeNullReference
806
807 bool Parcel::validateBufferChild(size_t child_buffer_handle,
808 size_t child_offset) const {
809 if (child_buffer_handle >= mObjectsSize)
810 return false;
811 binder_buffer_object *child = reinterpret_cast<binder_buffer_object*>
812 (mData + mObjects[child_buffer_handle]);
813 if (!isBuffer(*child) || child_offset > child->length) {
814 // Child object not a buffer, or not large enough
815 LOG_BUFFER("validateBufferChild found weird child. "
816 "child_offset = %zu, child->length = %zu",
817 child_offset, (size_t)child->length);
818 return false;
819 }
820 return true;
821 }
822
823 bool Parcel::validateBufferParent(size_t parent_buffer_handle,
824 size_t parent_offset) const {
825 if (parent_buffer_handle >= mObjectsSize)
826 return false;
827 binder_buffer_object *parent = reinterpret_cast<binder_buffer_object*>
828 (mData + mObjects[parent_buffer_handle]);
829 if (!isBuffer(*parent) ||
830 sizeof(binder_uintptr_t) > parent->length ||
831 parent_offset > parent->length - sizeof(binder_uintptr_t)) {
832 // Parent object not a buffer, or not large enough
833 return false;
834 }
835 return true;
836 }
837 status_t Parcel::writeEmbeddedBuffer(
838 const void *buffer, size_t length, size_t *handle,
839 size_t parent_buffer_handle, size_t parent_offset) {
840 LOG_BUFFER("writeEmbeddedBuffer(%p, %zu, parent = (%zu, %zu)) -> %zu",
841 buffer, length, parent_buffer_handle,
842 parent_offset, mObjectsSize);
843 binder_buffer_object obj;
844 obj.hdr.type = BINDER_TYPE_PTR;
845 obj.buffer = reinterpret_cast<binder_uintptr_t>(buffer);
846 obj.length = length;
847 obj.flags = BINDER_BUFFER_HAS_PARENT;
848 if(!validateBufferParent(parent_buffer_handle, parent_offset))
849 return BAD_VALUE;
850 obj.parent = parent_buffer_handle;
851 obj.parent_offset = parent_offset;
852 if (handle != nullptr) {
853 // We use an index into mObjects as a handle
854 *handle = mObjectsSize;
855 }
856 return writeObject(obj);
857 }
858
859 status_t Parcel::writeBuffer(const void *buffer, size_t length, size_t *handle)
860 {
861 LOG_BUFFER("writeBuffer(%p, %zu) -> %zu",
862 buffer, length, mObjectsSize);
863 binder_buffer_object obj;
864 obj.hdr.type = BINDER_TYPE_PTR;
865 obj.buffer = reinterpret_cast<binder_uintptr_t>(buffer);
866 obj.length = length;
867 obj.flags = 0;
868 if (handle != nullptr) {
869 // We use an index into mObjects as a handle
870 *handle = mObjectsSize;
871 }
872 return writeObject(obj);
873 }
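// Typical usage (sketch): a caller first writes the top-level struct with
// writeBuffer() and keeps the returned *handle, then writes each buffer that
// the struct points to with writeEmbeddedBuffer(), passing that handle as
// parent_buffer_handle and the offset of the pointer field inside the parent
// as parent_offset, so the kernel can fix up the pointer for the receiving
// process.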
874
875 status_t Parcel::incrementNumReferences() {
876 ++mNumRef;
877 LOG_BUFFER("incrementNumReferences: %zu", mNumRef);
878 return mNumRef <= PARCEL_REF_CAP ? OK : NO_MEMORY;
879 }
880
881 status_t Parcel::writeReference(size_t *handle,
882 size_t child_buffer_handle, size_t child_offset) {
883 LOG_BUFFER("writeReference(child = (%zu, %zu)) -> %zu",
884 child_buffer_handle, child_offset,
885 mObjectsSize);
886 status_t status = incrementNumReferences();
887 if (status != OK)
888 return status;
889 binder_buffer_object obj;
890 obj.hdr.type = BINDER_TYPE_PTR;
891 obj.flags = BINDER_BUFFER_REF;
892 if (!validateBufferChild(child_buffer_handle, child_offset))
893 return BAD_VALUE;
894 obj.child = child_buffer_handle;
895 obj.child_offset = child_offset;
896 if (handle != nullptr)
897 // We use an index into mObjects as a handle
898 *handle = mObjectsSize;
899 return writeObject(obj);
900 }
901
902 /* Write an object that describes a pointer from parent to child.
903 * Output the handle of that object in the size_t *handle variable. */
904 status_t Parcel::writeEmbeddedReference(size_t *handle,
905 size_t child_buffer_handle, size_t child_offset,
906 size_t parent_buffer_handle, size_t parent_offset) {
907 LOG_BUFFER("writeEmbeddedReference(child = (%zu, %zu), parent = (%zu, %zu)) -> %zu",
908 child_buffer_handle, child_offset,
909 parent_buffer_handle, parent_offset,
910 mObjectsSize);
911 status_t status = incrementNumReferences();
912 if (status != OK)
913 return status;
914 binder_buffer_object obj;
915 obj.hdr.type = BINDER_TYPE_PTR;
916 obj.flags = BINDER_BUFFER_REF | BINDER_BUFFER_HAS_PARENT;
917 if (!validateBufferChild(child_buffer_handle, child_offset))
918 return BAD_VALUE;
919 obj.child = child_buffer_handle;
920 obj.child_offset = child_offset;
921 if(!validateBufferParent(parent_buffer_handle, parent_offset))
922 return BAD_VALUE;
923 obj.parent = parent_buffer_handle;
924 obj.parent_offset = parent_offset;
925 if (handle != nullptr) {
926 // We use an index into mObjects as a handle
927 *handle = mObjectsSize;
928 }
929 return writeObject(obj);
930 }
931
932 status_t Parcel::writeNullReference(size_t * handle) {
933 LOG_BUFFER("writeNullReference -> %zu", mObjectsSize);
934 status_t status = incrementNumReferences();
935 if (status != OK)
936 return status;
937 binder_buffer_object obj;
938 obj.hdr.type = BINDER_TYPE_PTR;
939 obj.flags = BINDER_BUFFER_REF;
940 if (handle != nullptr)
941 // We use an index into mObjects as a handle
942 *handle = mObjectsSize;
943 return writeObject(obj);
944 }
945
946 status_t Parcel::writeEmbeddedNullReference(size_t * handle,
947 size_t parent_buffer_handle, size_t parent_offset) {
948 LOG_BUFFER("writeEmbeddedNullReference(parent = (%zu, %zu)) -> %zu",
949 parent_buffer_handle,
950 parent_offset,
951 mObjectsSize);
952 status_t status = incrementNumReferences();
953 if (status != OK)
954 return status;
955 binder_buffer_object obj;
956 obj.hdr.type = BINDER_TYPE_PTR;
957 obj.flags = BINDER_BUFFER_REF | BINDER_BUFFER_HAS_PARENT;
958 // parent_buffer_handle and parent_offset need to be checked.
959 if(!validateBufferParent(parent_buffer_handle, parent_offset))
960 return BAD_VALUE;
961 obj.parent = parent_buffer_handle;
962 obj.parent_offset = parent_offset;
963 if (handle != nullptr) {
964 // We use an index into mObjects as a handle
965 *handle = mObjectsSize;
966 }
967 return writeObject(obj);
968 }
969
970 void Parcel::clearCache() const {
971 LOG_BUFFER("clearing cache.");
972 mBufCachePos = 0;
973 mBufCache.clear();
974 }
975
976 void Parcel::updateCache() const {
977 if(mBufCachePos == mObjectsSize)
978 return;
979 LOG_BUFFER("updating cache from %zu to %zu", mBufCachePos, mObjectsSize);
980 for(size_t i = mBufCachePos; i < mObjectsSize; i++) {
981 binder_size_t dataPos = mObjects[i];
982 binder_buffer_object *obj =
983 reinterpret_cast<binder_buffer_object*>(mData+dataPos);
984 if(!isBuffer(*obj))
985 continue;
986 BufferInfo ifo;
987 ifo.index = i;
988 ifo.buffer = obj->buffer;
989 ifo.bufend = obj->buffer + obj->length;
990 mBufCache.push_back(ifo);
991 }
992 mBufCachePos = mObjectsSize;
993 }
994
995 /* O(n) (n=#buffers) to find a buffer that contains the given addr */
996 status_t Parcel::findBuffer(const void *ptr, size_t length, bool *found,
997 size_t *handle, size_t *offset) const {
998 if(found == nullptr)
999 return UNKNOWN_ERROR;
1000 updateCache();
1001 binder_uintptr_t ptrVal = reinterpret_cast<binder_uintptr_t>(ptr);
1002 // true if the pointer is in some buffer, but the length is too big
1003 // so that ptr + length doesn't fit into the buffer.
1004 bool suspectRejectBadPointer = false;
1005 LOG_BUFFER("findBuffer examining %zu objects.", mObjectsSize);
1006 for(auto entry = mBufCache.rbegin(); entry != mBufCache.rend(); ++entry ) {
1007 if(entry->buffer <= ptrVal && ptrVal < entry->bufend) {
1008 // might have found it.
1009 if(ptrVal + length <= entry->bufend) {
1010 *found = true;
1011 if(handle != nullptr) *handle = entry->index;
1012 if(offset != nullptr) *offset = ptrVal - entry->buffer;
1013 LOG_BUFFER(" findBuffer has a match at %zu!", entry->index);
1014 return OK;
1015 } else {
1016 suspectRejectBadPointer = true;
1017 }
1018 }
1019 }
1020 LOG_BUFFER("findBuffer did not find for ptr = %p.", ptr);
1021 *found = false;
1022 return suspectRejectBadPointer ? BAD_VALUE : OK;
1023 }
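// For example, after writeBuffer(p, 100, &h), findBuffer(p + 40, 10, &found, ...)
// reports found == true with *handle == h and *offset == 40, whereas
// findBuffer(p + 40, 80, ...) sets *found to false and returns BAD_VALUE
// because the requested range runs past the end of the 100-byte buffer.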
1024
1025 /* findBuffer with the assumption that ptr == .buffer (i.e. it points to the
1026 * start of the buffer, at offset 0).
1027 */
1028 status_t Parcel::quickFindBuffer(const void *ptr, size_t *handle) const {
1029 updateCache();
1030 binder_uintptr_t ptrVal = reinterpret_cast<binder_uintptr_t>(ptr);
1031 LOG_BUFFER("quickFindBuffer examining %zu objects.", mObjectsSize);
1032 for(auto entry = mBufCache.rbegin(); entry != mBufCache.rend(); ++entry ) {
1033 if(entry->buffer == ptrVal) {
1034 if(handle != nullptr) *handle = entry->index;
1035 return OK;
1036 }
1037 }
1038 LOG_BUFFER("quickFindBuffer did not find for ptr = %p.", ptr);
1039 return NO_INIT;
1040 }
1041
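// A native_handle_t travels as three pieces: a uint64 carrying the flattened
// size (0 means a null handle), a BINDER_TYPE_PTR buffer object holding the
// native_handle_t body, and a BINDER_TYPE_FDA object telling the kernel where
// inside that buffer the file descriptors live so it can translate them for
// the receiving process. The fds themselves are not dup()ed here, hence the
// "NoDup" in the name.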
1042 status_t Parcel::writeNativeHandleNoDup(const native_handle_t *handle,
1043 bool embedded,
1044 size_t parent_buffer_handle,
1045 size_t parent_offset)
1046 {
1047 struct binder_fd_array_object fd_array;
1048 size_t buffer_handle;
1049 status_t status = OK;
1050 uint32_t flags = 0;
1051
1052 if (handle == nullptr) {
1053 status = writeUint64(0);
1054 return status;
1055 }
1056
1057 size_t native_handle_size = sizeof(native_handle_t)
1058 + handle->numFds * sizeof(int) + handle->numInts * sizeof(int);
1059 writeUint64(native_handle_size);
1060
1061 if (embedded) {
1062 status = writeEmbeddedBuffer((void*) handle,
1063 native_handle_size, &buffer_handle,
1064 parent_buffer_handle, parent_offset);
1065 } else {
1066 status = writeBuffer((void*) handle, native_handle_size, &buffer_handle);
1067 }
1068
1069 if (status != OK) {
1070 return status;
1071 }
1072
1073 fd_array.hdr.type = BINDER_TYPE_FDA;
1074 fd_array.num_fds = handle->numFds;
1075 fd_array.parent = buffer_handle;
1076 fd_array.parent_offset = offsetof(native_handle_t, data);
1077
1078 return writeObject(fd_array);
1079 }
1080
1081 status_t Parcel::writeNativeHandleNoDup(const native_handle_t *handle)
1082 {
1083 return writeNativeHandleNoDup(handle, false /* embedded */);
1084 }
1085
1086 status_t Parcel::writeEmbeddedNativeHandle(const native_handle_t *handle,
1087 size_t parent_buffer_handle,
1088 size_t parent_offset)
1089 {
1090 return writeNativeHandleNoDup(handle, true /* embedded */,
1091 parent_buffer_handle, parent_offset);
1092 }
1093
1094 void Parcel::remove(size_t /*start*/, size_t /*amt*/)
1095 {
1096 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
1097 }
1098
1099 status_t Parcel::read(void* outData, size_t len) const
1100 {
1101 if (len > INT32_MAX) {
1102 // don't accept size_t values which may have come from an
1103 // inadvertent conversion from a negative int.
1104 return BAD_VALUE;
1105 }
1106
1107 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1108 && len <= pad_size(len)) {
1109 memcpy(outData, mData+mDataPos, len);
1110 mDataPos += pad_size(len);
1111 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1112 return NO_ERROR;
1113 }
1114 return NOT_ENOUGH_DATA;
1115 }
1116
1117 const void* Parcel::readInplace(size_t len) const
1118 {
1119 if (len > INT32_MAX) {
1120 // don't accept size_t values which may have come from an
1121 // inadvertent conversion from a negative int.
1122 return NULL;
1123 }
1124
1125 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1126 && len <= pad_size(len)) {
1127 const void* data = mData+mDataPos;
1128 mDataPos += pad_size(len);
1129 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1130 return data;
1131 }
1132 return NULL;
1133 }
1134
1135 template<class T>
1136 status_t Parcel::readAligned(T *pArg) const {
1137 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1138
1139 if ((mDataPos+sizeof(T)) <= mDataSize) {
1140 const void* data = mData+mDataPos;
1141 mDataPos += sizeof(T);
1142 *pArg = *reinterpret_cast<const T*>(data);
1143 return NO_ERROR;
1144 } else {
1145 return NOT_ENOUGH_DATA;
1146 }
1147 }
1148
1149 template<class T>
1150 T Parcel::readAligned() const {
1151 T result;
1152 if (readAligned(&result) != NO_ERROR) {
1153 result = 0;
1154 }
1155
1156 return result;
1157 }
1158
1159 template<class T>
1160 status_t Parcel::writeAligned(T val) {
1161 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1162
1163 if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1164 restart_write:
1165 *reinterpret_cast<T*>(mData+mDataPos) = val;
1166 return finishWrite(sizeof(val));
1167 }
1168
1169 status_t err = growData(sizeof(val));
1170 if (err == NO_ERROR) goto restart_write;
1171 return err;
1172 }
1173
1174 status_t Parcel::readInt8(int8_t *pArg) const
1175 {
1176 return read(pArg, sizeof(*pArg));
1177 }
1178
1179 status_t Parcel::readUint8(uint8_t *pArg) const
1180 {
1181 return read(pArg, sizeof(*pArg));
1182 }
1183
1184 status_t Parcel::readInt16(int16_t *pArg) const
1185 {
1186 return read(pArg, sizeof(*pArg));
1187 }
1188
1189 status_t Parcel::readUint16(uint16_t *pArg) const
1190 {
1191 return read(pArg, sizeof(*pArg));
1192 }
1193
1194 status_t Parcel::readInt32(int32_t *pArg) const
1195 {
1196 return readAligned(pArg);
1197 }
1198
1199 int32_t Parcel::readInt32() const
1200 {
1201 return readAligned<int32_t>();
1202 }
1203
1204 status_t Parcel::readUint32(uint32_t *pArg) const
1205 {
1206 return readAligned(pArg);
1207 }
1208
1209 uint32_t Parcel::readUint32() const
1210 {
1211 return readAligned<uint32_t>();
1212 }
1213
1214 status_t Parcel::readInt64(int64_t *pArg) const
1215 {
1216 return readAligned(pArg);
1217 }
1218
1219 int64_t Parcel::readInt64() const
1220 {
1221 return readAligned<int64_t>();
1222 }
1223
1224 status_t Parcel::readUint64(uint64_t *pArg) const
1225 {
1226 return readAligned(pArg);
1227 }
1228
1229 uint64_t Parcel::readUint64() const
1230 {
1231 return readAligned<uint64_t>();
1232 }
1233
1234 status_t Parcel::readPointer(uintptr_t *pArg) const
1235 {
1236 status_t ret;
1237 binder_uintptr_t ptr;
1238 ret = readAligned(&ptr);
1239 if (!ret)
1240 *pArg = ptr;
1241 return ret;
1242 }
1243
1244 uintptr_t Parcel::readPointer() const
1245 {
1246 return readAligned<binder_uintptr_t>();
1247 }
1248
1249
1250 status_t Parcel::readFloat(float *pArg) const
1251 {
1252 return readAligned(pArg);
1253 }
1254
1255
1256 float Parcel::readFloat() const
1257 {
1258 return readAligned<float>();
1259 }
1260
1261 #if defined(__mips__) && defined(__mips_hard_float)
1262
1263 status_t Parcel::readDouble(double *pArg) const
1264 {
1265 union {
1266 double d;
1267 unsigned long long ll;
1268 } u;
1269 u.d = 0;
1270 status_t status;
1271 status = readAligned(&u.ll);
1272 *pArg = u.d;
1273 return status;
1274 }
1275
1276 double Parcel::readDouble() const
1277 {
1278 union {
1279 double d;
1280 unsigned long long ll;
1281 } u;
1282 u.ll = readAligned<unsigned long long>();
1283 return u.d;
1284 }
1285
1286 #else
1287
1288 status_t Parcel::readDouble(double *pArg) const
1289 {
1290 return readAligned(pArg);
1291 }
1292
1293 double Parcel::readDouble() const
1294 {
1295 return readAligned<double>();
1296 }
1297
1298 #endif
1299
1300 status_t Parcel::readBool(bool *pArg) const
1301 {
1302 int8_t tmp;
1303 status_t ret = readInt8(&tmp);
1304 *pArg = (tmp != 0);
1305 return ret;
1306 }
1307
1308 bool Parcel::readBool() const
1309 {
1310 int8_t tmp;
1311 status_t err = readInt8(&tmp);
1312
1313 if (err != OK) {
1314 return 0;
1315 }
1316
1317 return tmp != 0;
1318 }
1319
1320 const char* Parcel::readCString() const
1321 {
1322 const size_t avail = mDataSize-mDataPos;
1323 if (avail > 0) {
1324 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1325 // is the string's trailing NUL within the parcel's valid bounds?
1326 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1327 if (eos) {
1328 const size_t len = eos - str;
1329 mDataPos += pad_size(len+1);
1330 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1331 return str;
1332 }
1333 }
1334 return NULL;
1335 }
1336 String16 Parcel::readString16() const
1337 {
1338 size_t len;
1339 const char16_t* str = readString16Inplace(&len);
1340 if (str) return String16(str, len);
1341 ALOGE("Reading a NULL string not supported here.");
1342 return String16();
1343 }
1344
1345 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
1346 {
1347 const int32_t start = dataPosition();
1348 int32_t size;
1349 status_t status = readInt32(&size);
1350 pArg->reset();
1351
1352 if (status != OK || size < 0) {
1353 return status;
1354 }
1355
1356 setDataPosition(start);
1357 pArg->reset(new (std::nothrow) String16());
1358
1359 status = readString16(pArg->get());
1360
1361 if (status != OK) {
1362 pArg->reset();
1363 }
1364
1365 return status;
1366 }
1367
1368 status_t Parcel::readString16(String16* pArg) const
1369 {
1370 size_t len;
1371 const char16_t* str = readString16Inplace(&len);
1372 if (str) {
1373 pArg->setTo(str, len);
1374 return 0;
1375 } else {
1376 *pArg = String16();
1377 return UNEXPECTED_NULL;
1378 }
1379 }
1380
1381 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1382 {
1383 int32_t size = readInt32();
1384 // watch for potential int overflow from size+1
1385 if (size >= 0 && size < INT32_MAX) {
1386 *outLen = size;
1387 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1388 if (str != NULL) {
1389 return str;
1390 }
1391 }
1392 *outLen = 0;
1393 return NULL;
1394 }
1395 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
1396 {
1397 status_t status = readNullableStrongBinder(val);
1398 if (status == OK && !val->get()) {
1399 status = UNEXPECTED_NULL;
1400 }
1401 return status;
1402 }
1403
1404 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
1405 {
1406 return unflatten_binder(ProcessState::self(), *this, val);
1407 }
1408
1409 sp<IBinder> Parcel::readStrongBinder() const
1410 {
1411 sp<IBinder> val;
1412 // Note that a lot of code in Android reads binders by hand with this
1413 // method, and that code has historically been ok with getting nullptr
1414 // back (while ignoring error codes).
1415 readNullableStrongBinder(&val);
1416 return val;
1417 }
1418
1419 wp<IBinder> Parcel::readWeakBinder() const
1420 {
1421 wp<IBinder> val;
1422 unflatten_binder(ProcessState::self(), *this, &val);
1423 return val;
1424 }
1425
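// Reads a kernel object of type T at the current data position. Null binders
// and null buffers are returned directly, since they are never recorded in
// mObjects; any other object must match an entry in the object offset list,
// which is located by searching forward and then backward from
// mNextObjectHint (reads are usually sequential, so the hint makes the common
// case cheap).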
1426 template<typename T>
1427 const T* Parcel::readObject(size_t *objects_offset) const
1428 {
1429 const size_t DPOS = mDataPos;
1430 if (objects_offset != nullptr) {
1431 *objects_offset = 0;
1432 }
1433
1434 if ((DPOS+sizeof(T)) <= mDataSize) {
1435 const T* obj = reinterpret_cast<const T*>(mData+DPOS);
1436 mDataPos = DPOS + sizeof(T);
1437 const binder_object_header *hdr = reinterpret_cast<const binder_object_header*>(obj);
1438 switch (hdr->type) {
1439 case BINDER_TYPE_BINDER:
1440 case BINDER_TYPE_WEAK_BINDER:
1441 case BINDER_TYPE_HANDLE:
1442 case BINDER_TYPE_WEAK_HANDLE: {
1443 const flat_binder_object *flat_obj =
1444 reinterpret_cast<const flat_binder_object*>(hdr);
1445 if (flat_obj->cookie == 0 && flat_obj->binder == 0) {
1446 // When transferring a NULL binder object, we don't write it into
1447 // the object list, so we don't want to check for it when
1448 // reading.
1449 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1450 return obj;
1451 }
1452 break;
1453 }
1454 case BINDER_TYPE_FD:
1455 case BINDER_TYPE_FDA:
1456 // fds and fd arrays must always appear in the meta-data list, since they are touched by the kernel
1457 break;
1458 case BINDER_TYPE_PTR: {
1459 const binder_buffer_object *buffer_obj =
1460 reinterpret_cast<const binder_buffer_object*>(hdr);
1461 if ((void *)buffer_obj->buffer == nullptr) {
1462 // null pointers can be returned directly - they're not written in the
1463 // object list. All non-null buffers must appear in the objects list.
1464 return obj;
1465 }
1466 break;
1467 }
1468 }
1469 // Ensure that this object is valid...
1470 binder_size_t* const OBJS = mObjects;
1471 const size_t N = mObjectsSize;
1472 size_t opos = mNextObjectHint;
1473
1474 if (N > 0) {
1475 ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
1476 this, DPOS, opos);
1477
1478 // Start at the current hint position, looking for an object at
1479 // the current data position.
1480 if (opos < N) {
1481 while (opos < (N-1) && OBJS[opos] < DPOS) {
1482 opos++;
1483 }
1484 } else {
1485 opos = N-1;
1486 }
1487 if (OBJS[opos] == DPOS) {
1488 // Found it!
1489 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
1490 this, DPOS, opos);
1491 mNextObjectHint = opos+1;
1492 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1493 if (objects_offset != nullptr) {
1494 *objects_offset = opos;
1495 }
1496 return obj;
1497 }
1498
1499 // Look backwards for it...
1500 while (opos > 0 && OBJS[opos] > DPOS) {
1501 opos--;
1502 }
1503 if (OBJS[opos] == DPOS) {
1504 // Found it!
1505 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
1506 this, DPOS, opos);
1507 mNextObjectHint = opos+1;
1508 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
1509 if (objects_offset != nullptr) {
1510 *objects_offset = opos;
1511 }
1512 return obj;
1513 }
1514 }
1515 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
1516 this, DPOS);
1517 }
1518 return NULL;
1519 }
1520
1521 template const flat_binder_object* Parcel::readObject<flat_binder_object>(size_t *objects_offset) const;
1522
1523 template const binder_fd_object* Parcel::readObject<binder_fd_object>(size_t *objects_offset) const;
1524
1525 template const binder_buffer_object* Parcel::readObject<binder_buffer_object>(size_t *objects_offset) const;
1526
1527 template const binder_fd_array_object* Parcel::readObject<binder_fd_array_object>(size_t *objects_offset) const;
1528
1529 bool Parcel::verifyBufferObject(const binder_buffer_object *buffer_obj,
1530 size_t size, uint32_t flags, size_t parent,
1531 size_t parentOffset) const {
1532 if (buffer_obj->length != size) {
1533 ALOGE("Buffer length %" PRIu64 " does not match expected size %zu.",
1534 static_cast<uint64_t>(buffer_obj->length), size);
1535 return false;
1536 }
1537
1538 if (buffer_obj->flags != flags) {
1539 ALOGE("Buffer flags 0x%02X do not match expected flags 0x%02X.", buffer_obj->flags, flags);
1540 return false;
1541 }
1542
1543 if (flags & BINDER_BUFFER_HAS_PARENT) {
1544 if (buffer_obj->parent != parent) {
1545 ALOGE("Buffer parent %" PRIu64 " does not match expected parent %zu.",
1546 static_cast<uint64_t>(buffer_obj->parent), parent);
1547 return false;
1548 }
1549 if (buffer_obj->parent_offset != parentOffset) {
1550 ALOGE("Buffer parent offset %" PRIu64 " does not match expected offset %zu.",
1551 static_cast<uint64_t>(buffer_obj->parent_offset), parentOffset);
1552 return false;
1553 }
1554 }
1555
1556 return true;
1557 }
1558
1559 status_t Parcel::readBuffer(size_t buffer_size, size_t *buffer_handle,
1560 uint32_t flags, size_t parent, size_t parentOffset,
1561 const void **buffer_out) const {
1562
1563 status_t status = OK;
1564
1565 const binder_buffer_object* buffer_obj = readObject<binder_buffer_object>(buffer_handle);
1566
1567 if (buffer_obj == nullptr || !isBuffer(*buffer_obj)) {
1568 return BAD_VALUE;
1569 }
1570
1571 if (!verifyBufferObject(buffer_obj, buffer_size, flags, parent, parentOffset)) {
1572 return BAD_VALUE;
1573 }
1574
1575 // in read side, always use .buffer and .length.
1576 *buffer_out = reinterpret_cast<void*>(buffer_obj->buffer);
1577
1578 return OK;
1579 }
1580
1581 status_t Parcel::readNullableBuffer(size_t buffer_size, size_t *buffer_handle,
1582 const void **buffer_out) const
1583 {
1584 return readBuffer(buffer_size, buffer_handle,
1585 0 /* flags */, 0 /* parent */, 0 /* parentOffset */,
1586 buffer_out);
1587 }
1588
1589 status_t Parcel::readBuffer(size_t buffer_size, size_t *buffer_handle,
1590 const void **buffer_out) const
1591 {
1592 status_t status = readNullableBuffer(buffer_size, buffer_handle, buffer_out);
1593 if (status == OK && *buffer_out == nullptr) {
1594 return UNEXPECTED_NULL;
1595 }
1596 return status;
1597 }
1598
1599
1600 status_t Parcel::readEmbeddedBuffer(size_t buffer_size,
1601 size_t *buffer_handle,
1602 size_t parent_buffer_handle,
1603 size_t parent_offset,
1604 const void **buffer_out) const
1605 {
1606 status_t status = readNullableEmbeddedBuffer(buffer_size, buffer_handle,
1607 parent_buffer_handle,
1608 parent_offset, buffer_out);
1609 if (status == OK && *buffer_out == nullptr) {
1610 return UNEXPECTED_NULL;
1611 }
1612 return status;
1613 }
1614
1615 status_t Parcel::readNullableEmbeddedBuffer(size_t buffer_size,
1616 size_t *buffer_handle,
1617 size_t parent_buffer_handle,
1618 size_t parent_offset,
1619 const void **buffer_out) const
1620 {
1621 return readBuffer(buffer_size, buffer_handle, BINDER_BUFFER_HAS_PARENT,
1622 parent_buffer_handle, parent_offset, buffer_out);
1623 }
1624
1625 // *isRef is set to true if this corresponds to a writeReference call, and false if it
1626 // corresponds to a writeBuffer call. See ::android::hardware::writeReferenceToParcel for details.
1627 status_t Parcel::readReference(void const* *bufptr,
1628 size_t *buffer_handle, bool *isRef) const
1629 {
1630 LOG_BUFFER("readReference");
1631 const binder_buffer_object* buffer_obj = readObject<binder_buffer_object>();
1632 // TODO need verification here
1633 if (buffer_obj != nullptr && buffer_obj->hdr.type == BINDER_TYPE_PTR) {
1634 LOG_BUFFER("  readReference: buf = %p, len = %zu, flags = %x",
1635 (void*)buffer_obj->buffer, (size_t)buffer_obj->length,
1636 (int)buffer_obj->flags);
1637 if (buffer_handle != nullptr) {
1638 *buffer_handle = 0; // TODO fix this, as readBuffer would do
1639 }
1640 if(isRef != nullptr) {
1641 *isRef = (buffer_obj->flags & BINDER_BUFFER_REF) != 0;
1642 LOG_BUFFER(" readReference: isRef = %d", *isRef);
1643 }
1644 // in read side, always use .buffer and .length.
1645 if(bufptr != nullptr) {
1646 *bufptr = (void*)buffer_obj->buffer;
1647 }
1648 return OK;
1649 }
1650
1651 return BAD_VALUE;
1652 }
1653
1654 // *isRef is set to true if this corresponds to a writeEmbeddedReference call, and false if it
1655 // corresponds to a writeEmbeddedBuffer call. See ::android::hardware::writeEmbeddedReferenceToParcel for details.
1656 status_t Parcel::readEmbeddedReference(void const* *bufptr,
1657 size_t *buffer_handle,
1658 size_t /* parent_buffer_handle */,
1659 size_t /* parent_offset */,
1660 bool *isRef) const
1661 {
1662 // TODO verify parent and offset
1663 LOG_BUFFER("readEmbeddedReference");
1664 return (readReference(bufptr, buffer_handle, isRef));
1665 }
1666
1667 status_t Parcel::readEmbeddedNativeHandle(size_t parent_buffer_handle,
1668 size_t parent_offset,
1669 const native_handle_t **handle) const
1670 {
1671 status_t status = readNullableEmbeddedNativeHandle(parent_buffer_handle, parent_offset, handle);
1672 if (status == OK && *handle == nullptr) {
1673 return UNEXPECTED_NULL;
1674 }
1675 return status;
1676 }
1677
1678 status_t Parcel::readNullableNativeHandleNoDup(const native_handle_t **handle,
1679 bool embedded,
1680 size_t parent_buffer_handle,
1681 size_t parent_offset) const
1682 {
1683 status_t status;
1684 uint64_t nativeHandleSize;
1685 size_t fdaParent;
1686
1687 status = readUint64(&nativeHandleSize);
1688 if (status != OK || nativeHandleSize == 0) {
1689 *handle = nullptr;
1690 return status;
1691 }
1692
1693 if (nativeHandleSize < sizeof(native_handle_t)) {
1694 ALOGE("Received a native_handle_t size that was too small.");
1695 return BAD_VALUE;
1696 }
1697
1698 if (embedded) {
1699 status = readNullableEmbeddedBuffer(nativeHandleSize, &fdaParent,
1700 parent_buffer_handle, parent_offset,
1701 reinterpret_cast<const void**>(handle));
1702 } else {
1703 status = readNullableBuffer(nativeHandleSize, &fdaParent,
1704 reinterpret_cast<const void**>(handle));
1705 }
1706
1707 if (status != OK) {
1708 return status;
1709 }
1710
1711 const binder_fd_array_object* fd_array_obj = readObject<binder_fd_array_object>();
1712
1713 if (fd_array_obj == nullptr || fd_array_obj->hdr.type != BINDER_TYPE_FDA) {
1714 ALOGE("Can't find file-descriptor array object.");
1715 return BAD_VALUE;
1716 }
1717
1718 if (static_cast<int>(fd_array_obj->num_fds) != (*handle)->numFds) {
1719 ALOGE("Number of native handles does not match.");
1720 return BAD_VALUE;
1721 }
1722
1723 if (fd_array_obj->parent != fdaParent) {
1724 ALOGE("Parent handle of file-descriptor array not correct.");
1725 return BAD_VALUE;
1726 }
1727
1728 if (fd_array_obj->parent_offset != offsetof(native_handle_t, data)) {
1729 ALOGE("FD array object not properly offset in parent.");
1730 return BAD_VALUE;
1731 }
1732
1733 return OK;
1734 }
1735
1736 status_t Parcel::readNullableEmbeddedNativeHandle(size_t parent_buffer_handle,
1737 size_t parent_offset,
1738 const native_handle_t **handle) const
1739 {
1740 return readNullableNativeHandleNoDup(handle, true /* embedded */, parent_buffer_handle,
1741 parent_offset);
1742 }
1743
1744 status_t Parcel::readNativeHandleNoDup(const native_handle_t **handle) const
1745 {
1746 status_t status = readNullableNativeHandleNoDup(handle);
1747 if (status == OK && *handle == nullptr) {
1748 return UNEXPECTED_NULL;
1749 }
1750 return status;
1751 }
1752
1753 status_t Parcel::readNullableNativeHandleNoDup(const native_handle_t **handle) const
1754 {
1755 return readNullableNativeHandleNoDup(handle, false /* embedded */);
1756 }
1757
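// Walks the object table and close()s the descriptor of every
// BINDER_TYPE_FD object currently flattened into this Parcel.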
void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

size_t Parcel::ipcDataSize() const
{
    return mDataSize > mDataPos ? mDataSize : mDataPos;
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

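// ipcBufferSize() sums the lengths of all BINDER_TYPE_PTR buffers attached to
// this Parcel, rounding each one up to the 8-byte alignment the binder kernel
// driver requires, and returns 0 on overflow. For example, buffers of 5 and
// 12 bytes account for 8 + 16 = 24 bytes of kernel buffer space.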
#define BUFFER_ALIGNMENT_BYTES 8
size_t Parcel::ipcBufferSize() const
{
    size_t totalBuffersSize = 0;
    // Add size for BINDER_TYPE_PTR
    size_t i = mObjectsSize;
    while (i > 0) {
        i--;
        const binder_buffer_object* buffer
            = reinterpret_cast<binder_buffer_object*>(mData+mObjects[i]);
        if (isBuffer(*buffer)) {
            /* The binder kernel driver requires each buffer to be 8-byte
             * aligned */
            size_t alignedSize = (buffer->length + (BUFFER_ALIGNMENT_BYTES - 1))
                    & ~(BUFFER_ALIGNMENT_BYTES - 1);
            if (alignedSize > SIZE_MAX - totalBuffersSize) {
                ALOGE("ipcBufferSize(): invalid buffer sizes.");
                return 0;
            }
            totalBuffersSize += alignedSize;
        }
    }
    return totalBuffersSize;
}

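// Adopts a data buffer and object table owned by someone else (typically set
// up by IPCThreadState for kernel-provided transaction data): nothing is
// copied, and relFunc is invoked later to release the buffers. Object offsets
// must be strictly increasing and at least sizeof(flat_binder_object) apart;
// otherwise the whole object table is discarded.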
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    clearCache();
    mNumRef = 0;
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

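// Dumps the Parcel to a TextOutput: a hex dump of the raw data followed by
// one line per flattened object. BINDER_TYPE_PTR buffers additionally get a
// hex dump of their contents.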
void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            if (flat->type == BINDER_TYPE_PTR) {
                const binder_buffer_object* buffer
                    = reinterpret_cast<const binder_buffer_object*>(DATA+OBJS[i]);
                if (isBuffer(*buffer)) {
                    HexDump bufferDump((const uint8_t*)buffer->buffer, (size_t)buffer->length);
                    bufferDump.setSingleLineCutoff(0);
                    to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << " (buffer size " << buffer->length << "):";
                    to << indent << bufferDump << dedent;
                } else {
                    to << endl << "Object #" << i << " @ " << (void*)OBJS[i];
                }
            } else {
                to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                    << TypeCode(flat->type & 0x7f7f7f00)
                    << " = " << flat->binder;
            }
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

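// Releases the reference this Parcel holds on every flattened object
// (the inverse of acquireObjects()).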
void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this);
    }
}

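// Takes a reference on every flattened object in this Parcel so the objects
// stay valid for as long as the Parcel owns the data.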
void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const binder_object_header* flat
            = reinterpret_cast<binder_object_header*>(data+objects[i]);
        acquire_object(proc, *flat, this);
    }
}

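// freeData() releases the Parcel's buffers and then resets it to a pristine
// state; freeDataNoInit() only does the release. If the data belongs to
// another owner (mOwner is set), that owner's release callback is invoked;
// otherwise the objects are released, the global allocation counters are
// updated, and the buffers are free()d.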
void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}

void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
            if (mDataCapacity <= gParcelGlobalAllocSize) {
                gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
            } else {
                gParcelGlobalAllocSize = 0;
            }
            if (gParcelGlobalAllocCount > 0) {
                gParcelGlobalAllocCount--;
            }
            pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
            free(mData);
        }
        if (mObjects) free(mObjects);
    }
}

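// Grows the data buffer so that at least len more bytes fit, expanding the
// current size by roughly 1.5x to amortize repeated writes. For example,
// appending 8 bytes to a 100-byte Parcel asks continueWrite() for
// ((100 + 8) * 3) / 2 = 162 bytes of capacity.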
status_t Parcel::growData(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t newSize = ((mDataSize+len)*3)/2;
    return (newSize <= mDataSize)
            ? (status_t) NO_MEMORY
            : continueWrite(newSize);
}

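// Rewinds the Parcel for a fresh write of up to `desired` bytes: existing
// objects are released, the object table is cleared, and the data buffer is
// reallocated (or, if another owner holds the data, handed back first).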
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = (uint8_t*)realloc(mData, desired);
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocSize -= mDataCapacity;
        if (!mData) {
            gParcelGlobalAllocCount++;
        }
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    free(mObjects);
    mObjects = NULL;
    mObjectsSize = mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    clearCache();
    mNumRef = 0;
    mFdsKnown = true;
    mAllowFds = true;

    return NO_ERROR;
}

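// Resizes the data buffer to `desired` bytes while keeping as much existing
// content as possible. Three cases:
//   1. The data is owned by someone else (mOwner): copy what fits into a
//      freshly malloc()ed buffer, acquire only the surviving objects, and
//      hand the old buffers back to the owner.
//   2. We already own data: drop references on objects that fall beyond the
//      new size, then realloc() only when growing.
//   3. No data yet: simply malloc() the first buffer.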
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize = mObjectsSize;
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            while (objectsSize > 0) {
                if (mObjects[objectsSize-1] < desired)
                    break;
                objectsSize--;
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // possession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = NULL;

        if (objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = mObjectsSize;
            mObjectsSize = objectsSize;
            acquireObjects();
            mObjectsSize = oldObjectsSize;
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && mObjects) {
            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
        }
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
        mOwner = NULL;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mObjects = objects;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        mObjectsSize = mObjectsCapacity = objectsSize;
        mNextObjectHint = 0;

        clearCache();
    } else if (mData) {
        if (objectsSize < mObjectsSize) {
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i=objectsSize; i<mObjectsSize; i++) {
                const flat_binder_object* flat
                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
                if (flat->type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }
            binder_size_t* objects =
                (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
            if (objects) {
                mObjects = objects;
            }
            mObjectsSize = objectsSize;
            mNextObjectHint = 0;

            clearCache();
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = (uint8_t*)realloc(mData, desired);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                        desired);
                pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
                mData = data;
                mDataCapacity = desired;
            } else if (desired > mDataCapacity) {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data. Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        if (!(mDataCapacity == 0 && mObjects == NULL
              && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;
        pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

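// Resets all member fields to their default (empty) values and, on first use,
// caches the process's RLIMIT_NOFILE soft limit in gMaxFds (falling back to
// 1024 if getrlimit() fails).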
void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = 0;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = NULL;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mOwner = NULL;
    clearCache();
    mNumRef = 0;

    // Racing initializations are benign: they can only write the same value.
    if (gMaxFds == 0) {
        struct rlimit result;
        if (!getrlimit(RLIMIT_NOFILE, &result)) {
            gMaxFds = (size_t)result.rlim_cur;
            //ALOGI("parcel fd limit set to %zu", gMaxFds);
        } else {
            ALOGW("Unable to getrlimit: %s", strerror(errno));
            gMaxFds = 1024;
        }
    }
}

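// Scans the object table to determine whether the Parcel carries any
// BINDER_TYPE_FD objects and caches the result in mHasFds/mFdsKnown.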
void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

} // namespace hardware
} // namespace android