1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <endian.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <inttypes.h>
24 #include <pthread.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <sys/mman.h>
29 #include <sys/resource.h>
30 #include <sys/stat.h>
31 #include <sys/types.h>
32 #include <unistd.h>
33 #include <algorithm>
34
35 #include <binder/Binder.h>
36 #include <binder/BpBinder.h>
37 #include <binder/Functional.h>
38 #include <binder/IPCThreadState.h>
39 #include <binder/Parcel.h>
40 #include <binder/ProcessState.h>
41 #include <binder/Stability.h>
42 #include <binder/Status.h>
43 #include <binder/TextOutput.h>
44
45 #ifndef BINDER_DISABLE_BLOB
46 #include <cutils/ashmem.h>
47 #endif
48 #include <utils/String16.h>
49 #include <utils/String8.h>
50
51 #include "OS.h"
52 #include "RpcState.h"
53 #include "Static.h"
54 #include "Utils.h"
55
56 // A lot of code in this file uses definitions from the
57 // Linux kernel header for Binder <linux/android/binder.h>
58 // which is included indirectly via "binder_module.h".
59 // Non-Linux OSes do not have that header, so libbinder should be
60 // built for those targets without kernel binder support, i.e.,
61 // without BINDER_WITH_KERNEL_IPC. For this reason, all code in this
62 // file that depends on kernel binder, including the header itself,
63 // is conditional on BINDER_WITH_KERNEL_IPC.
64 #ifdef BINDER_WITH_KERNEL_IPC
65 #include <linux/sched.h>
66 #include "binder_module.h"
67 #else // BINDER_WITH_KERNEL_IPC
68 // Needed by {read,write}Pointer
69 typedef uintptr_t binder_uintptr_t;
70 #endif // BINDER_WITH_KERNEL_IPC
71
72 #ifdef __BIONIC__
73 #include <android/fdsan.h>
74 #endif
75
76 #define LOG_REFS(...)
77 // #define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
78 #define LOG_ALLOC(...)
79 // #define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
80
81 // ---------------------------------------------------------------------------
82
83 // This macro should never be used at runtime, as a too large value
84 // of s could cause an integer overflow. Instead, you should always
85 // use the wrapper function pad_size()
86 #define PAD_SIZE_UNSAFE(s) (((s) + 3) & ~3UL)
87
pad_size(size_t s)88 static size_t pad_size(size_t s) {
89 if (s > (std::numeric_limits<size_t>::max() - 3)) {
90 LOG_ALWAYS_FATAL("pad size too big %zu", s);
91 }
92 return PAD_SIZE_UNSAFE(s);
93 }
94
95 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
96 #define STRICT_MODE_PENALTY_GATHER (1 << 31)
97
98 namespace android {
99
100 using namespace android::binder::impl;
101 using binder::borrowed_fd;
102 using binder::unique_fd;
103
104 // many things compile this into prebuilts on the stack
105 #ifdef __LP64__
106 static_assert(sizeof(Parcel) == 120);
107 #else
108 static_assert(sizeof(Parcel) == 60);
109 #endif
110
111 static std::atomic<size_t> gParcelGlobalAllocCount;
112 static std::atomic<size_t> gParcelGlobalAllocSize;
113
114 // Maximum number of file descriptors per Parcel.
115 constexpr size_t kMaxFds = 1024;
116
117 // Maximum size of a blob to transfer in-place.
118 [[maybe_unused]] static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
119
120 #if defined(__BIONIC__)
FdTag(int fd,const void * old_addr,const void * new_addr)121 static void FdTag(int fd, const void* old_addr, const void* new_addr) {
122 if (android_fdsan_exchange_owner_tag) {
123 uint64_t old_tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
124 reinterpret_cast<uint64_t>(old_addr));
125 uint64_t new_tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
126 reinterpret_cast<uint64_t>(new_addr));
127 android_fdsan_exchange_owner_tag(fd, old_tag, new_tag);
128 }
129 }
FdTagClose(int fd,const void * addr)130 static void FdTagClose(int fd, const void* addr) {
131 if (android_fdsan_close_with_tag) {
132 uint64_t tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
133 reinterpret_cast<uint64_t>(addr));
134 android_fdsan_close_with_tag(fd, tag);
135 } else {
136 close(fd);
137 }
138 }
139 #else
// No-op stub for platforms without fdsan.
static void FdTag(int fd, const void* old_addr, const void* new_addr) {
    (void)fd;
    (void)old_addr;
    (void)new_addr;
}
// Stub for platforms without fdsan: no tag to verify, just close.
static void FdTagClose(int fd, const void* addr) {
    (void)addr;
    ::close(fd);
}
149 #endif
150
151 enum {
152 BLOB_INPLACE = 0,
153 BLOB_ASHMEM_IMMUTABLE = 1,
154 BLOB_ASHMEM_MUTABLE = 2,
155 };
156
157 #ifdef BINDER_WITH_KERNEL_IPC
acquire_object(const sp<ProcessState> & proc,const flat_binder_object & obj,const void * who)158 static void acquire_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
159 const void* who) {
160 switch (obj.hdr.type) {
161 case BINDER_TYPE_BINDER:
162 if (obj.binder) {
163 LOG_REFS("Parcel %p acquiring reference on local %llu", who, obj.cookie);
164 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
165 }
166 return;
167 case BINDER_TYPE_HANDLE: {
168 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
169 if (b != nullptr) {
170 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
171 b->incStrong(who);
172 }
173 return;
174 }
175 case BINDER_TYPE_FD: {
176 if (obj.cookie != 0) { // owned
177 FdTag(obj.handle, nullptr, who);
178 }
179 return;
180 }
181 }
182
183 ALOGD("Invalid object type 0x%08x", obj.hdr.type);
184 }
185
// Drops the strong reference (or closes the owned fd) that acquire_object()
// took on the object embedded in a parcel. `who` identifies the owner for
// ref-count debugging.
static void release_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            // Local binder: cookie holds the IBinder pointer.
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            // Remote binder: resolve the handle to its proxy, then unref it.
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            // note: this path is not used when mOwner, so the tag is also released
            // in 'closeFileDescriptors'
            if (obj.cookie != 0) { // owned
                FdTagClose(obj.handle, who);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.hdr.type);
}
215 #endif // BINDER_WITH_KERNEL_IPC
216
toRawFd(const std::variant<unique_fd,borrowed_fd> & v)217 static int toRawFd(const std::variant<unique_fd, borrowed_fd>& v) {
218 return std::visit([](const auto& fd) { return fd.get(); }, v);
219 }
220
// An RPC parcel is meaningless without a session, so abort immediately
// rather than crash later on a null dereference.
Parcel::RpcFields::RpcFields(const sp<RpcSession>& session) : mSession(session) {
    LOG_ALWAYS_FATAL_IF(mSession == nullptr);
}
224
finishFlattenBinder(const sp<IBinder> & binder)225 status_t Parcel::finishFlattenBinder(const sp<IBinder>& binder)
226 {
227 internal::Stability::tryMarkCompilationUnit(binder.get());
228 int16_t rep = internal::Stability::getRepr(binder.get());
229 return writeInt32(rep);
230 }
231
finishUnflattenBinder(const sp<IBinder> & binder,sp<IBinder> * out) const232 status_t Parcel::finishUnflattenBinder(
233 const sp<IBinder>& binder, sp<IBinder>* out) const
234 {
235 int32_t stability;
236 status_t status = readInt32(&stability);
237 if (status != OK) return status;
238
239 status = internal::Stability::setRepr(binder.get(), static_cast<int16_t>(stability),
240 true /*log*/);
241 if (status != OK) return status;
242
243 *out = binder;
244 return OK;
245 }
246
247 #ifdef BINDER_WITH_KERNEL_IPC
schedPolicyMask(int policy,int priority)248 static constexpr inline int schedPolicyMask(int policy, int priority) {
249 return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
250 }
251 #endif // BINDER_WITH_KERNEL_IPC
252
// Writes `binder` into this parcel, using the RPC wire format when the
// parcel was marked for RPC, otherwise the kernel flat_binder_object format.
// A stability word is always appended via finishFlattenBinder().
status_t Parcel::flattenBinder(const sp<IBinder>& binder) {
    BBinder* local = nullptr;
    if (binder) local = binder->localBinder();
    if (local) local->setParceled();

    if (const auto* rpcFields = maybeRpcFields()) {
        // RPC format: a type tag, then (for non-null) the 64-bit address the
        // session assigned when the binder left this process.
        if (binder) {
            status_t status = writeInt32(RpcFields::TYPE_BINDER); // non-null
            if (status != OK) return status;
            uint64_t address;
            // TODO(b/167966510): need to undo this if the Parcel is not sent
            status = rpcFields->mSession->state()->onBinderLeaving(rpcFields->mSession, binder,
                                                                   &address);
            if (status != OK) return status;
            status = writeUint64(address);
            if (status != OK) return status;
        } else {
            status_t status = writeInt32(RpcFields::TYPE_BINDER_NULL); // null
            if (status != OK) return status;
        }
        return finishFlattenBinder(binder);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;

    // Default scheduling hint unless the local binder overrides it below.
    int schedBits = 0;
    if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
        schedBits = schedPolicyMask(SCHED_NORMAL, 19);
    }

    if (binder != nullptr) {
        if (!local) {
            // Remote binder: flatten as a handle reference.
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            } else {
                if (proxy->isRpcBinder()) {
                    ALOGE("Sending a socket binder over kernel binder is prohibited");
                    return INVALID_OPERATION;
                }
            }
            const int32_t handle = proxy ? proxy->getPrivateAccessor().binderHandle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.flags = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Local binder: flatten with its weak-ref and object pointers.
            int policy = local->getMinSchedulerPolicy();
            int priority = local->getMinSchedulerPriority();

            if (policy != 0 || priority != 0) {
                // override value, since it is set explicitly
                schedBits = schedPolicyMask(policy, priority);
            }
            obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            if (local->isInheritRt()) {
                obj.flags |= FLAT_BINDER_FLAG_INHERIT_RT;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // Null binder: a zeroed BINDER_TYPE_BINDER entry.
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.flags = 0;
        obj.binder = 0;
        obj.cookie = 0;
    }

    obj.flags |= schedBits;

    status_t status = writeObject(obj, false);
    if (status != OK) return status;

    return finishFlattenBinder(binder);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
338
// Reads a binder from this parcel: RPC format (type tag + 64-bit address)
// for RPC parcels, kernel flat_binder_object otherwise. The trailing
// stability word is consumed by finishUnflattenBinder().
status_t Parcel::unflattenBinder(sp<IBinder>* out) const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        int32_t isPresent;
        status_t status = readInt32(&isPresent);
        if (status != OK) return status;

        sp<IBinder> binder;

        if (isPresent & 1) {
            // Non-null: resolve the address through the session, then drop
            // any extra refs the remote granted beyond what we keep.
            uint64_t addr;
            if (status_t status = readUint64(&addr); status != OK) return status;
            if (status_t status =
                        rpcFields->mSession->state()->onBinderEntering(rpcFields->mSession, addr,
                                                                       &binder);
                status != OK)
                return status;
            if (status_t status =
                        rpcFields->mSession->state()->flushExcessBinderRefs(rpcFields->mSession,
                                                                            addr, binder);
                status != OK)
                return status;
        }

        return finishUnflattenBinder(binder, out);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    const flat_binder_object* flat = readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER: {
                // Local object: cookie carries the IBinder pointer.
                sp<IBinder> binder =
                        sp<IBinder>::fromExisting(reinterpret_cast<IBinder*>(flat->cookie));
                return finishUnflattenBinder(binder, out);
            }
            case BINDER_TYPE_HANDLE: {
                // Remote object: look up (or create) the proxy for the handle.
                sp<IBinder> binder =
                        ProcessState::self()->getStrongProxyForHandle(flat->handle);
                return finishUnflattenBinder(binder, out);
            }
        }
    }
    return BAD_TYPE;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
389
390 // ---------------------------------------------------------------------------
391
// Constructs an empty parcel; all real setup happens in initState().
Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}
397
// Releases the data buffer and any held object references/fds.
Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}
403
// Total bytes currently allocated by all Parcel instances in this process.
size_t Parcel::getGlobalAllocSize() {
    return gParcelGlobalAllocSize.load();
}
407
// Number of live Parcel allocations in this process.
size_t Parcel::getGlobalAllocCount() {
    return gParcelGlobalAllocCount.load();
}
411
// Raw pointer to the parcel's backing buffer.
const uint8_t* Parcel::data() const
{
    return mData;
}
416
// Logical data size: the larger of the recorded size (mDataSize) and the
// current cursor (mDataPos).
size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}
421
// Recorded data size only (ignores the cursor, unlike dataSize()).
size_t Parcel::dataBufferSize() const {
    return mDataSize;
}
425
// Bytes remaining between the cursor and the end of data. Aborts if the
// difference exceeds INT32_MAX, which would indicate pos > size (underflow)
// or corruption.
size_t Parcel::dataAvail() const
{
    size_t result = dataSize() - dataPosition();
    if (result > INT32_MAX) {
        LOG_ALWAYS_FATAL("result too big: %zu", result);
    }
    return result;
}
434
// Current read/write cursor position.
size_t Parcel::dataPosition() const
{
    return mDataPos;
}
439
// Allocated capacity of the backing buffer (>= dataSize()).
size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}
444
setDataSize(size_t size)445 status_t Parcel::setDataSize(size_t size)
446 {
447 if (size > INT32_MAX) {
448 // don't accept size_t values which may have come from an
449 // inadvertent conversion from a negative int.
450 return BAD_VALUE;
451 }
452
453 status_t err;
454 err = continueWrite(size);
455 if (err == NO_ERROR) {
456 mDataSize = size;
457 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
458 }
459 return err;
460 }
461
// Moves the read/write cursor. Positions above INT32_MAX abort, as they
// are likely sign-converted negative ints. Resets the object-lookup hint
// since readers may now revisit earlier objects.
void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        LOG_ALWAYS_FATAL("pos too big: %zu", pos);
    }

    mDataPos = pos;
    if (const auto* kernelFields = maybeKernelFields()) {
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
    }
}
476
setDataCapacity(size_t size)477 status_t Parcel::setDataCapacity(size_t size)
478 {
479 if (size > INT32_MAX) {
480 // don't accept size_t values which may have come from an
481 // inadvertent conversion from a negative int.
482 return BAD_VALUE;
483 }
484
485 if (size > mDataCapacity) return continueWrite(size);
486 return NO_ERROR;
487 }
488
// Replaces the parcel's contents with a copy of `buffer`. The fd-known
// flag is cleared because arbitrary bytes may encode fd objects that
// have not been scanned yet.
status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err = restartWrite(len);
    if (err == NO_ERROR) {
        memcpy(const_cast<uint8_t*>(data()), buffer, len);
        mDataSize = len;
        if (auto* kernelFields = maybeKernelFields()) {
            kernelFields->mFdsKnown = false;
        }
    }
    return err;
}
507
// Appends `len` bytes starting at `offset` of `parcel` to this parcel,
// duplicating any file descriptors and re-acquiring any binder objects that
// fall entirely inside the copied range. Both parcels must be in the same
// format (kernel vs RPC) and, for RPC, on the same session.
status_t Parcel::appendFrom(const Parcel* parcel, size_t offset, size_t len) {
    if (isForRpc() != parcel->isForRpc()) {
        ALOGE("Cannot append Parcel from one context to another. They may be different formats, "
              "and objects are specific to a context.");
        return BAD_TYPE;
    }
    if (isForRpc() && maybeRpcFields()->mSession != parcel->maybeRpcFields()->mSession) {
        ALOGE("Cannot append Parcels from different sessions");
        return BAD_TYPE;
    }

    status_t err;
    const uint8_t* data = parcel->mData;
    // NOTE(review): startPos is an int holding mDataPos (size_t); positions
    // are elsewhere capped at INT32_MAX, so this should not truncate — confirm.
    int startPos = mDataPos;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        auto* otherKernelFields = parcel->maybeKernelFields();
        LOG_ALWAYS_FATAL_IF(otherKernelFields == nullptr);

        const binder_size_t* objects = otherKernelFields->mObjects;
        size_t size = otherKernelFields->mObjectsSize;
        // Count objects in range: only objects wholly contained in
        // [offset, offset+len) are carried over.
        int firstIndex = -1, lastIndex = -2;
        for (int i = 0; i < (int)size; i++) {
            size_t off = objects[i];
            if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
                if (firstIndex == -1) {
                    firstIndex = i;
                }
                lastIndex = i;
            }
        }
        int numObjects = lastIndex - firstIndex + 1;
        if (numObjects > 0) {
            const sp<ProcessState> proc(ProcessState::self());
            // grow objects: 1.5x growth with explicit overflow checks.
            if (kernelFields->mObjectsCapacity < kernelFields->mObjectsSize + numObjects) {
                if ((size_t)numObjects > SIZE_MAX - kernelFields->mObjectsSize)
                    return NO_MEMORY; // overflow
                if (kernelFields->mObjectsSize + numObjects > SIZE_MAX / 3)
                    return NO_MEMORY; // overflow
                size_t newSize = ((kernelFields->mObjectsSize + numObjects) * 3) / 2;
                if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
                binder_size_t* objects = (binder_size_t*)realloc(kernelFields->mObjects,
                                                                 newSize * sizeof(binder_size_t));
                if (objects == (binder_size_t*)nullptr) {
                    return NO_MEMORY;
                }
                kernelFields->mObjects = objects;
                kernelFields->mObjectsCapacity = newSize;
            }

            // append and acquire objects, rebasing each offset into this
            // parcel's coordinate space.
            int idx = kernelFields->mObjectsSize;
            for (int i = firstIndex; i <= lastIndex; i++) {
                size_t off = objects[i] - offset + startPos;
                kernelFields->mObjects[idx++] = off;
                kernelFields->mObjectsSize++;

                flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(mData + off);

                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // If this is a file descriptor, we need to dup it so the
                    // new Parcel now owns its own fd, and can declare that we
                    // officially know we have fds.
                    // NOTE(review): fcntl can return -1 on failure and the
                    // result is stored unchecked — TODO confirm upstream intent.
                    flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
                    flat->cookie = 1;
                    kernelFields->mHasFds = kernelFields->mFdsKnown = true;
                    if (!mAllowFds) {
                        // Recorded but the loop continues, so all objects are
                        // still acquired before the error is returned.
                        err = FDS_NOT_ALLOWED;
                    }
                }

                acquire_object(proc, *flat, this);
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else {
        auto* rpcFields = maybeRpcFields();
        LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);
        auto* otherRpcFields = parcel->maybeRpcFields();
        if (otherRpcFields == nullptr) {
            return BAD_TYPE;
        }
        if (rpcFields->mSession != otherRpcFields->mSession) {
            return BAD_TYPE;
        }

        // The cursor is moved around below to patch fd indices in place;
        // restore it no matter how we exit.
        const size_t savedDataPos = mDataPos;
        auto scopeGuard = make_scope_guard([&]() { mDataPos = savedDataPos; });

        rpcFields->mObjectPositions.reserve(otherRpcFields->mObjectPositions.size());
        if (otherRpcFields->mFds != nullptr) {
            if (rpcFields->mFds == nullptr) {
                rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
            }
            rpcFields->mFds->reserve(otherRpcFields->mFds->size());
        }
        for (size_t i = 0; i < otherRpcFields->mObjectPositions.size(); i++) {
            const binder_size_t objPos = otherRpcFields->mObjectPositions[i];
            if (offset <= objPos && objPos < offset + len) {
                size_t newDataPos = objPos - offset + startPos;
                rpcFields->mObjectPositions.push_back(newDataPos);

                mDataPos = newDataPos;
                int32_t objectType;
                if (status_t status = readInt32(&objectType); status != OK) {
                    return status;
                }
                if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    continue;
                }

                if (!mAllowFds) {
                    return FDS_NOT_ALLOWED;
                }

                // Read FD, duplicate, and add to list.
                int32_t fdIndex;
                if (status_t status = readInt32(&fdIndex); status != OK) {
                    return status;
                }
                int oldFd = toRawFd(otherRpcFields->mFds->at(fdIndex));
                // To match kernel binder behavior, we always dup, even if the
                // FD was unowned in the source parcel.
                int newFd = -1;
                if (status_t status = binder::os::dupFileDescriptor(oldFd, &newFd); status != OK) {
                    ALOGW("Failed to duplicate file descriptor %d: %s", oldFd, strerror(-status));
                }
                rpcFields->mFds->emplace_back(unique_fd(newFd));
                // Fixup the index in the data.
                mDataPos = newDataPos + 4;
                if (status_t status = writeInt32(rpcFields->mFds->size() - 1); status != OK) {
                    return status;
                }
            }
        }
    }

    return err;
}
685
compareData(const Parcel & other)686 int Parcel::compareData(const Parcel& other) {
687 size_t size = dataSize();
688 if (size != other.dataSize()) {
689 return size < other.dataSize() ? -1 : 1;
690 }
691 return memcmp(data(), other.data(), size);
692 }
693
// Compares `len` bytes of this parcel at `thisOffset` against `other` at
// `otherOffset`, storing the memcmp result in *result. Returns BAD_VALUE if
// either window overflows or exceeds its parcel's data size.
status_t Parcel::compareDataInRange(size_t thisOffset, const Parcel& other, size_t otherOffset,
                                    size_t len, int* result) const {
    if (len > INT32_MAX || thisOffset > INT32_MAX || otherOffset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    size_t thisLimit;
    if (__builtin_add_overflow(thisOffset, len, &thisLimit) || thisLimit > mDataSize) {
        return BAD_VALUE;
    }
    size_t otherLimit;
    if (__builtin_add_overflow(otherOffset, len, &otherLimit) || otherLimit > other.mDataSize) {
        return BAD_VALUE;
    }
    *result = memcmp(data() + thisOffset, other.data() + otherOffset, len);
    return NO_ERROR;
}
712
// Whether file descriptors may currently be written into this parcel.
bool Parcel::allowFds() const
{
    return mAllowFds;
}
717
pushAllowFds(bool allowFds)718 bool Parcel::pushAllowFds(bool allowFds)
719 {
720 const bool origValue = mAllowFds;
721 if (!allowFds) {
722 mAllowFds = false;
723 }
724 return origValue;
725 }
726
// Restores the fd permission saved by a prior pushAllowFds() call.
void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}
731
// Whether this parcel currently contains any file descriptors. For kernel
// parcels the object table is lazily scanned the first time this is asked.
bool Parcel::hasFileDescriptors() const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        return rpcFields->mFds != nullptr && !rpcFields->mFds->empty();
    }
    auto* kernelFields = maybeKernelFields();
    if (!kernelFields->mFdsKnown) {
        scanForFds();
    }
    return kernelFields->mHasFds;
}
743
// Convenience wrapper: checks the entire data range for binder objects.
status_t Parcel::hasBinders(bool* result) const {
    status_t status = hasBindersInRange(0, dataSize(), result);
    ALOGE_IF(status != NO_ERROR, "Error %d calling hasBindersInRange()", status);
    return status;
}
749
// Debug helper: collects every local (BINDER_TYPE_BINDER) strong binder in
// the parcel by walking the object table. The read cursor is saved and
// restored, but this still temporarily mutates it (mutable member).
std::vector<sp<IBinder>> Parcel::debugReadAllStrongBinders() const {
    std::vector<sp<IBinder>> ret;

#ifdef BINDER_WITH_KERNEL_IPC
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return ret;
    }

    size_t initPosition = dataPosition();
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        const flat_binder_object* flat =
                reinterpret_cast<const flat_binder_object*>(mData + offset);
        // Only local binders; remote handles are skipped.
        if (flat->hdr.type != BINDER_TYPE_BINDER) continue;

        setDataPosition(offset);

        sp<IBinder> binder = readStrongBinder();
        if (binder != nullptr) ret.push_back(binder);
    }

    setDataPosition(initPosition);
#endif // BINDER_WITH_KERNEL_IPC

    return ret;
}
777
// Debug helper: collects the raw fd of every file descriptor in the parcel,
// for both kernel (object table walk) and RPC (mFds list) formats. The
// returned fds are not duplicated; ownership stays with the parcel.
std::vector<int> Parcel::debugReadAllFileDescriptors() const {
    std::vector<int> ret;

    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t initPosition = dataPosition();
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            binder_size_t offset = kernelFields->mObjects[i];
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + offset);
            if (flat->hdr.type != BINDER_TYPE_FD) continue;

            setDataPosition(offset);

            int fd = readFileDescriptor();
            // Reading at a known fd object offset must succeed.
            LOG_ALWAYS_FATAL_IF(fd == -1);
            ret.push_back(fd);
        }
        setDataPosition(initPosition);
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
#endif
    } else if (const auto* rpcFields = maybeRpcFields(); rpcFields && rpcFields->mFds) {
        for (const auto& fd : *rpcFields->mFds) {
            ret.push_back(toRawFd(fd));
        }
    }

    return ret;
}
808
// Sets *result to true if any binder object (local or handle) lies entirely
// within [offset, offset+len). Kernel-format parcels only; RPC parcels
// return INVALID_OPERATION.
status_t Parcel::hasBindersInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // Past the window: only safe to stop early when the object
                // table is known to be sorted.
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_BINDER || flat->hdr.type == BINDER_TYPE_HANDLE) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (const auto* rpcFields = maybeRpcFields()) {
        // RPC parcels do not support this query.
        return INVALID_OPERATION;
    }
    return NO_ERROR;
}
848
// Sets *result to true if any file descriptor object lies within
// [offset, offset+len). Supports both kernel (object table) and RPC
// (object-position list) formats.
status_t Parcel::hasFileDescriptorsInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // Past the window: early exit is only valid when the object
                // table is known to be sorted.
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (const auto* rpcFields = maybeRpcFields()) {
        for (uint32_t pos : rpcFields->mObjectPositions) {
            if (offset <= pos && pos < limit) {
                const auto* type = reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
                if (*type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    *result = true;
                    break;
                }
            }
        }
    }
    return NO_ERROR;
}
896
// Requests that the data buffer be zeroed on deallocation (e.g. for
// parcels carrying sensitive material).
void Parcel::markSensitive() const
{
    mDeallocZero = true;
}
901
// Chooses this parcel's wire format based on the destination binder: RPC
// format for socket binders, default kernel format otherwise. Must be
// called before any data is written.
void Parcel::markForBinder(const sp<IBinder>& binder) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr, "format must be set before data is written");

    if (binder && binder->remoteBinder() && binder->remoteBinder()->isRpcBinder()) {
        markForRpc(binder->remoteBinder()->getPrivateAccessor().rpcSession());
    }
}
909
// Switches this parcel to the RPC wire format, bound to `session`.
// Replaces the variant's kernel fields with RpcFields.
void Parcel::markForRpc(const sp<RpcSession>& session) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr && mOwner == nullptr,
                        "format must be set before data is written OR on IPC data");

    mVariantFields.emplace<RpcFields>(session);
}
916
// True when this parcel uses the RPC (socket) wire format.
bool Parcel::isForRpc() const {
    return std::holds_alternative<RpcFields>(mVariantFields);
}
920
// Records where the work-source header lives in the data, so it can later
// be rewritten (replaceCallingWorkSourceUid) or read back. No-op for RPC
// parcels and after the first call.
void Parcel::updateWorkSourceRequestHeaderPosition() const {
    auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return;
    }

    // Only update the request headers once. We only want to point
    // to the first headers read/written.
    if (!kernelFields->mRequestHeaderPresent) {
        kernelFields->mWorkSourceRequestHeaderPosition = dataPosition();
        kernelFields->mRequestHeaderPresent = true;
    }
}
934
935 #ifdef BINDER_WITH_KERNEL_IPC
936
937 #if defined(__ANDROID__)
938
939 #if defined(__ANDROID_VNDK__)
940 constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
941 #elif defined(__ANDROID_RECOVERY__)
942 constexpr int32_t kHeader = B_PACK_CHARS('R', 'E', 'C', 'O');
943 #else
944 constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
945 #endif
946
947 #else // ANDROID not defined
948
949 // If kernel binder is used in new environments, we need to make sure it's separated
950 // out and has a separate header.
951 constexpr int32_t kHeader = B_PACK_CHARS('U', 'N', 'K', 'N');
952 #endif
953
954 #endif // BINDER_WITH_KERNEL_IPC
955
956 // Write RPC headers. (previously just the interface token)
// String16 convenience overload; forwards to the (char16_t*, len) form.
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    return writeInterfaceToken(interface.c_str(), interface.size());
}
961
// Writes the transaction header (kernel parcels only: strict-mode policy,
// work-source uid, and the kHeader marker) followed by the interface name.
status_t Parcel::writeInterfaceToken(const char16_t* str, size_t len) {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        const IPCThreadState* threadState = IPCThreadState::self();
        writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
        // Remember where the work-source word sits so it can be patched later.
        updateWorkSourceRequestHeaderPosition();
        writeInt32(threadState->shouldPropagateWorkSource() ? threadState->getCallingWorkSourceUid()
                                                            : IPCThreadState::kUnsetWorkSource);
        writeInt32(kHeader);
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // currently the interface identification token is just its name as a string
    return writeString16(str, len);
}
980
replaceCallingWorkSourceUid(uid_t uid)981 bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
982 {
983 auto* kernelFields = maybeKernelFields();
984 if (kernelFields == nullptr) {
985 return false;
986 }
987 if (!kernelFields->mRequestHeaderPresent) {
988 return false;
989 }
990
991 const size_t initialPosition = dataPosition();
992 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
993 status_t err = writeInt32(uid);
994 setDataPosition(initialPosition);
995 return err == NO_ERROR;
996 }
997
readCallingWorkSourceUid() const998 uid_t Parcel::readCallingWorkSourceUid() const
999 {
1000 auto* kernelFields = maybeKernelFields();
1001 if (kernelFields == nullptr) {
1002 return false;
1003 }
1004 if (!kernelFields->mRequestHeaderPresent) {
1005 return IPCThreadState::kUnsetWorkSource;
1006 }
1007
1008 const size_t initialPosition = dataPosition();
1009 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
1010 uid_t uid = readInt32();
1011 setDataPosition(initialPosition);
1012 return uid;
1013 }
1014
checkInterface(IBinder * binder) const1015 bool Parcel::checkInterface(IBinder* binder) const
1016 {
1017 return enforceInterface(binder->getInterfaceDescriptor());
1018 }
1019
enforceInterface(const String16 & interface,IPCThreadState * threadState) const1020 bool Parcel::enforceInterface(const String16& interface,
1021 IPCThreadState* threadState) const
1022 {
1023 return enforceInterface(interface.c_str(), interface.size(), threadState);
1024 }
1025
// Counterpart of writeInterfaceToken(): for kernel-binder parcels, first
// consumes the request header (strict-mode policy, work-source UID,
// build marker), then compares the interface descriptor string against
// the expected one. Returns false on mismatch.
bool Parcel::enforceInterface(const char16_t* interface,
                              size_t len,
                              IPCThreadState* threadState) const
{
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        // StrictModePolicy.
        int32_t strictPolicy = readInt32();
        if (threadState == nullptr) {
            threadState = IPCThreadState::self();
        }
        if ((threadState->getLastTransactionBinderFlags() & IBinder::FLAG_ONEWAY) != 0) {
            // For one-way calls, the callee is running entirely
            // disconnected from the caller, so disable StrictMode entirely.
            // Not only does disk/network usage not impact the caller, but
            // there's no way to communicate back violations anyway.
            threadState->setStrictModePolicy(0);
        } else {
            threadState->setStrictModePolicy(strictPolicy);
        }
        // WorkSource.
        updateWorkSourceRequestHeaderPosition();
        int32_t workSource = readInt32();
        threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
        // vendor header
        int32_t header = readInt32();

        // fuzzers skip this check, because it is for protecting the underlying ABI, but
        // we don't want it to reduce our coverage
        if (header != kHeader && !mServiceFuzzing) {
            ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader,
                  header);
            return false;
        }
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)threadState;
        return false;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // Interface descriptor: must match the expected name exactly
    // (same length, same characters).
    size_t parcel_interface_len;
    const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
    if (len == parcel_interface_len &&
        (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
        return true;
    } else {
        if (mServiceFuzzing) {
            // ignore. Theoretically, this could cause a few false positives, because
            // people could assume things about getInterfaceDescriptor if they pass
            // this point, but it would be extremely fragile. It's more important that
            // we fuzz with the above things read from the Parcel.
            return true;
        } else {
            ALOGW("**** enforceInterface() expected '%s' but read '%s'",
                  String8(interface, len).c_str(),
                  String8(parcel_interface, parcel_interface_len).c_str());
            return false;
        }
    }
}
1088
// Controls whether enforceNoDataAvail() actually performs its check.
void Parcel::setEnforceNoDataAvail(bool enforceNoDataAvail) {
    mEnforceNoDataAvail = enforceNoDataAvail;
}

// Marks this parcel as fuzzer input; relaxes header/descriptor checks in
// enforceInterface(). One-way switch: there is no way to clear the flag.
void Parcel::setServiceFuzzing() {
    mServiceFuzzing = true;
}

// Whether this parcel was marked via setServiceFuzzing().
bool Parcel::isServiceFuzzing() const {
    return mServiceFuzzing;
}
1100
enforceNoDataAvail() const1101 binder::Status Parcel::enforceNoDataAvail() const {
1102 if (!mEnforceNoDataAvail) {
1103 return binder::Status::ok();
1104 }
1105
1106 const auto n = dataAvail();
1107 if (n == 0) {
1108 return binder::Status::ok();
1109 }
1110 return binder::Status::
1111 fromExceptionCode(binder::Status::Exception::EX_BAD_PARCELABLE,
1112 String8::format("Parcel data not fully consumed, unread size: %zu",
1113 n));
1114 }
1115
objectsCount() const1116 size_t Parcel::objectsCount() const
1117 {
1118 if (const auto* kernelFields = maybeKernelFields()) {
1119 return kernelFields->mObjectsSize;
1120 }
1121 return 0;
1122 }
1123
// Returns the sticky error recorded on this parcel (NO_ERROR if none).
status_t Parcel::errorCheck() const
{
    return mError;
}

// Records a sticky error on this parcel, later visible via errorCheck().
void Parcel::setError(status_t err)
{
    mError = err;
}
1133
// Advances the write cursor by `len` bytes after a successful raw write,
// extending the logical data size if the cursor moved past it.
status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // Reject sizes that may stem from a negative-int conversion.
        return BAD_VALUE;
    }

    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataSize < mDataPos) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    return NO_ERROR;
}
1152
// Copies `len` raw bytes at the current write position WITHOUT padding
// to a 4-byte boundary (contrast with write()/writeInplace()).
status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // Reject sizes that may stem from a negative-int conversion.
        return BAD_VALUE;
    }

    const size_t end = mDataPos + len;
    if (end < mDataPos) {
        return BAD_VALUE; // size_t overflow
    }

    if (end > mDataCapacity) {
        // Not enough room: grow the buffer first, then do the copy.
        const status_t err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    memcpy(mData + mDataPos, data, len);
    return finishWrite(len);
}
1177
// Copies `len` bytes into the parcel; the stream is padded to a 4-byte
// boundary by writeInplace(). On failure, returns the parcel's sticky
// error.
status_t Parcel::write(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // Reject sizes that may stem from a negative-int conversion.
        return BAD_VALUE;
    }

    void* const dest = writeInplace(len);
    if (dest == nullptr) {
        return mError;
    }
    memcpy(dest, data, len);
    return NO_ERROR;
}
1193
// Reserves `len` bytes (rounded up to a 4-byte boundary) at the current
// write position and returns a pointer the caller may fill directly.
// Returns nullptr on oversized/overflowing length or allocation failure.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end? Clear the pad bytes of the final 32-bit
        // word so no stale buffer contents end up in the parcel; the mask
        // index is the number of pad bytes (1..3), and which end of the
        // word is cleared depends on endianness.
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    // Not enough capacity: grow, then jump back to the fast path above.
    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}
1239
// Converts a UTF-8 string to UTF-16 and writes it in the writeString16()
// wire format: int32 length (in char16_t units), payload, NUL terminator.
status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
    const uint8_t* strData = (uint8_t*)str.data();
    const size_t strLen= str.length();
    const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
    // Negative utf16Len indicates conversion failure; also reject lengths
    // that cannot be represented in the int32 length prefix.
    if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
        return BAD_VALUE;
    }

    status_t err = writeInt32(utf16Len);
    if (err) {
        return err;
    }

    // Allocate enough bytes to hold our converted string and its terminating NULL.
    void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
    if (!dst) {
        return NO_MEMORY;
    }

    utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);

    return NO_ERROR;
}
1263
1264
// -- Typed write() overloads --------------------------------------------
// These all forward to the templated writeData() helper, which handles
// the nullable wrappers (std::optional / std::unique_ptr) and vectors.

status_t Parcel::writeUtf8AsUtf16(const std::optional<std::string>& str) { return writeData(str); }
status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) { return writeData(str); }

status_t Parcel::writeString16(const std::optional<String16>& str) { return writeData(str); }
status_t Parcel::writeString16(const std::unique_ptr<String16>& str) { return writeData(str); }

status_t Parcel::writeByteVector(const std::vector<int8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<uint8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val){ return writeData(val); }
status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::optional<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::optional<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::optional<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::vector<float>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::optional<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::vector<double>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::optional<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::vector<bool>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::optional<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::vector<char16_t>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::optional<std::vector<char16_t>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val) { return writeData(val); }

status_t Parcel::writeString16Vector(const std::vector<String16>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::optional<std::vector<std::optional<String16>>>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::optional<std::vector<std::optional<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) { return writeData(val); }

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<unique_fd>& val) {
    return writeData(val);
}
status_t Parcel::writeUniqueFileDescriptorVector(const std::optional<std::vector<unique_fd>>& val) {
    return writeData(val);
}
status_t Parcel::writeUniqueFileDescriptorVector(
        const std::unique_ptr<std::vector<unique_fd>>& val) {
    return writeData(val);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::optional<std::vector<sp<IBinder>>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val) { return writeData(val); }

status_t Parcel::writeParcelable(const Parcelable& parcelable) { return writeData(parcelable); }
1326
// -- Typed read() overloads ---------------------------------------------
// Counterparts of the write delegates above; all forward to the
// templated readData() helper.

status_t Parcel::readUtf8FromUtf16(std::optional<std::string>* str) const { return readData(str); }
status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const { return readData(str); }

status_t Parcel::readString16(std::optional<String16>* pArg) const { return readData(pArg); }
status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const { return readData(pArg); }

status_t Parcel::readByteVector(std::vector<int8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::vector<uint8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::optional<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::optional<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::optional<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::optional<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::vector<float>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::optional<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::vector<double>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::optional<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::vector<bool>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::optional<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::vector<char16_t>* val) const { return readData(val); }

status_t Parcel::readString16Vector(
        std::optional<std::vector<std::optional<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(std::vector<String16>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::optional<std::vector<std::optional<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const { return readData(val); }

status_t Parcel::readUniqueFileDescriptorVector(std::optional<std::vector<unique_fd>>* val) const {
    return readData(val);
}
status_t Parcel::readUniqueFileDescriptorVector(
        std::unique_ptr<std::vector<unique_fd>>* val) const {
    return readData(val);
}
status_t Parcel::readUniqueFileDescriptorVector(std::vector<unique_fd>* val) const {
    return readData(val);
}

status_t Parcel::readStrongBinderVector(std::optional<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const { return readData(val); }

status_t Parcel::readParcelable(Parcelable* parcelable) const { return readData(parcelable); }
1388
// 32-bit scalar writers; both delegate to the aligned-write helper.
status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}
1398
// Writes an int32 array as: int32 count (or -1 for a null array),
// followed by the raw elements.
status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
    if (len > INT32_MAX) {
        // Reject sizes that may stem from a negative-int conversion.
        return BAD_VALUE;
    }
    if (val == nullptr) {
        return writeInt32(-1);
    }

    const status_t countStatus = writeInt32(static_cast<uint32_t>(len));
    if (countStatus != NO_ERROR) {
        return countStatus;
    }
    return write(val, len * sizeof(*val));
}
// Writes a byte array as: int32 count (or -1 for a null array),
// followed by the raw bytes.
status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
    if (len > INT32_MAX) {
        // Reject sizes that may stem from a negative-int conversion.
        return BAD_VALUE;
    }
    if (val == nullptr) {
        return writeInt32(-1);
    }

    const status_t countStatus = writeInt32(static_cast<uint32_t>(len));
    if (countStatus != NO_ERROR) {
        return countStatus;
    }
    return write(val, len * sizeof(*val));
}
1431
// bool, char16_t, and int8_t are widened to int32 on the wire (see
// writeInt32), so every scalar occupies a full 4-byte slot.
status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

// 64-bit and floating-point scalars go through the aligned-write helper.
status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

// Pointers are widened to binder_uintptr_t — presumably so 32- and
// 64-bit processes agree on the field width; TODO confirm.
status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}
1466
#if defined(__mips__) && defined(__mips_hard_float)

// MIPS hard-float: route the double's bits through an integer union so
// writeAligned() sees an integral value — apparently an ABI workaround
// for this target; TODO confirm rationale.
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif
1487
// Writes a NUL-terminated C string including its terminator.
// BUG FIX: guard against nullptr — strlen(nullptr) is undefined
// behavior. Unlike writeString8/writeString16, the C-string format has
// no null encoding, so a null pointer is rejected as BAD_VALUE.
status_t Parcel::writeCString(const char* str)
{
    if (str == nullptr) return BAD_VALUE;
    return write(str, strlen(str)+1);
}
1492
writeString8(const String8 & str)1493 status_t Parcel::writeString8(const String8& str)
1494 {
1495 return writeString8(str.c_str(), str.size());
1496 }
1497
// Wire format: int32 length, then `len` bytes plus a NUL terminator
// (padded to 4 bytes by writeInplace). A null pointer encodes as
// length -1 with no payload.
status_t Parcel::writeString8(const char* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    // NOTE: Keep this logic in sync with android_os_Parcel.cpp
    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char*>(data+len) = 0;
            return NO_ERROR;
        }
        // writeInplace failed; report the parcel's sticky error.
        err = mError;
    }
    return err;
}
1515
writeString16(const String16 & str)1516 status_t Parcel::writeString16(const String16& str)
1517 {
1518 return writeString16(str.c_str(), str.size());
1519 }
1520
// Wire format: int32 length (in char16_t units), then the UTF-16 payload
// plus a char16_t NUL terminator (padded to 4 bytes by writeInplace).
// A null pointer encodes as length -1 with no payload.
status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    // NOTE: Keep this logic in sync with android_os_Parcel.cpp
    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        // From here on, `len` is a byte count rather than a char count.
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        // writeInplace failed; report the parcel's sticky error.
        err = mError;
    }
    return err;
}
1539
writeStrongBinder(const sp<IBinder> & val)1540 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1541 {
1542 return flattenBinder(val);
1543 }
1544
1545
writeRawNullableParcelable(const Parcelable * parcelable)1546 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1547 if (!parcelable) {
1548 return writeInt32(0);
1549 }
1550
1551 return writeParcelable(*parcelable);
1552 }
1553
#ifndef BINDER_DISABLE_NATIVE_HANDLE
// Flattens a native_handle as: numFds, numInts, each fd (duplicated into
// the parcel), then the raw ints. Handles whose version field does not
// match this build's native_handle size are rejected.
status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    // Stops at the first fd that fails to dup/write; err is re-checked
    // after the loop.
    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}
#endif
1578
// Writes a file descriptor into the parcel. For RPC parcels the fd is
// stored in rpcFields->mFds and a (type marker, index) pair is written
// to the data stream; for kernel binder a flat_binder_object is written.
// With takeOwnership, the parcel becomes responsible for closing fd.
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) {
    if (auto* rpcFields = maybeRpcFields()) {
        // Wrap the fd first so that, when takeOwnership is set, it is
        // closed (by unique_fd) on every early-return path below.
        std::variant<unique_fd, borrowed_fd> fdVariant;
        if (takeOwnership) {
            fdVariant = unique_fd(fd);
        } else {
            fdVariant = borrowed_fd(fd);
        }
        if (!mAllowFds) {
            return FDS_NOT_ALLOWED;
        }
        switch (rpcFields->mSession->getFileDescriptorTransportMode()) {
            case RpcSession::FileDescriptorTransportMode::NONE: {
                return FDS_NOT_ALLOWED;
            }
            case RpcSession::FileDescriptorTransportMode::UNIX:
            case RpcSession::FileDescriptorTransportMode::TRUSTY: {
                // Lazily create the fd table on first use.
                if (rpcFields->mFds == nullptr) {
                    rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
                }
                size_t dataPos = mDataPos;
                if (dataPos > UINT32_MAX) {
                    return NO_MEMORY;
                }
                // Wire format: type marker, then the index into mFds.
                if (status_t err = writeInt32(RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR); err != OK) {
                    return err;
                }
                if (status_t err = writeInt32(rpcFields->mFds->size()); err != OK) {
                    return err;
                }
                // Record where this object lives in the data stream.
                rpcFields->mObjectPositions.push_back(dataPos);
                rpcFields->mFds->push_back(std::move(fdVariant));
                return OK;
            }
        }
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;
    obj.hdr.type = BINDER_TYPE_FD;
    obj.flags = 0;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)fd;
    (void)takeOwnership;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1631
// Duplicates `fd` and writes the duplicate with ownership transferred to
// the parcel; the caller keeps its original fd. The duplicate is closed
// here if the write fails.
status_t Parcel::writeDupFileDescriptor(int fd)
{
    int duped;
    if (status_t err = binder::os::dupFileDescriptor(fd, &duped); err != OK) {
        return err;
    }
    const status_t writeStatus = writeFileDescriptor(duped, true /*takeOwnership*/);
    if (writeStatus != OK) {
        close(duped);
    }
    return writeStatus;
}
1644
writeParcelFileDescriptor(int fd,bool takeOwnership)1645 status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1646 {
1647 writeInt32(0);
1648 return writeFileDescriptor(fd, takeOwnership);
1649 }
1650
writeDupParcelFileDescriptor(int fd)1651 status_t Parcel::writeDupParcelFileDescriptor(int fd)
1652 {
1653 int dupFd;
1654 if (status_t err = binder::os::dupFileDescriptor(fd, &dupFd); err != OK) {
1655 return err;
1656 }
1657 status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
1658 if (err != OK) {
1659 close(dupFd);
1660 }
1661 return err;
1662 }
1663
// Writes a duplicate of the owned fd; `fd` itself is left untouched.
status_t Parcel::writeUniqueFileDescriptor(const unique_fd& fd) {
    return writeDupFileDescriptor(fd.get());
}
1667
// Reserves a writable region of `len` bytes for the caller: inline in the
// parcel for small payloads (or when fds are forbidden), otherwise backed by
// an ashmem region whose fd is written into the parcel. On success `outBlob`
// holds the mapping; mutableCopy controls whether the receiver may write it.
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
#ifdef BINDER_DISABLE_BLOB
    (void)len;
    (void)mutableCopy;
    (void)outBlob;
    return INVALID_OPERATION;
#else
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        // fd == -1 marks an in-place (non-ashmem) blob.
        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                // Revoke write access for the receiver; our existing mapping
                // above stays writable so the caller can still fill the blob.
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    // On success the parcel owns the fd; outBlob keeps the
                    // mapping and we return without the cleanup below.
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        // Error path: drop the mapping (no-op if mmap failed).
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
#endif
}
1729
writeDupImmutableBlobFileDescriptor(int fd)1730 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1731 {
1732 // Must match up with what's done in writeBlob.
1733 if (!mAllowFds) return FDS_NOT_ALLOWED;
1734 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1735 if (status) return status;
1736 return writeDupFileDescriptor(fd);
1737 }
1738
write(const FlattenableHelperInterface & val)1739 status_t Parcel::write(const FlattenableHelperInterface& val)
1740 {
1741 status_t err;
1742
1743 // size if needed
1744 const size_t len = val.getFlattenedSize();
1745 const size_t fd_count = val.getFdCount();
1746
1747 if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
1748 // don't accept size_t values which may have come from an
1749 // inadvertent conversion from a negative int.
1750 return BAD_VALUE;
1751 }
1752
1753 err = this->writeInt32(len);
1754 if (err) return err;
1755
1756 err = this->writeInt32(fd_count);
1757 if (err) return err;
1758
1759 // payload
1760 void* const buf = this->writeInplace(len);
1761 if (buf == nullptr)
1762 return BAD_VALUE;
1763
1764 int* fds = nullptr;
1765 if (fd_count) {
1766 fds = new (std::nothrow) int[fd_count];
1767 if (fds == nullptr) {
1768 ALOGE("write: failed to allocate requested %zu fds", fd_count);
1769 return BAD_VALUE;
1770 }
1771 }
1772
1773 err = val.flatten(buf, len, fds, fd_count);
1774 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1775 err = this->writeDupFileDescriptor( fds[i] );
1776 }
1777
1778 if (fd_count) {
1779 delete [] fds;
1780 }
1781
1782 return err;
1783 }
1784
// Appends a flat_binder_object to the parcel, recording its offset in the
// kernel object table so the driver can translate it. nullMetaData forces an
// object-table entry even when val.binder == 0 (used for fds, whose handle
// field aliases binder). Only valid on kernel (non-RPC) parcels.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr, "Can't write flat_binder_object to RPC Parcel");

#ifdef BINDER_WITH_KERNEL_IPC
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = kernelFields->mObjectsSize < kernelFields->mObjectsCapacity;
    if (enoughData && enoughObjects) {
    restart_write:
        // Fast path (re-entered via goto after growing whichever buffer was
        // too small).
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            kernelFields->mHasFds = kernelFields->mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            kernelFields->mObjects[kernelFields->mObjectsSize] = mDataPos;
            // Take a reference on the binder/fd so it stays alive while the
            // parcel holds it.
            acquire_object(ProcessState::self(), val, this);
            kernelFields->mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the offsets table by ~1.5x, with explicit overflow checks on
        // every intermediate multiplication/addition.
        if (kernelFields->mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
        if ((kernelFields->mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
        size_t newSize = ((kernelFields->mObjectsSize + 2) * 3) / 2;
        if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
        binder_size_t* objects =
                (binder_size_t*)realloc(kernelFields->mObjects, newSize * sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        kernelFields->mObjects = objects;
        kernelFields->mObjectsCapacity = newSize;
    }

    goto restart_write;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)val;
    (void)nullMetaData;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1840
writeNoException()1841 status_t Parcel::writeNoException()
1842 {
1843 binder::Status status;
1844 return status.writeToParcel(this);
1845 }
1846
// Verifies that a plain-data read of [mDataPos, upperBound) does not overlap
// any flat_binder_object recorded in the object table (reading object bytes
// as data could leak handles). Lazily sorts the offset table on first use;
// mNextObjectHint/mObjectsSorted are presumably mutable members, which is why
// this const method can update them — TODO confirm against the header.
status_t Parcel::validateReadData(size_t upperBound) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        // Can't validate RPC Parcel reads because the location of binder
        // objects is unknown.
        return OK;
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Don't allow non-object reads on object data
    if (kernelFields->mObjectsSorted || kernelFields->mObjectsSize <= 1) {
    data_sorted:
        // Expect to check only against the next object
        if (kernelFields->mNextObjectHint < kernelFields->mObjectsSize &&
            upperBound > kernelFields->mObjects[kernelFields->mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = kernelFields->mNextObjectHint;
            do {
                if (mDataPos < kernelFields->mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    if (!mServiceFuzzing) {
                        ALOGE("Attempt to read from protected data in Parcel %p", this);
                    }
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < kernelFields->mObjectsSize &&
                     upperBound > kernelFields->mObjects[nextObject]);
            kernelFields->mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }
    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = kernelFields->mObjects + kernelFields->mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > kernelFields->mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    kernelFields->mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = kernelFields->mObjects + 1;
         iter0 < kernelFields->mObjects + kernelFields->mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= kernelFields->mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    // After sorting, restart the hint from the beginning and take the sorted
    // fast path above.
    kernelFields->mNextObjectHint = 0;
    kernelFields->mObjectsSorted = true;
    goto data_sorted;
#else  // BINDER_WITH_KERNEL_IPC
    (void)upperBound;
    return NO_ERROR;
#endif // BINDER_WITH_KERNEL_IPC
}
1916
// Copies `len` bytes from the parcel into outData, advancing mDataPos by the
// padded (4-byte-aligned) length. Returns NOT_ENOUGH_DATA if the padded span
// does not fit, PERMISSION_DENIED if it overlaps a binder object.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // The first two comparisons also reject mDataPos+pad_size(len) overflow;
    // len <= pad_size(len) rejects pad_size overflowing to a small value.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
                return err;
            }
        }
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}
1944
// Returns a pointer directly into the parcel's buffer for `len` bytes (no
// copy) and advances mDataPos by the padded length; nullptr on failure.
// The returned pointer is only valid while the parcel's buffer is unchanged.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    // Same overflow-safe bounds checks as read(): guard mDataPos+pad_size(len)
    // wrap-around and pad_size() overflow.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
                return nullptr;
            }
        }

        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return nullptr;
}
1973
// Reads an out-vector size header and sanity-checks the implied allocation
// (elmSize * size) against overflow and a 1MB cap. A negative *size is
// returned as OK so the caller can treat it as a null vector.
status_t Parcel::readOutVectorSizeWithCheck(size_t elmSize, int32_t* size) const {
    if (status_t status = readInt32(size); status != OK) return status;
    if (*size < 0) return OK; // may be null, client to handle

    LOG_ALWAYS_FATAL_IF(elmSize > INT32_MAX, "Cannot have element as big as %zu", elmSize);

    // approximation, can't know max element size (e.g. if it makes heap
    // allocations)
    static_assert(sizeof(int) == sizeof(int32_t), "Android is LP64");
    int32_t allocationSize;
    // Overflow-checked elmSize * (*size); NO_MEMORY on overflow.
    if (__builtin_smul_overflow(elmSize, *size, &allocationSize)) return NO_MEMORY;

    // High limit of 1MB since something this big could never be returned. Could
    // probably scope this down, but might impact very specific usecases.
    constexpr int32_t kMaxAllocationSize = 1 * 1000 * 1000;

    if (allocationSize >= kMaxAllocationSize) {
        return NO_MEMORY;
    }

    return OK;
}
1996
// Reads one naturally-aligned trivially-copyable T from the parcel into
// *pArg, advancing mDataPos. Returns NOT_ENOUGH_DATA on short read and
// leaves *pArg untouched in that case.
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    // sizeof(T) must already be a multiple of the parcel alignment so no
    // padding is needed.
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject reads overlapping binder objects.
            status_t err = validateReadData(mDataPos + sizeof(T));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += sizeof(T);
                return err;
            }
        }

        memcpy(pArg, mData + mDataPos, sizeof(T));
        mDataPos += sizeof(T);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}
2020
2021 template<class T>
readAligned() const2022 T Parcel::readAligned() const {
2023 T result;
2024 if (readAligned(&result) != NO_ERROR) {
2025 result = 0;
2026 }
2027
2028 return result;
2029 }
2030
// Writes one naturally-aligned trivially-copyable T, growing the parcel's
// buffer first if needed (the goto re-enters the fast path after growth).
template<class T>
status_t Parcel::writeAligned(T val) {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
    restart_write:
        memcpy(mData + mDataPos, &val, sizeof(val));
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}
2046
// Reads a 32-bit signed int into *pArg; NOT_ENOUGH_DATA on short read.
status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}
2051
// Reads a 32-bit signed int, returning 0 on failure.
int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}
2056
// Reads a 32-bit unsigned int into *pArg; NOT_ENOUGH_DATA on short read.
status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}
2061
// Reads a 32-bit unsigned int, returning 0 on failure.
uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}
2066
// Reads a 64-bit signed int into *pArg; NOT_ENOUGH_DATA on short read.
status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}
2071
2072
// Reads a 64-bit signed int, returning 0 on failure.
int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}
2077
// Reads a 64-bit unsigned int into *pArg; NOT_ENOUGH_DATA on short read.
status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}
2082
// Reads a 64-bit unsigned int, returning 0 on failure.
uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}
2087
readPointer(uintptr_t * pArg) const2088 status_t Parcel::readPointer(uintptr_t *pArg) const
2089 {
2090 status_t ret;
2091 binder_uintptr_t ptr;
2092 ret = readAligned(&ptr);
2093 if (!ret)
2094 *pArg = ptr;
2095 return ret;
2096 }
2097
// Reads a pointer-sized value, returning 0 on failure.
uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}
2102
2103
// Reads a 4-byte float into *pArg; NOT_ENOUGH_DATA on short read.
status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}
2108
2109
// Reads a 4-byte float, returning 0 on failure.
float Parcel::readFloat() const
{
    return readAligned<float>();
}
2114
#if defined(__mips__) && defined(__mips_hard_float)

// MIPS hard-float build: move the double's raw bits through an integer union
// rather than reading a double directly.
status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

// Value-returning variant; the bit pattern of 0 is returned on failure.
double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

// Reads an 8-byte double into *pArg; NOT_ENOUGH_DATA on short read.
status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

// Reads an 8-byte double, returning 0 on failure.
double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif
2153
readBool(bool * pArg) const2154 status_t Parcel::readBool(bool *pArg) const
2155 {
2156 int32_t tmp = 0;
2157 status_t ret = readInt32(&tmp);
2158 *pArg = (tmp != 0);
2159 return ret;
2160 }
2161
// Reads a bool, returning false on failure (readInt32() yields 0).
bool Parcel::readBool() const
{
    return readInt32() != 0;
}
2166
readChar(char16_t * pArg) const2167 status_t Parcel::readChar(char16_t *pArg) const
2168 {
2169 int32_t tmp = 0;
2170 status_t ret = readInt32(&tmp);
2171 *pArg = char16_t(tmp);
2172 return ret;
2173 }
2174
// Reads a UTF-16 code unit, returning 0 on failure.
char16_t Parcel::readChar() const
{
    return char16_t(readInt32());
}
2179
readByte(int8_t * pArg) const2180 status_t Parcel::readByte(int8_t *pArg) const
2181 {
2182 int32_t tmp = 0;
2183 status_t ret = readInt32(&tmp);
2184 *pArg = int8_t(tmp);
2185 return ret;
2186 }
2187
// Reads a byte, returning 0 on failure.
int8_t Parcel::readByte() const
{
    return int8_t(readInt32());
}
2192
// Reads a UTF-16 string from the parcel and converts it to UTF-8 into *str.
// Returns UNEXPECTED_NULL for a null string, BAD_VALUE if the UTF-16 data
// cannot be length-measured.
status_t Parcel::readUtf8FromUtf16(std::string* str) const {
    size_t utf16Size = 0;
    const char16_t* src = readString16Inplace(&utf16Size);
    if (!src) {
        return UNEXPECTED_NULL;
    }

    // Save ourselves the trouble, we're done.
    if (utf16Size == 0u) {
        str->clear();
        return NO_ERROR;
    }

    // Allow for closing '\0'
    ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
    if (utf8Size < 1) {
        return BAD_VALUE;
    }
    // Note that while it is probably safe to assume string::resize keeps a
    // spare byte around for the trailing null, we still pass the size including the trailing null
    str->resize(utf8Size);
    utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
    // Drop the trailing NUL that utf16_to_utf8 wrote into the buffer.
    str->resize(utf8Size - 1);
    return NO_ERROR;
}
2218
// Returns a pointer to a NUL-terminated C string stored inline in the parcel,
// advancing mDataPos past the string (padded); nullptr if no terminator is
// found within the parcel's remaining bytes. The pointer aliases the parcel's
// buffer and is only valid while the parcel is alive and unmodified.
const char* Parcel::readCString() const
{
    if (mDataPos < mDataSize) {
        const size_t avail = mDataSize-mDataPos;
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return nullptr;
}
2235
readString8() const2236 String8 Parcel::readString8() const
2237 {
2238 size_t len;
2239 const char* str = readString8Inplace(&len);
2240 if (str) return String8(str, len);
2241
2242 if (!mServiceFuzzing) {
2243 ALOGE("Reading a NULL string not supported here.");
2244 }
2245
2246 return String8();
2247 }
2248
readString8(String8 * pArg) const2249 status_t Parcel::readString8(String8* pArg) const
2250 {
2251 size_t len;
2252 const char* str = readString8Inplace(&len);
2253 if (str) {
2254 pArg->setTo(str, len);
2255 return 0;
2256 } else {
2257 *pArg = String8();
2258 return UNEXPECTED_NULL;
2259 }
2260 }
2261
// Returns a pointer to an inline 8-bit string (length header + bytes + NUL),
// setting *outLen to its length; nullptr (and *outLen = 0) on a null/invalid
// entry. A missing NUL terminator is logged to the security log.
const char* Parcel::readString8Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char* str = (const char*)readInplace(size+1);
        if (str != nullptr) {
            if (str[size] == '\0') {
                return str;
            }
            // SafetyNet log: malformed string missing its terminator.
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2279
readString16() const2280 String16 Parcel::readString16() const
2281 {
2282 size_t len;
2283 const char16_t* str = readString16Inplace(&len);
2284 if (str) return String16(str, len);
2285
2286 if (!mServiceFuzzing) {
2287 ALOGE("Reading a NULL string not supported here.");
2288 }
2289
2290 return String16();
2291 }
2292
2293
readString16(String16 * pArg) const2294 status_t Parcel::readString16(String16* pArg) const
2295 {
2296 size_t len;
2297 const char16_t* str = readString16Inplace(&len);
2298 if (str) {
2299 pArg->setTo(str, len);
2300 return 0;
2301 } else {
2302 *pArg = String16();
2303 return UNEXPECTED_NULL;
2304 }
2305 }
2306
// Returns a pointer to an inline UTF-16 string (length header + code units +
// NUL), setting *outLen to its length in code units; nullptr (and *outLen=0)
// on a null/invalid entry. A missing terminator is logged to the security log.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != nullptr) {
            if (str[size] == u'\0') {
                return str;
            }
            // SafetyNet log: malformed string missing its terminator.
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2324
readStrongBinder(sp<IBinder> * val) const2325 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2326 {
2327 status_t status = readNullableStrongBinder(val);
2328 if (status == OK && !val->get()) {
2329 if (!mServiceFuzzing) {
2330 ALOGW("Expecting binder but got null!");
2331 }
2332 status = UNEXPECTED_NULL;
2333 }
2334 return status;
2335 }
2336
// Reads a strong binder that may legitimately be null.
status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflattenBinder(val);
}
2341
// Reads a strong binder, returning nullptr on error or a null entry.
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    // Note that a lot of code in Android reads binders by hand with this
    // method, and that code has historically been ok with getting nullptr
    // back (while ignoring error codes).
    readNullableStrongBinder(&val);
    return val;
}
2351
// Reads the exception header written by writeNoException()/Status and
// returns its exception code (0 for success).
int32_t Parcel::readExceptionCode() const
{
    binder::Status status;
    status.readFromParcel(*this);
    return status.exceptionCode();
}
2358
2359 #ifndef BINDER_DISABLE_NATIVE_HANDLE
// Reconstructs a native_handle from the parcel: fd count, int count, then
// the fds (dup'ed out of the parcel, CLOEXEC) and the raw ints. On any
// failure every fd dup'ed so far is closed and nullptr is returned. The
// caller owns the returned handle (native_handle_close + native_handle_delete).
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return nullptr;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return nullptr;

    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return nullptr;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        // Dup so the handle's fds outlive the parcel's copies.
        h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
        if (h->data[i] < 0) {
            // Roll back the fds dup'ed so far before bailing out.
            for (int j = 0; j < i; j++) {
                close(h->data[j]);
            }
            native_handle_delete(h);
            return nullptr;
        }
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        // native_handle_close closes all fds stored in the handle.
        native_handle_close(h);
        native_handle_delete(h);
        h = nullptr;
    }
    return h;
}
2392 #endif
2393
// Reads a file descriptor from the parcel WITHOUT transferring ownership:
// the returned fd is still owned by the parcel (or its mFds table). Returns
// a negative status (BAD_TYPE/BAD_VALUE) on failure.
int Parcel::readFileDescriptor() const {
    if (const auto* rpcFields = maybeRpcFields()) {
        // RPC parcel: the current position must be a recorded object start,
        // otherwise an attacker could alias arbitrary data as an fd index.
        if (!std::binary_search(rpcFields->mObjectPositions.begin(),
                                rpcFields->mObjectPositions.end(), mDataPos)) {
            if (!mServiceFuzzing) {
                ALOGW("Attempt to read file descriptor from Parcel %p at offset %zu that is not in "
                      "the object list",
                      this, mDataPos);
            }
            return BAD_TYPE;
        }

        int32_t objectType = readInt32();
        if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            return BAD_TYPE;
        }

        // The stream stores an index into the out-of-band fd table.
        int32_t fdIndex = readInt32();
        if (rpcFields->mFds == nullptr || fdIndex < 0 ||
            static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
            ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                  fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
            return BAD_VALUE;
        }
        return toRawFd(rpcFields->mFds->at(fdIndex));
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Kernel parcel: the fd lives in a flat_binder_object.
    const flat_binder_object* flat = readObject(true);

    if (flat && flat->hdr.type == BINDER_TYPE_FD) {
        return flat->handle;
    }

    return BAD_TYPE;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
2434
// Reads a ParcelFileDescriptor entry: an optional "comm fd" flag followed by
// the fd itself. If a comm channel is present, a DETACHED status is written
// to it so the sender knows the receiver detached the descriptor.
int Parcel::readParcelFileDescriptor() const {
    int32_t hasComm = readInt32();
    int fd = readFileDescriptor();
    if (hasComm != 0) {
        // detach (owned by the binder driver)
        int comm = readFileDescriptor();

        // warning: this must be kept in sync with:
        // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
        enum ParcelFileDescriptorStatus {
            DETACHED = 2,
        };

        // The peer reads the status as a big-endian int32 (Java
        // DataInputStream semantics — TODO confirm), hence the byte swap on
        // little-endian hosts.
#if BYTE_ORDER == BIG_ENDIAN
        const int32_t message = ParcelFileDescriptorStatus::DETACHED;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
        const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
#endif

        ssize_t written = TEMP_FAILURE_RETRY(
            ::write(comm, &message, sizeof(message)));

        if (written != sizeof(message)) {
            ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
                  written, strerror(errno));
            return BAD_TYPE;
        }
    }
    return fd;
}
2466
readUniqueFileDescriptor(unique_fd * val) const2467 status_t Parcel::readUniqueFileDescriptor(unique_fd* val) const {
2468 int got = readFileDescriptor();
2469
2470 if (got == BAD_TYPE) {
2471 return BAD_TYPE;
2472 }
2473
2474 int dupFd;
2475 if (status_t err = binder::os::dupFileDescriptor(got, &dupFd); err != OK) {
2476 return BAD_VALUE;
2477 }
2478
2479 val->reset(dupFd);
2480
2481 if (val->get() < 0) {
2482 return BAD_VALUE;
2483 }
2484
2485 return OK;
2486 }
2487
readUniqueParcelFileDescriptor(unique_fd * val) const2488 status_t Parcel::readUniqueParcelFileDescriptor(unique_fd* val) const {
2489 int got = readParcelFileDescriptor();
2490
2491 if (got == BAD_TYPE) {
2492 return BAD_TYPE;
2493 }
2494
2495 int dupFd;
2496 if (status_t err = binder::os::dupFileDescriptor(got, &dupFd); err != OK) {
2497 return BAD_VALUE;
2498 }
2499
2500 val->reset(dupFd);
2501
2502 if (val->get() < 0) {
2503 return BAD_VALUE;
2504 }
2505
2506 return OK;
2507 }
2508
// Reads a blob written by writeBlob(): either inline bytes or an ashmem fd
// that is mmap'ed here. On success outBlob holds the mapping; for the ashmem
// case the fd remains owned by the parcel.
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
#ifdef BINDER_DISABLE_BLOB
    (void)len;
    (void)outBlob;
    return INVALID_OPERATION;
#else
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    // Reject non-ashmem fds and regions smaller than the claimed length
    // before mapping (untrusted input).
    if (!ashmem_valid(fd)) {
        ALOGE("invalid fd");
        return BAD_VALUE;
    }
    int size = ashmem_get_size_region(fd);
    if (size < 0 || size_t(size) < len) {
        ALOGE("request size %zu does not match fd size %d", len, size);
        return BAD_VALUE;
    }
    void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
            MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(fd, ptr, len, isMutable);
    return NO_ERROR;
#endif
}
2551
read(FlattenableHelperInterface & val) const2552 status_t Parcel::read(FlattenableHelperInterface& val) const
2553 {
2554 // size
2555 const size_t len = this->readInt32();
2556 const size_t fd_count = this->readInt32();
2557
2558 if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
2559 // don't accept size_t values which may have come from an
2560 // inadvertent conversion from a negative int.
2561 return BAD_VALUE;
2562 }
2563
2564 // payload
2565 void const* const buf = this->readInplace(pad_size(len));
2566 if (buf == nullptr)
2567 return BAD_VALUE;
2568
2569 int* fds = nullptr;
2570 if (fd_count) {
2571 fds = new (std::nothrow) int[fd_count];
2572 if (fds == nullptr) {
2573 ALOGE("read: failed to allocate requested %zu fds", fd_count);
2574 return BAD_VALUE;
2575 }
2576 }
2577
2578 status_t err = NO_ERROR;
2579 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2580 int fd = this->readFileDescriptor();
2581 if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2582 err = BAD_VALUE;
2583 ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2584 i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2585 // Close all the file descriptors that were dup-ed.
2586 for (size_t j=0; j<i ;j++) {
2587 close(fds[j]);
2588 }
2589 }
2590 }
2591
2592 if (err == NO_ERROR) {
2593 err = val.unflatten(buf, len, fds, fd_count);
2594 }
2595
2596 if (fd_count) {
2597 delete [] fds;
2598 }
2599
2600 return err;
2601 }
2602
2603 #ifdef BINDER_WITH_KERNEL_IPC
// Reads a flat_binder_object at the current position, validating (unless it
// is a null binder and nullMetaData is false) that the position appears in
// the kernel object table. Returns nullptr on any failure. mNextObjectHint
// caches the last table index to make sequential reads O(1).
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        // Object reads are only meaningful on kernel parcels.
        return nullptr;
    }

    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = kernelFields->mObjects;
        const size_t N = kernelFields->mObjectsSize;
        size_t opos = kernelFields->mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                  this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        if (!mServiceFuzzing) {
            ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object "
                  "list",
                  this, DPOS);
        }
    }
    return nullptr;
}
2672 #endif // BINDER_WITH_KERNEL_IPC
2673
// Closes every file descriptor held by the parcel: for kernel parcels, each
// BINDER_TYPE_FD object in the data (always owned when they come from the
// kernel); for RPC parcels, the out-of-band mFds table (unique_fds close on
// reset; borrowed fds are left alone by their type).
void Parcel::closeFileDescriptors() {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        // Walk the object table backwards, closing every fd object.
        size_t i = kernelFields->mObjectsSize;
        if (i > 0) {
            // ALOGI("Closing file descriptors for %zu objects...", i);
        }
        while (i > 0) {
            i--;
            const flat_binder_object* flat =
                    reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                // ALOGI("Closing fd: %ld", flat->handle);
                // FDs from the kernel are always owned
                FdTagClose(flat->handle, this);
            }
        }
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
#endif // BINDER_WITH_KERNEL_IPC
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mFds.reset();
    }
}
2698
ipcData() const2699 uintptr_t Parcel::ipcData() const
2700 {
2701 return reinterpret_cast<uintptr_t>(mData);
2702 }
2703
ipcDataSize() const2704 size_t Parcel::ipcDataSize() const
2705 {
2706 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2707 }
2708
ipcObjects() const2709 uintptr_t Parcel::ipcObjects() const
2710 {
2711 if (const auto* kernelFields = maybeKernelFields()) {
2712 return reinterpret_cast<uintptr_t>(kernelFields->mObjects);
2713 }
2714 return 0;
2715 }
2716
ipcObjectsCount() const2717 size_t Parcel::ipcObjectsCount() const
2718 {
2719 if (const auto* kernelFields = maybeKernelFields()) {
2720 return kernelFields->mObjectsSize;
2721 }
2722 return 0;
2723 }
2724
// Adopts an externally-owned data buffer (typically filled in by the binder
// kernel driver) instead of copying it; `relFunc` is invoked later to return
// the memory to its owner. The object offset table is validated up front so
// malformed offsets cannot be dereferenced later.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
                                 size_t objectsCount, release_func relFunc) {
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    freeData();

    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr); // guaranteed by freeData.

    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    kernelFields->mObjects = const_cast<binder_size_t*>(objects);
    kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsCount;
    mOwner = relFunc;

#ifdef BINDER_WITH_KERNEL_IPC
    // Validate: offsets must be increasing and non-overlapping, and every
    // entry must be one of the object types libbinder supports.
    binder_size_t minOffset = 0;
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            kernelFields->mObjectsSize = 0;
            break;
        }
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + offset);
        uint32_t type = flat->hdr.type;
        if (!(type == BINDER_TYPE_BINDER || type == BINDER_TYPE_HANDLE ||
              type == BINDER_TYPE_FD)) {
            // We should never receive other types (eg BINDER_TYPE_FDA) as long as we don't support
            // them in libbinder. If we do receive them, it probably means a kernel bug; try to
            // recover gracefully by clearing out the objects.
            android_errorWriteLog(0x534e4554, "135930648");
            android_errorWriteLog(0x534e4554, "203847542");
            ALOGE("%s: unsupported type object (%" PRIu32 ") at offset %" PRIu64 "\n",
                  __func__, type, (uint64_t)offset);

            // WARNING: callers of ipcSetDataReference need to make sure they
            // don't rely on mObjectsSize in their release_func.
            kernelFields->mObjectsSize = 0;
            break;
        }
        if (type == BINDER_TYPE_FD) {
            // FDs from the kernel are always owned
            FdTag(flat->handle, nullptr, this);
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL_IF(objectsCount != 0,
                        "Non-zero objects count passed to Parcel with kernel driver disabled");
#endif // BINDER_WITH_KERNEL_IPC
}
2781
// RPC-binder analogue of ipcSetDataReference(): adopts an externally-owned
// buffer plus an object-position table and the ancillary fds that accompany
// it. The table is validated before ownership is taken; on failure the buffer
// is handed straight back via relFunc and BAD_VALUE is returned.
status_t Parcel::rpcSetDataReference(
        const sp<RpcSession>& session, const uint8_t* data, size_t dataSize,
        const uint32_t* objectTable, size_t objectTableSize,
        std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds, release_func relFunc) {
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    LOG_ALWAYS_FATAL_IF(session == nullptr);

    // The two tables must be parallel: one fd per object entry.
    if (objectTableSize != ancillaryFds.size()) {
        ALOGE("objectTableSize=%zu ancillaryFds.size=%zu", objectTableSize, ancillaryFds.size());
        relFunc(data, dataSize, nullptr, 0);
        return BAD_VALUE;
    }
    // Every object's type header must fit inside the buffer (overflow-safe).
    for (size_t i = 0; i < objectTableSize; i++) {
        uint32_t minObjectEnd;
        if (__builtin_add_overflow(objectTable[i], sizeof(RpcFields::ObjectType), &minObjectEnd) ||
            minObjectEnd >= dataSize) {
            ALOGE("received out of range object position: %" PRIu32 " (parcel size is %zu)",
                  objectTable[i], dataSize);
            relFunc(data, dataSize, nullptr, 0);
            return BAD_VALUE;
        }
    }

    freeData();
    markForRpc(session);

    auto* rpcFields = maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr); // guaranteed by markForRpc.

    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    mOwner = relFunc;

    rpcFields->mObjectPositions.reserve(objectTableSize);
    for (size_t i = 0; i < objectTableSize; i++) {
        rpcFields->mObjectPositions.push_back(objectTable[i]);
    }
    // Only allocate the fd vector when there is something to hold.
    if (!ancillaryFds.empty()) {
        rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
        *rpcFields->mFds = std::move(ancillaryFds);
    }

    return OK;
}
2828
print(std::ostream & to,uint32_t) const2829 void Parcel::print(std::ostream& to, uint32_t /*flags*/) const {
2830 to << "Parcel(";
2831
2832 if (errorCheck() != NO_ERROR) {
2833 const status_t err = errorCheck();
2834 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2835 } else if (dataSize() > 0) {
2836 const uint8_t* DATA = data();
2837 to << "\t" << HexDump(DATA, dataSize());
2838 #ifdef BINDER_WITH_KERNEL_IPC
2839 if (const auto* kernelFields = maybeKernelFields()) {
2840 const binder_size_t* OBJS = kernelFields->mObjects;
2841 const size_t N = objectsCount();
2842 for (size_t i = 0; i < N; i++) {
2843 const flat_binder_object* flat =
2844 reinterpret_cast<const flat_binder_object*>(DATA + OBJS[i]);
2845 to << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2846 << TypeCode(flat->hdr.type & 0x7f7f7f00) << " = " << flat->binder;
2847 }
2848 }
2849 #endif // BINDER_WITH_KERNEL_IPC
2850 } else {
2851 to << "NULL";
2852 }
2853
2854 to << ")";
2855 }
2856
releaseObjects()2857 void Parcel::releaseObjects()
2858 {
2859 auto* kernelFields = maybeKernelFields();
2860 if (kernelFields == nullptr) {
2861 return;
2862 }
2863
2864 #ifdef BINDER_WITH_KERNEL_IPC
2865 size_t i = kernelFields->mObjectsSize;
2866 if (i == 0) {
2867 return;
2868 }
2869 sp<ProcessState> proc(ProcessState::self());
2870 uint8_t* const data = mData;
2871 binder_size_t* const objects = kernelFields->mObjects;
2872 while (i > 0) {
2873 i--;
2874 const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
2875 release_object(proc, *flat, this);
2876 }
2877 #endif // BINDER_WITH_KERNEL_IPC
2878 }
2879
acquireObjects()2880 void Parcel::acquireObjects()
2881 {
2882 auto* kernelFields = maybeKernelFields();
2883 if (kernelFields == nullptr) {
2884 return;
2885 }
2886
2887 #ifdef BINDER_WITH_KERNEL_IPC
2888 size_t i = kernelFields->mObjectsSize;
2889 if (i == 0) {
2890 return;
2891 }
2892 const sp<ProcessState> proc(ProcessState::self());
2893 uint8_t* const data = mData;
2894 binder_size_t* const objects = kernelFields->mObjects;
2895 while (i > 0) {
2896 i--;
2897 const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
2898 acquire_object(proc, *flat, this);
2899 }
2900 #endif // BINDER_WITH_KERNEL_IPC
2901 }
2902
// Frees (or returns to its owner) everything the parcel holds, then resets
// all members to the freshly-constructed state. The order matters: state is
// only reinitialized after the memory has been released.
void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}
2908
// Releases the data buffer and object table without resetting member state;
// freeData() pairs this with initState(). Two cases: data borrowed from
// another owner (mOwner set) is handed back via the release callback, while
// self-allocated data is released after dropping object references.
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        auto* kernelFields = maybeKernelFields();
        // Close FDs before freeing, otherwise they will leak for kernel binder.
        closeFileDescriptors();
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        // Drop the references held on flattened binder objects before the
        // bytes they live in go away.
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            // Keep the global allocation statistics in sync.
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            if (mDeallocZero) {
                // Scrub contents before returning memory to the allocator.
                zeroMemory(mData, mDataSize);
            }
            free(mData);
        }
        auto* kernelFields = maybeKernelFields();
        if (kernelFields && kernelFields->mObjects) free(kernelFields->mObjects);
    }
}
2935
growData(size_t len)2936 status_t Parcel::growData(size_t len)
2937 {
2938 if (len > INT32_MAX) {
2939 // don't accept size_t values which may have come from an
2940 // inadvertent conversion from a negative int.
2941 return BAD_VALUE;
2942 }
2943
2944 if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
2945 if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
2946 size_t newSize = ((mDataSize+len)*3)/2;
2947 return (newSize <= mDataSize)
2948 ? (status_t) NO_MEMORY
2949 : continueWrite(std::max(newSize, (size_t) 128));
2950 }
2951
reallocZeroFree(uint8_t * data,size_t oldCapacity,size_t newCapacity,bool zero)2952 static uint8_t* reallocZeroFree(uint8_t* data, size_t oldCapacity, size_t newCapacity, bool zero) {
2953 if (!zero) {
2954 return (uint8_t*)realloc(data, newCapacity);
2955 }
2956 uint8_t* newData = (uint8_t*)malloc(newCapacity);
2957 if (!newData) {
2958 return nullptr;
2959 }
2960
2961 memcpy(newData, data, std::min(oldCapacity, newCapacity));
2962 zeroMemory(data, oldCapacity);
2963 free(data);
2964 return newData;
2965 }
2966
// Discards all contents and ensures `desired` bytes of capacity, leaving the
// parcel empty and positioned at 0 for writing.
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        // Data belongs to someone else: return it, then allocate our own.
        freeData();
        return continueWrite(desired);
    }

    // Drop references on any binder objects currently in the parcel.
    releaseObjects();

    uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
    if (!data && desired > mDataCapacity) {
        LOG_ALWAYS_FATAL("out of memory");
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    // data may be null here when desired == 0 (realloc-to-zero); only adopt
    // the new buffer in cases where it is meaningful.
    if (data || desired == 0) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        // Keep the global allocation statistics in sync with the resize.
        if (mDataCapacity > desired) {
            gParcelGlobalAllocSize -= (mDataCapacity - desired);
        } else {
            gParcelGlobalAllocSize += (desired - mDataCapacity);
        }

        if (!mData) {
            gParcelGlobalAllocCount++;
        }
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    // Reset object bookkeeping for whichever backend this parcel uses.
    if (auto* kernelFields = maybeKernelFields()) {
        free(kernelFields->mObjects);
        kernelFields->mObjects = nullptr;
        kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = 0;
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
        kernelFields->mHasFds = false;
        kernelFields->mFdsKnown = true;
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mObjectPositions.clear();
        rpcFields->mFds.reset();
    }
    mAllowFds = true;

    return NO_ERROR;
}
3024
// Grows or shrinks the parcel's capacity to `desired`, preserving existing
// contents where possible. Handles three ownership states: (1) mOwner set --
// data belongs to someone else, so copy into a fresh buffer and hand the old
// one back; (2) we own an existing buffer -- realloc in place; (3) no buffer
// yet -- first allocation.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    auto* kernelFields = maybeKernelFields();
    auto* rpcFields = maybeRpcFields();

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize =
            kernelFields ? kernelFields->mObjectsSize : rpcFields->mObjectPositions.size();
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            // Count how many leading objects still start before `desired`.
            if (kernelFields) {
                while (objectsSize > 0) {
                    if (kernelFields->mObjects[objectsSize - 1] < desired) break;
                    objectsSize--;
                }
            } else {
                while (objectsSize > 0) {
                    if (rpcFields->mObjectPositions[objectsSize - 1] < desired) break;
                    objectsSize--;
                }
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // posession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = nullptr;

        if (kernelFields && objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = kernelFields->mObjectsSize;
            kernelFields->mObjectsSize = objectsSize;
            acquireObjects();
            kernelFields->mObjectsSize = oldObjectsSize;
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                free(data);
                return status;
            }
        }

        // Copy the surviving bytes and object offsets into our new buffers.
        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && kernelFields && kernelFields->mObjects) {
            memcpy(objects, kernelFields->mObjects, objectsSize * sizeof(binder_size_t));
        }
        // ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        if (kernelFields) {
            // TODO(b/239222407): This seems wrong. We should only free FDs when
            // they are in a truncated section of the parcel.
            closeFileDescriptors();
        }
        // Hand the original buffer back to its owner.
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
        mOwner = nullptr;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        if (kernelFields) {
            kernelFields->mObjects = objects;
            kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
        }

    } else if (mData) {
        if (kernelFields && objectsSize < kernelFields->mObjectsSize) {
#ifdef BINDER_WITH_KERNEL_IPC
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i = objectsSize; i < kernelFields->mObjectsSize; i++) {
                const flat_binder_object* flat =
                        reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    kernelFields->mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }

            // Shrink (or drop) the object offset table itself.
            if (objectsSize == 0) {
                free(kernelFields->mObjects);
                kernelFields->mObjects = nullptr;
                kernelFields->mObjectsCapacity = 0;
            } else {
                binder_size_t* objects =
                        (binder_size_t*)realloc(kernelFields->mObjects,
                                                objectsSize * sizeof(binder_size_t));
                if (objects) {
                    kernelFields->mObjects = objects;
                    kernelFields->mObjectsCapacity = objectsSize;
                }
            }
            kernelFields->mObjectsSize = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
#else  // BINDER_WITH_KERNEL_IPC
            LOG_ALWAYS_FATAL("Non-zero numObjects for RPC Parcel");
#endif // BINDER_WITH_KERNEL_IPC
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                return status;
            }
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                          desired);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                mData = data;
                mDataCapacity = desired;
            } else {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            // Shrinking within existing capacity: just clamp size/position.
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data. Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        // With no data yet there should be no object bookkeeping either;
        // log loudly if that invariant is broken.
        if (!(mDataCapacity == 0 &&
              (kernelFields == nullptr ||
               (kernelFields->mObjects == nullptr && kernelFields->mObjectsCapacity == 0)))) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity,
                  kernelFields ? kernelFields->mObjects : nullptr,
                  kernelFields ? kernelFields->mObjectsCapacity : 0, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}
3223
// Drops RPC object bookkeeping beyond the first newObjectsSize entries,
// erasing the file descriptors that belonged to the truncated objects.
// Returns BAD_VALUE if a truncated entry references an invalid fd index.
status_t Parcel::truncateRpcObjects(size_t newObjectsSize) {
    auto* rpcFields = maybeRpcFields();
    if (newObjectsSize == 0) {
        // Dropping everything: clear both tables wholesale.
        rpcFields->mObjectPositions.clear();
        if (rpcFields->mFds) {
            rpcFields->mFds->clear();
        }
        return OK;
    }
    while (rpcFields->mObjectPositions.size() > newObjectsSize) {
        uint32_t pos = rpcFields->mObjectPositions.back();
        rpcFields->mObjectPositions.pop_back();
        // Read the object header to see whether this entry carried an fd.
        const auto type = *reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
        if (type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            // The fd index is stored immediately after the type field.
            const auto fdIndex =
                    *reinterpret_cast<const int32_t*>(mData + pos + sizeof(RpcFields::ObjectType));
            if (rpcFields->mFds == nullptr || fdIndex < 0 ||
                static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
                ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                      fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
                return BAD_VALUE;
            }
            // In practice, this always removes the last element.
            rpcFields->mFds->erase(rpcFields->mFds->begin() + fdIndex);
        }
    }
    return OK;
}
3252
// Resets every member to the state of a freshly constructed Parcel. Does NOT
// free previously held memory -- callers release it first (see freeData()).
void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = nullptr;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    // Default to kernel-binder bookkeeping; markForRpc() switches the
    // variant to RpcFields (see rpcSetDataReference()).
    mVariantFields.emplace<KernelFields>();
    mAllowFds = true;
    mDeallocZero = false;
    mOwner = nullptr;
    mEnforceNoDataAvail = true;
    mServiceFuzzing = false;
}
3270
scanForFds() const3271 void Parcel::scanForFds() const {
3272 auto* kernelFields = maybeKernelFields();
3273 if (kernelFields == nullptr) {
3274 return;
3275 }
3276 status_t status = hasFileDescriptorsInRange(0, dataSize(), &kernelFields->mHasFds);
3277 ALOGE_IF(status != NO_ERROR, "Error %d calling hasFileDescriptorsInRange()", status);
3278 kernelFields->mFdsKnown = true;
3279 }
3280
3281 #ifdef BINDER_WITH_KERNEL_IPC
// Deprecated alias kept only for ABI stability.
size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO(b/202029388): Remove method once ABI can be changed.
    return getOpenAshmemSize();
}
3289
getOpenAshmemSize() const3290 size_t Parcel::getOpenAshmemSize() const
3291 {
3292 auto* kernelFields = maybeKernelFields();
3293 if (kernelFields == nullptr) {
3294 return 0;
3295 }
3296
3297 size_t openAshmemSize = 0;
3298 #ifndef BINDER_DISABLE_BLOB
3299 for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
3300 const flat_binder_object* flat =
3301 reinterpret_cast<const flat_binder_object*>(mData + kernelFields->mObjects[i]);
3302
3303 // cookie is compared against zero for historical reasons
3304 // > obj.cookie = takeOwnership ? 1 : 0;
3305 if (flat->hdr.type == BINDER_TYPE_FD && flat->cookie != 0 && ashmem_valid(flat->handle)) {
3306 int size = ashmem_get_size_region(flat->handle);
3307 if (__builtin_add_overflow(openAshmemSize, size, &openAshmemSize)) {
3308 ALOGE("Overflow when computing ashmem size.");
3309 return SIZE_MAX;
3310 }
3311 }
3312 }
3313 #endif
3314 return openAshmemSize;
3315 }
3316 #endif // BINDER_WITH_KERNEL_IPC
3317
3318 // --- Parcel::Blob ---
3319
Blob()3320 Parcel::Blob::Blob() :
3321 mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
3322 }
3323
// Unmaps any mapped region via release().
Parcel::Blob::~Blob() {
    release();
}
3327
release()3328 void Parcel::Blob::release() {
3329 if (mFd != -1 && mData) {
3330 ::munmap(mData, mSize);
3331 }
3332 clear();
3333 }
3334
init(int fd,void * data,size_t size,bool isMutable)3335 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
3336 mFd = fd;
3337 mData = data;
3338 mSize = size;
3339 mMutable = isMutable;
3340 }
3341
clear()3342 void Parcel::Blob::clear() {
3343 mFd = -1;
3344 mData = nullptr;
3345 mSize = 0;
3346 mMutable = false;
3347 }
3348
3349 } // namespace android
3350