1 /* 2 * Copyright (C) 2005 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define LOG_TAG "hw-Parcel" 18 //#define LOG_NDEBUG 0 19 20 #include <errno.h> 21 #include <fcntl.h> 22 #include <inttypes.h> 23 #include <pthread.h> 24 #include <stdint.h> 25 #include <stdio.h> 26 #include <stdlib.h> 27 #include <sys/mman.h> 28 #include <sys/stat.h> 29 #include <sys/types.h> 30 #include <sys/resource.h> 31 #include <unistd.h> 32 33 #include <hwbinder/Binder.h> 34 #include <hwbinder/BpHwBinder.h> 35 #include <hwbinder/IPCThreadState.h> 36 #include <hwbinder/Parcel.h> 37 #include <hwbinder/ProcessState.h> 38 #include <hwbinder/TextOutput.h> 39 40 #include <cutils/ashmem.h> 41 #include <utils/Debug.h> 42 #include <utils/Log.h> 43 #include <utils/misc.h> 44 #include <utils/String8.h> 45 #include <utils/String16.h> 46 47 #include "binder_kernel.h" 48 #include <hwbinder/Static.h> 49 50 #define LOG_REFS(...) 51 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 52 #define LOG_ALLOC(...) 53 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 54 #define LOG_BUFFER(...) 55 // #define LOG_BUFFER(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__) 56 57 // --------------------------------------------------------------------------- 58 59 // This macro should never be used at runtime, as a too large value 60 // of s could cause an integer overflow. Instead, you should always 61 // use the wrapper function pad_size() 62 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3) 63 64 static size_t pad_size(size_t s) { 65 if (s > (std::numeric_limits<size_t>::max() - 3)) { 66 LOG_ALWAYS_FATAL("pad size too big %zu", s); 67 } 68 return PAD_SIZE_UNSAFE(s); 69 } 70 71 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER 72 #define STRICT_MODE_PENALTY_GATHER (0x40 << 16) 73 74 namespace android { 75 namespace hardware { 76 77 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER; 78 static size_t gParcelGlobalAllocSize = 0; 79 static size_t gParcelGlobalAllocCount = 0; 80 81 static size_t gMaxFds = 0; 82 83 void acquire_binder_object(const sp<ProcessState>& proc, 84 const flat_binder_object& obj, const void* who) 85 { 86 switch (obj.hdr.type) { 87 case BINDER_TYPE_BINDER: 88 if (obj.binder) { 89 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie); 90 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who); 91 } 92 return; 93 case BINDER_TYPE_WEAK_BINDER: 94 if (obj.binder) 95 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who); 96 return; 97 case BINDER_TYPE_HANDLE: { 98 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 99 if (b != nullptr) { 100 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get()); 101 b->incStrong(who); 102 } 103 return; 104 } 105 case BINDER_TYPE_WEAK_HANDLE: { 106 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 107 if (b != nullptr) b.get_refs()->incWeak(who); 108 return; 109 } 110 } 111 112 ALOGD("Invalid object type 0x%08x", obj.hdr.type); 113 } 114 115 void acquire_object(const sp<ProcessState>& proc, const binder_object_header& obj, 116 const void *who) { 117 switch (obj.type) { 118 case BINDER_TYPE_BINDER: 119 case BINDER_TYPE_WEAK_BINDER: 120 case BINDER_TYPE_HANDLE: 121 case BINDER_TYPE_WEAK_HANDLE: { 122 const flat_binder_object& fbo = reinterpret_cast<const flat_binder_object&>(obj); 123 acquire_binder_object(proc, fbo, who); 124 break; 125 } 126 } 127 } 128 129 void release_object(const sp<ProcessState>& proc, 130 const flat_binder_object& obj, const void* who) 131 { 132 switch (obj.hdr.type) { 133 case BINDER_TYPE_BINDER: 134 if (obj.binder) { 135 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie); 136 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who); 137 } 138 return; 139 case BINDER_TYPE_WEAK_BINDER: 140 if (obj.binder) 141 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who); 142 return; 143 case BINDER_TYPE_HANDLE: { 144 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); 145 if (b != nullptr) { 146 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get()); 147 b->decStrong(who); 148 } 149 return; 150 } 151 case BINDER_TYPE_WEAK_HANDLE: { 152 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); 153 if (b != nullptr) b.get_refs()->decWeak(who); 154 return; 155 } 156 case BINDER_TYPE_FD: { 157 if (obj.cookie != 0) { // owned 158 close(obj.handle); 159 } 160 return; 161 } 162 case BINDER_TYPE_PTR: { 163 // The relevant buffer is part of the transaction buffer and will be freed that way 164 return; 165 } 166 case BINDER_TYPE_FDA: { 167 // The enclosed file descriptors are closed in the kernel 168 return; 169 } 170 } 171 172 ALOGE("Invalid object type 0x%08x", obj.hdr.type); 173 } 174 175 inline static status_t finish_flatten_binder( 176 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out) 177 { 178 return out->writeObject(flat); 179 } 180 181 status_t flatten_binder(const sp<ProcessState>& /*proc*/, 182 const sp<IBinder>& binder, Parcel* out) 183 { 184 flat_binder_object obj = {}; 185 186 if (binder != nullptr) { 187 BHwBinder *local = binder->localBinder(); 188 if (!local) { 189 BpHwBinder *proxy = binder->remoteBinder(); 190 if (proxy == nullptr) { 191 ALOGE("null proxy"); 192 } 193 const int32_t handle = proxy ? proxy->handle() : 0; 194 obj.hdr.type = BINDER_TYPE_HANDLE; 195 obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS; 196 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */ 197 obj.handle = handle; 198 obj.cookie = 0; 199 } else { 200 // Get policy and convert it 201 int policy = local->getMinSchedulingPolicy(); 202 int priority = local->getMinSchedulingPriority(); 203 204 obj.flags = priority & FLAT_BINDER_FLAG_PRIORITY_MASK; 205 obj.flags |= FLAT_BINDER_FLAG_ACCEPTS_FDS | FLAT_BINDER_FLAG_INHERIT_RT; 206 obj.flags |= (policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT; 207 if (local->isRequestingSid()) { 208 obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX; 209 } 210 obj.hdr.type = BINDER_TYPE_BINDER; 211 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs()); 212 obj.cookie = reinterpret_cast<uintptr_t>(local); 213 } 214 } else { 215 obj.hdr.type = BINDER_TYPE_BINDER; 216 obj.binder = 0; 217 obj.cookie = 0; 218 } 219 220 return finish_flatten_binder(binder, obj, out); 221 } 222 223 inline static status_t finish_unflatten_binder( 224 BpHwBinder* /*proxy*/, const flat_binder_object& /*flat*/, 225 const Parcel& /*in*/) 226 { 227 return NO_ERROR; 228 } 229 230 status_t unflatten_binder(const sp<ProcessState>& proc, 231 const Parcel& in, sp<IBinder>* out) 232 { 233 const flat_binder_object* flat = in.readObject<flat_binder_object>(); 234 235 if (flat) { 236 switch (flat->hdr.type) { 237 case BINDER_TYPE_BINDER: 238 *out = reinterpret_cast<IBinder*>(flat->cookie); 239 return finish_unflatten_binder(nullptr, *flat, in); 240 case BINDER_TYPE_HANDLE: 241 *out = proc->getStrongProxyForHandle(flat->handle); 242 return finish_unflatten_binder( 243 static_cast<BpHwBinder*>(out->get()), *flat, in); 244 } 245 } 246 return BAD_TYPE; 247 } 248 249 // --------------------------------------------------------------------------- 250 251 Parcel::Parcel() 252 { 253 LOG_ALLOC("Parcel %p: constructing", this); 254 initState(); 255 } 256 257 Parcel::~Parcel() 258 { 259 freeDataNoInit(); 260 LOG_ALLOC("Parcel %p: destroyed", this); 261 } 262 263 size_t Parcel::getGlobalAllocSize() { 264 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 265 size_t size = gParcelGlobalAllocSize; 266 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 267 return size; 268 } 269 270 size_t Parcel::getGlobalAllocCount() { 271 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 272 size_t count = gParcelGlobalAllocCount; 273 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 274 return count; 275 } 276 277 const uint8_t* Parcel::data() const 278 { 279 return mData; 280 } 281 282 size_t Parcel::dataSize() const 283 { 284 return (mDataSize > mDataPos ? mDataSize : mDataPos); 285 } 286 287 size_t Parcel::dataAvail() const 288 { 289 size_t result = dataSize() - dataPosition(); 290 if (result > INT32_MAX) { 291 LOG_ALWAYS_FATAL("result too big: %zu", result); 292 } 293 return result; 294 } 295 296 size_t Parcel::dataPosition() const 297 { 298 return mDataPos; 299 } 300 301 size_t Parcel::dataCapacity() const 302 { 303 return mDataCapacity; 304 } 305 306 status_t Parcel::setDataSize(size_t size) 307 { 308 if (size > INT32_MAX) { 309 // don't accept size_t values which may have come from an 310 // inadvertent conversion from a negative int. 311 return BAD_VALUE; 312 } 313 314 status_t err; 315 err = continueWrite(size); 316 if (err == NO_ERROR) { 317 mDataSize = size; 318 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize); 319 } 320 return err; 321 } 322 323 void Parcel::setDataPosition(size_t pos) const 324 { 325 if (pos > INT32_MAX) { 326 // don't accept size_t values which may have come from an 327 // inadvertent conversion from a negative int. 328 LOG_ALWAYS_FATAL("pos too big: %zu", pos); 329 } 330 331 mDataPos = pos; 332 mNextObjectHint = 0; 333 } 334 335 status_t Parcel::setDataCapacity(size_t size) 336 { 337 if (size > INT32_MAX) { 338 // don't accept size_t values which may have come from an 339 // inadvertent conversion from a negative int. 340 return BAD_VALUE; 341 } 342 343 if (size > mDataCapacity) return continueWrite(size); 344 return NO_ERROR; 345 } 346 347 status_t Parcel::setData(const uint8_t* buffer, size_t len) 348 { 349 if (len > INT32_MAX) { 350 // don't accept size_t values which may have come from an 351 // inadvertent conversion from a negative int. 352 return BAD_VALUE; 353 } 354 355 status_t err = restartWrite(len); 356 if (err == NO_ERROR) { 357 memcpy(const_cast<uint8_t*>(data()), buffer, len); 358 mDataSize = len; 359 mFdsKnown = false; 360 } 361 return err; 362 } 363 364 // Write RPC headers. (previously just the interface token) 365 status_t Parcel::writeInterfaceToken(const char* interface) 366 { 367 // currently the interface identification token is just its name as a string 368 return writeCString(interface); 369 } 370 371 bool Parcel::enforceInterface(const char* interface) const 372 { 373 const char* str = readCString(); 374 if (str != nullptr && strcmp(str, interface) == 0) { 375 return true; 376 } else { 377 ALOGW("**** enforceInterface() expected '%s' but read '%s'", 378 interface, (str ? str : "<empty string>")); 379 return false; 380 } 381 } 382 383 const binder_size_t* Parcel::objects() const 384 { 385 return mObjects; 386 } 387 388 size_t Parcel::objectsCount() const 389 { 390 return mObjectsSize; 391 } 392 393 status_t Parcel::errorCheck() const 394 { 395 return mError; 396 } 397 398 void Parcel::setError(status_t err) 399 { 400 mError = err; 401 } 402 403 status_t Parcel::finishWrite(size_t len) 404 { 405 if (len > INT32_MAX) { 406 // don't accept size_t values which may have come from an 407 // inadvertent conversion from a negative int. 408 return BAD_VALUE; 409 } 410 411 //printf("Finish write of %d\n", len); 412 mDataPos += len; 413 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos); 414 if (mDataPos > mDataSize) { 415 mDataSize = mDataPos; 416 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize); 417 } 418 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize); 419 return NO_ERROR; 420 } 421 422 status_t Parcel::writeUnpadded(const void* data, size_t len) 423 { 424 if (len > INT32_MAX) { 425 // don't accept size_t values which may have come from an 426 // inadvertent conversion from a negative int. 427 return BAD_VALUE; 428 } 429 430 size_t end = mDataPos + len; 431 if (end < mDataPos) { 432 // integer overflow 433 return BAD_VALUE; 434 } 435 436 if (end <= mDataCapacity) { 437 restart_write: 438 memcpy(mData+mDataPos, data, len); 439 return finishWrite(len); 440 } 441 442 status_t err = growData(len); 443 if (err == NO_ERROR) goto restart_write; 444 return err; 445 } 446 447 status_t Parcel::write(const void* data, size_t len) 448 { 449 if (len > INT32_MAX) { 450 // don't accept size_t values which may have come from an 451 // inadvertent conversion from a negative int. 452 return BAD_VALUE; 453 } 454 455 void* const d = writeInplace(len); 456 if (d) { 457 memcpy(d, data, len); 458 return NO_ERROR; 459 } 460 return mError; 461 } 462 463 void* Parcel::writeInplace(size_t len) 464 { 465 if (len > INT32_MAX) { 466 // don't accept size_t values which may have come from an 467 // inadvertent conversion from a negative int. 468 return nullptr; 469 } 470 471 const size_t padded = pad_size(len); 472 473 // sanity check for integer overflow 474 if (mDataPos+padded < mDataPos) { 475 return nullptr; 476 } 477 478 if ((mDataPos+padded) <= mDataCapacity) { 479 restart_write: 480 //printf("Writing %ld bytes, padded to %ld\n", len, padded); 481 uint8_t* const data = mData+mDataPos; 482 483 // Need to pad at end? 484 if (padded != len) { 485 #if BYTE_ORDER == BIG_ENDIAN 486 static const uint32_t mask[4] = { 487 0x00000000, 0xffffff00, 0xffff0000, 0xff000000 488 }; 489 #endif 490 #if BYTE_ORDER == LITTLE_ENDIAN 491 static const uint32_t mask[4] = { 492 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff 493 }; 494 #endif 495 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len], 496 // *reinterpret_cast<void**>(data+padded-4)); 497 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len]; 498 } 499 500 finishWrite(padded); 501 return data; 502 } 503 504 status_t err = growData(padded); 505 if (err == NO_ERROR) goto restart_write; 506 return nullptr; 507 } 508 509 status_t Parcel::writeInt8(int8_t val) 510 { 511 return write(&val, sizeof(val)); 512 } 513 514 status_t Parcel::writeUint8(uint8_t val) 515 { 516 return write(&val, sizeof(val)); 517 } 518 519 status_t Parcel::writeInt16(int16_t val) 520 { 521 return write(&val, sizeof(val)); 522 } 523 524 status_t Parcel::writeUint16(uint16_t val) 525 { 526 return write(&val, sizeof(val)); 527 } 528 529 status_t Parcel::writeInt32(int32_t val) 530 { 531 return writeAligned(val); 532 } 533 534 status_t Parcel::writeUint32(uint32_t val) 535 { 536 return writeAligned(val); 537 } 538 539 status_t Parcel::writeBool(bool val) 540 { 541 return writeInt8(int8_t(val)); 542 } 543 status_t Parcel::writeInt64(int64_t val) 544 { 545 return writeAligned(val); 546 } 547 548 status_t Parcel::writeUint64(uint64_t val) 549 { 550 return writeAligned(val); 551 } 552 553 status_t Parcel::writePointer(uintptr_t val) 554 { 555 return writeAligned<binder_uintptr_t>(val); 556 } 557 558 status_t Parcel::writeFloat(float val) 559 { 560 return writeAligned(val); 561 } 562 563 #if defined(__mips__) && defined(__mips_hard_float) 564 565 status_t Parcel::writeDouble(double val) 566 { 567 union { 568 double d; 569 unsigned long long ll; 570 } u; 571 u.d = val; 572 return writeAligned(u.ll); 573 } 574 575 #else 576 577 status_t Parcel::writeDouble(double val) 578 { 579 return writeAligned(val); 580 } 581 582 #endif 583 584 status_t Parcel::writeCString(const char* str) 585 { 586 return write(str, strlen(str)+1); 587 } 588 status_t Parcel::writeString16(const std::unique_ptr<String16>& str) 589 { 590 if (!str) { 591 return writeInt32(-1); 592 } 593 594 return writeString16(*str); 595 } 596 597 status_t Parcel::writeString16(const String16& str) 598 { 599 return writeString16(str.string(), str.size()); 600 } 601 602 status_t Parcel::writeString16(const char16_t* str, size_t len) 603 { 604 if (str == nullptr) return writeInt32(-1); 605 606 status_t err = writeInt32(len); 607 if (err == NO_ERROR) { 608 len *= sizeof(char16_t); 609 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t)); 610 if (data) { 611 memcpy(data, str, len); 612 *reinterpret_cast<char16_t*>(data+len) = 0; 613 return NO_ERROR; 614 } 615 err = mError; 616 } 617 return err; 618 } 619 status_t Parcel::writeStrongBinder(const sp<IBinder>& val) 620 { 621 return flatten_binder(ProcessState::self(), val, this); 622 } 623 624 template <typename T> 625 status_t Parcel::writeObject(const T& val) 626 { 627 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity; 628 const bool enoughObjects = mObjectsSize < mObjectsCapacity; 629 if (enoughData && enoughObjects) { 630 restart_write: 631 *reinterpret_cast<T*>(mData+mDataPos) = val; 632 633 const binder_object_header* hdr = reinterpret_cast<binder_object_header*>(mData+mDataPos); 634 switch (hdr->type) { 635 case BINDER_TYPE_BINDER: 636 case BINDER_TYPE_WEAK_BINDER: 637 case BINDER_TYPE_HANDLE: 638 case BINDER_TYPE_WEAK_HANDLE: { 639 const flat_binder_object *fbo = reinterpret_cast<const flat_binder_object*>(hdr); 640 if (fbo->binder != 0) { 641 mObjects[mObjectsSize++] = mDataPos; 642 acquire_binder_object(ProcessState::self(), *fbo, this); 643 } 644 break; 645 } 646 case BINDER_TYPE_FD: { 647 // remember if it's a file descriptor 648 if (!mAllowFds) { 649 // fail before modifying our object index 650 return FDS_NOT_ALLOWED; 651 } 652 mHasFds = mFdsKnown = true; 653 mObjects[mObjectsSize++] = mDataPos; 654 break; 655 } 656 case BINDER_TYPE_FDA: 657 mObjects[mObjectsSize++] = mDataPos; 658 break; 659 case BINDER_TYPE_PTR: { 660 const binder_buffer_object *buffer_obj = reinterpret_cast< 661 const binder_buffer_object*>(hdr); 662 if ((void *)buffer_obj->buffer != nullptr) { 663 mObjects[mObjectsSize++] = mDataPos; 664 } 665 break; 666 } 667 default: { 668 ALOGE("writeObject: unknown type %d", hdr->type); 669 break; 670 } 671 } 672 return finishWrite(sizeof(val)); 673 } 674 675 if (!enoughData) { 676 const status_t err = growData(sizeof(val)); 677 if (err != NO_ERROR) return err; 678 } 679 if (!enoughObjects) { 680 if (mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow 681 if (mObjectsSize + 2 > SIZE_MAX / 3) return NO_MEMORY; // overflow 682 size_t newSize = ((mObjectsSize+2)*3)/2; 683 if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow 684 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t)); 685 if (objects == nullptr) return NO_MEMORY; 686 mObjects = objects; 687 mObjectsCapacity = newSize; 688 } 689 690 goto restart_write; 691 } 692 693 template status_t Parcel::writeObject<flat_binder_object>(const flat_binder_object& val); 694 template status_t Parcel::writeObject<binder_fd_object>(const binder_fd_object& val); 695 template status_t Parcel::writeObject<binder_buffer_object>(const binder_buffer_object& val); 696 template status_t Parcel::writeObject<binder_fd_array_object>(const binder_fd_array_object& val); 697 698 bool Parcel::validateBufferChild(size_t child_buffer_handle, 699 size_t child_offset) const { 700 if (child_buffer_handle >= mObjectsSize) 701 return false; 702 binder_buffer_object *child = reinterpret_cast<binder_buffer_object*> 703 (mData + mObjects[child_buffer_handle]); 704 if (child->hdr.type != BINDER_TYPE_PTR || child_offset > child->length) { 705 // Parent object not a buffer, or not large enough 706 LOG_BUFFER("writeEmbeddedReference found weird child. " 707 "child_offset = %zu, child->length = %zu", 708 child_offset, (size_t)child->length); 709 return false; 710 } 711 return true; 712 } 713 714 bool Parcel::validateBufferParent(size_t parent_buffer_handle, 715 size_t parent_offset) const { 716 if (parent_buffer_handle >= mObjectsSize) 717 return false; 718 binder_buffer_object *parent = reinterpret_cast<binder_buffer_object*> 719 (mData + mObjects[parent_buffer_handle]); 720 if (parent->hdr.type != BINDER_TYPE_PTR || 721 sizeof(binder_uintptr_t) > parent->length || 722 parent_offset > parent->length - sizeof(binder_uintptr_t)) { 723 // Parent object not a buffer, or not large enough 724 return false; 725 } 726 return true; 727 } 728 status_t Parcel::writeEmbeddedBuffer( 729 const void *buffer, size_t length, size_t *handle, 730 size_t parent_buffer_handle, size_t parent_offset) { 731 LOG_BUFFER("writeEmbeddedBuffer(%p, %zu, parent = (%zu, %zu)) -> %zu", 732 buffer, length, parent_buffer_handle, 733 parent_offset, mObjectsSize); 734 if(!validateBufferParent(parent_buffer_handle, parent_offset)) 735 return BAD_VALUE; 736 binder_buffer_object obj = { 737 .hdr = { .type = BINDER_TYPE_PTR }, 738 .flags = BINDER_BUFFER_FLAG_HAS_PARENT, 739 .buffer = reinterpret_cast<binder_uintptr_t>(buffer), 740 .length = length, 741 .parent = parent_buffer_handle, 742 .parent_offset = parent_offset, 743 }; 744 if (handle != nullptr) { 745 // We use an index into mObjects as a handle 746 *handle = mObjectsSize; 747 } 748 return writeObject(obj); 749 } 750 751 status_t Parcel::writeBuffer(const void *buffer, size_t length, size_t *handle) 752 { 753 LOG_BUFFER("writeBuffer(%p, %zu) -> %zu", 754 buffer, length, mObjectsSize); 755 binder_buffer_object obj { 756 .hdr = { .type = BINDER_TYPE_PTR }, 757 .flags = 0, 758 .buffer = reinterpret_cast<binder_uintptr_t>(buffer), 759 .length = length, 760 }; 761 if (handle != nullptr) { 762 // We use an index into mObjects as a handle 763 *handle = mObjectsSize; 764 } 765 return writeObject(obj); 766 } 767 768 void Parcel::clearCache() const { 769 LOG_BUFFER("clearing cache."); 770 mBufCachePos = 0; 771 mBufCache.clear(); 772 } 773 774 void Parcel::updateCache() const { 775 if(mBufCachePos == mObjectsSize) 776 return; 777 LOG_BUFFER("updating cache from %zu to %zu", mBufCachePos, mObjectsSize); 778 for(size_t i = mBufCachePos; i < mObjectsSize; i++) { 779 binder_size_t dataPos = mObjects[i]; 780 binder_buffer_object *obj = 781 reinterpret_cast<binder_buffer_object*>(mData+dataPos); 782 if(obj->hdr.type != BINDER_TYPE_PTR) 783 continue; 784 BufferInfo ifo; 785 ifo.index = i; 786 ifo.buffer = obj->buffer; 787 ifo.bufend = obj->buffer + obj->length; 788 mBufCache.push_back(ifo); 789 } 790 mBufCachePos = mObjectsSize; 791 } 792 793 /* O(n) (n=#buffers) to find a buffer that contains the given addr */ 794 status_t Parcel::findBuffer(const void *ptr, size_t length, bool *found, 795 size_t *handle, size_t *offset) const { 796 if(found == nullptr) 797 return UNKNOWN_ERROR; 798 updateCache(); 799 binder_uintptr_t ptrVal = reinterpret_cast<binder_uintptr_t>(ptr); 800 // true if the pointer is in some buffer, but the length is too big 801 // so that ptr + length doesn't fit into the buffer. 802 bool suspectRejectBadPointer = false; 803 LOG_BUFFER("findBuffer examining %zu objects.", mObjectsSize); 804 for(auto entry = mBufCache.rbegin(); entry != mBufCache.rend(); ++entry ) { 805 if(entry->buffer <= ptrVal && ptrVal < entry->bufend) { 806 // might have found it. 807 if(ptrVal + length <= entry->bufend) { 808 *found = true; 809 if(handle != nullptr) *handle = entry->index; 810 if(offset != nullptr) *offset = ptrVal - entry->buffer; 811 LOG_BUFFER(" findBuffer has a match at %zu!", entry->index); 812 return OK; 813 } else { 814 suspectRejectBadPointer = true; 815 } 816 } 817 } 818 LOG_BUFFER("findBuffer did not find for ptr = %p.", ptr); 819 *found = false; 820 return suspectRejectBadPointer ? BAD_VALUE : OK; 821 } 822 823 /* findBuffer with the assumption that ptr = .buffer (so it points to top 824 * of the buffer, aka offset 0). 825 * */ 826 status_t Parcel::quickFindBuffer(const void *ptr, size_t *handle) const { 827 updateCache(); 828 binder_uintptr_t ptrVal = reinterpret_cast<binder_uintptr_t>(ptr); 829 LOG_BUFFER("quickFindBuffer examining %zu objects.", mObjectsSize); 830 for(auto entry = mBufCache.rbegin(); entry != mBufCache.rend(); ++entry ) { 831 if(entry->buffer == ptrVal) { 832 if(handle != nullptr) *handle = entry->index; 833 return OK; 834 } 835 } 836 LOG_BUFFER("quickFindBuffer did not find for ptr = %p.", ptr); 837 return NO_INIT; 838 } 839 840 status_t Parcel::writeNativeHandleNoDup(const native_handle_t *handle, 841 bool embedded, 842 size_t parent_buffer_handle, 843 size_t parent_offset) 844 { 845 size_t buffer_handle; 846 status_t status = OK; 847 848 if (handle == nullptr) { 849 status = writeUint64(0); 850 return status; 851 } 852 853 size_t native_handle_size = sizeof(native_handle_t) 854 + handle->numFds * sizeof(int) + handle->numInts * sizeof(int); 855 writeUint64(native_handle_size); 856 857 if (embedded) { 858 status = writeEmbeddedBuffer((void*) handle, 859 native_handle_size, &buffer_handle, 860 parent_buffer_handle, parent_offset); 861 } else { 862 status = writeBuffer((void*) handle, native_handle_size, &buffer_handle); 863 } 864 865 if (status != OK) { 866 return status; 867 } 868 869 struct binder_fd_array_object fd_array { 870 .hdr = { .type = BINDER_TYPE_FDA }, 871 .num_fds = static_cast<binder_size_t>(handle->numFds), 872 .parent = buffer_handle, 873 .parent_offset = offsetof(native_handle_t, data), 874 }; 875 876 return writeObject(fd_array); 877 } 878 879 status_t Parcel::writeNativeHandleNoDup(const native_handle_t *handle) 880 { 881 return writeNativeHandleNoDup(handle, false /* embedded */); 882 } 883 884 status_t Parcel::writeEmbeddedNativeHandle(const native_handle_t *handle, 885 size_t parent_buffer_handle, 886 size_t parent_offset) 887 { 888 return writeNativeHandleNoDup(handle, true /* embedded */, 889 parent_buffer_handle, parent_offset); 890 } 891 892 void Parcel::remove(size_t /*start*/, size_t /*amt*/) 893 { 894 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!"); 895 } 896 897 status_t Parcel::read(void* outData, size_t len) const 898 { 899 if (len > INT32_MAX) { 900 // don't accept size_t values which may have come from an 901 // inadvertent conversion from a negative int. 902 return BAD_VALUE; 903 } 904 905 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize 906 && len <= pad_size(len)) { 907 memcpy(outData, mData+mDataPos, len); 908 mDataPos += pad_size(len); 909 ALOGV("read Setting data pos of %p to %zu", this, mDataPos); 910 return NO_ERROR; 911 } 912 return NOT_ENOUGH_DATA; 913 } 914 915 const void* Parcel::readInplace(size_t len) const 916 { 917 if (len > INT32_MAX) { 918 // don't accept size_t values which may have come from an 919 // inadvertent conversion from a negative int. 920 return nullptr; 921 } 922 923 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize 924 && len <= pad_size(len)) { 925 const void* data = mData+mDataPos; 926 mDataPos += pad_size(len); 927 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos); 928 return data; 929 } 930 return nullptr; 931 } 932 933 template<class T> 934 status_t Parcel::readAligned(T *pArg) const { 935 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T)); 936 937 if ((mDataPos+sizeof(T)) <= mDataSize) { 938 const void* data = mData+mDataPos; 939 mDataPos += sizeof(T); 940 *pArg = *reinterpret_cast<const T*>(data); 941 return NO_ERROR; 942 } else { 943 return NOT_ENOUGH_DATA; 944 } 945 } 946 947 template<class T> 948 T Parcel::readAligned() const { 949 T result; 950 if (readAligned(&result) != NO_ERROR) { 951 result = 0; 952 } 953 954 return result; 955 } 956 957 template<class T> 958 status_t Parcel::writeAligned(T val) { 959 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T)); 960 961 if ((mDataPos+sizeof(val)) <= mDataCapacity) { 962 restart_write: 963 *reinterpret_cast<T*>(mData+mDataPos) = val; 964 return finishWrite(sizeof(val)); 965 } 966 967 status_t err = growData(sizeof(val)); 968 if (err == NO_ERROR) goto restart_write; 969 return err; 970 } 971 972 status_t Parcel::readInt8(int8_t *pArg) const 973 { 974 return read(pArg, sizeof(*pArg)); 975 } 976 977 status_t Parcel::readUint8(uint8_t *pArg) const 978 { 979 return read(pArg, sizeof(*pArg)); 980 } 981 982 status_t Parcel::readInt16(int16_t *pArg) const 983 { 984 return read(pArg, sizeof(*pArg)); 985 } 986 987 status_t Parcel::readUint16(uint16_t *pArg) const 988 { 989 return read(pArg, sizeof(*pArg)); 990 } 991 992 status_t Parcel::readInt32(int32_t *pArg) const 993 { 994 return readAligned(pArg); 995 } 996 997 int32_t Parcel::readInt32() const 998 { 999 return readAligned<int32_t>(); 1000 } 1001 1002 status_t Parcel::readUint32(uint32_t *pArg) const 1003 { 1004 return readAligned(pArg); 1005 } 1006 1007 uint32_t Parcel::readUint32() const 1008 { 1009 return readAligned<uint32_t>(); 1010 } 1011 1012 status_t Parcel::readInt64(int64_t *pArg) const 1013 { 1014 return readAligned(pArg); 1015 } 1016 1017 int64_t Parcel::readInt64() const 1018 { 1019 return readAligned<int64_t>(); 1020 } 1021 1022 status_t Parcel::readUint64(uint64_t *pArg) const 1023 { 1024 return readAligned(pArg); 1025 } 1026 1027 uint64_t Parcel::readUint64() const 1028 { 1029 return readAligned<uint64_t>(); 1030 } 1031 1032 status_t Parcel::readPointer(uintptr_t *pArg) const 1033 { 1034 status_t ret; 1035 binder_uintptr_t ptr; 1036 ret = readAligned(&ptr); 1037 if (!ret) 1038 *pArg = ptr; 1039 return ret; 1040 } 1041 1042 uintptr_t Parcel::readPointer() const 1043 { 1044 return readAligned<binder_uintptr_t>(); 1045 } 1046 1047 1048 status_t Parcel::readFloat(float *pArg) const 1049 { 1050 return readAligned(pArg); 1051 } 1052 1053 1054 float Parcel::readFloat() const 1055 { 1056 return readAligned<float>(); 1057 } 1058 1059 #if defined(__mips__) && defined(__mips_hard_float) 1060 1061 status_t Parcel::readDouble(double *pArg) const 1062 { 1063 union { 1064 double d; 1065 unsigned long long ll; 1066 } u; 1067 u.d = 0; 1068 status_t status; 1069 status = readAligned(&u.ll); 1070 *pArg = u.d; 1071 return status; 1072 } 1073 1074 double Parcel::readDouble() const 1075 { 1076 union { 1077 double d; 1078 unsigned long long ll; 1079 } u; 1080 u.ll = readAligned<unsigned long long>(); 1081 return u.d; 1082 } 1083 1084 #else 1085 1086 status_t Parcel::readDouble(double *pArg) const 1087 { 1088 return readAligned(pArg); 1089 } 1090 1091 double Parcel::readDouble() const 1092 { 1093 return readAligned<double>(); 1094 } 1095 1096 #endif 1097 1098 status_t Parcel::readBool(bool *pArg) const 1099 { 1100 int8_t tmp; 1101 status_t ret = readInt8(&tmp); 1102 *pArg = (tmp != 0); 1103 return ret; 1104 } 1105 1106 bool Parcel::readBool() const 1107 { 1108 int8_t tmp; 1109 status_t err = readInt8(&tmp); 1110 1111 if (err != OK) { 1112 return 0; 1113 } 1114 1115 return tmp != 0; 1116 } 1117 1118 const char* Parcel::readCString() const 1119 { 1120 if (mDataPos < mDataSize) { 1121 const size_t avail = mDataSize-mDataPos; 1122 const char* str = reinterpret_cast<const char*>(mData+mDataPos); 1123 // is the string's trailing NUL within the parcel's valid bounds? 1124 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail)); 1125 if (eos) { 1126 const size_t len = eos - str; 1127 mDataPos += pad_size(len+1); 1128 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos); 1129 return str; 1130 } 1131 } 1132 return nullptr; 1133 } 1134 String16 Parcel::readString16() const 1135 { 1136 size_t len; 1137 const char16_t* str = readString16Inplace(&len); 1138 if (str) return String16(str, len); 1139 ALOGE("Reading a NULL string not supported here."); 1140 return String16(); 1141 } 1142 1143 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const 1144 { 1145 const int32_t start = dataPosition(); 1146 int32_t size; 1147 status_t status = readInt32(&size); 1148 pArg->reset(); 1149 1150 if (status != OK || size < 0) { 1151 return status; 1152 } 1153 1154 setDataPosition(start); 1155 pArg->reset(new (std::nothrow) String16()); 1156 1157 status = readString16(pArg->get()); 1158 1159 if (status != OK) { 1160 pArg->reset(); 1161 } 1162 1163 return status; 1164 } 1165 1166 status_t Parcel::readString16(String16* pArg) const 1167 { 1168 size_t len; 1169 const char16_t* str = readString16Inplace(&len); 1170 if (str) { 1171 pArg->setTo(str, len); 1172 return 0; 1173 } else { 1174 *pArg = String16(); 1175 return UNEXPECTED_NULL; 1176 } 1177 } 1178 1179 const char16_t* Parcel::readString16Inplace(size_t* outLen) const 1180 { 1181 int32_t size = readInt32(); 1182 // watch for potential int overflow from size+1 1183 if (size >= 0 && size < INT32_MAX) { 1184 *outLen = size; 1185 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t)); 1186 if (str != nullptr) { 1187 return str; 1188 } 1189 } 1190 *outLen = 0; 1191 return nullptr; 1192 } 1193 status_t Parcel::readStrongBinder(sp<IBinder>* val) const 1194 { 1195 status_t status = readNullableStrongBinder(val); 1196 if (status == OK && !val->get()) { 1197 status = UNEXPECTED_NULL; 1198 } 1199 return status; 1200 } 1201 1202 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const 1203 { 1204 return unflatten_binder(ProcessState::self(), *this, val); 1205 } 1206 1207 sp<IBinder> Parcel::readStrongBinder() const 1208 { 1209 sp<IBinder> val; 1210 // Note that a lot of code in Android reads binders by hand with this 1211 // method, and that code has historically been ok with getting nullptr 1212 // back (while ignoring error codes). 1213 readNullableStrongBinder(&val); 1214 return val; 1215 } 1216 1217 template<typename T> 1218 const T* Parcel::readObject(size_t *objects_offset) const 1219 { 1220 const size_t DPOS = mDataPos; 1221 if (objects_offset != nullptr) { 1222 *objects_offset = 0; 1223 } 1224 1225 if ((DPOS+sizeof(T)) <= mDataSize) { 1226 const T* obj = reinterpret_cast<const T*>(mData+DPOS); 1227 mDataPos = DPOS + sizeof(T); 1228 const binder_object_header *hdr = reinterpret_cast<const binder_object_header*>(obj); 1229 switch (hdr->type) { 1230 case BINDER_TYPE_BINDER: 1231 case BINDER_TYPE_WEAK_BINDER: 1232 case BINDER_TYPE_HANDLE: 1233 case BINDER_TYPE_WEAK_HANDLE: { 1234 const flat_binder_object *flat_obj = 1235 reinterpret_cast<const flat_binder_object*>(hdr); 1236 if (flat_obj->cookie == 0 && flat_obj->binder == 0) { 1237 // When transferring a NULL binder object, we don't write it into 1238 // the object list, so we don't want to check for it when 1239 // reading. 1240 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 1241 return obj; 1242 } 1243 break; 1244 } 1245 case BINDER_TYPE_FD: 1246 case BINDER_TYPE_FDA: 1247 // fd (-arrays) must always appear in the meta-data list (eg touched by the kernel) 1248 break; 1249 case BINDER_TYPE_PTR: { 1250 const binder_buffer_object *buffer_obj = 1251 reinterpret_cast<const binder_buffer_object*>(hdr); 1252 if ((void *)buffer_obj->buffer == nullptr) { 1253 // null pointers can be returned directly - they're not written in the 1254 // object list. All non-null buffers must appear in the objects list. 1255 return obj; 1256 } 1257 break; 1258 } 1259 } 1260 // Ensure that this object is valid... 1261 binder_size_t* const OBJS = mObjects; 1262 const size_t N = mObjectsSize; 1263 size_t opos = mNextObjectHint; 1264 1265 if (N > 0) { 1266 ALOGV("Parcel %p looking for obj at %zu, hint=%zu", 1267 this, DPOS, opos); 1268 1269 // Start at the current hint position, looking for an object at 1270 // the current data position. 1271 if (opos < N) { 1272 while (opos < (N-1) && OBJS[opos] < DPOS) { 1273 opos++; 1274 } 1275 } else { 1276 opos = N-1; 1277 } 1278 if (OBJS[opos] == DPOS) { 1279 // Found it! 1280 ALOGV("Parcel %p found obj %zu at index %zu with forward search", 1281 this, DPOS, opos); 1282 mNextObjectHint = opos+1; 1283 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 1284 if (objects_offset != nullptr) { 1285 *objects_offset = opos; 1286 } 1287 return obj; 1288 } 1289 1290 // Look backwards for it... 1291 while (opos > 0 && OBJS[opos] > DPOS) { 1292 opos--; 1293 } 1294 if (OBJS[opos] == DPOS) { 1295 // Found it! 1296 ALOGV("Parcel %p found obj %zu at index %zu with backward search", 1297 this, DPOS, opos); 1298 mNextObjectHint = opos+1; 1299 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos); 1300 if (objects_offset != nullptr) { 1301 *objects_offset = opos; 1302 } 1303 return obj; 1304 } 1305 } 1306 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list", 1307 this, DPOS); 1308 } 1309 return nullptr; 1310 } 1311 1312 template const flat_binder_object* Parcel::readObject<flat_binder_object>(size_t *objects_offset) const; 1313 1314 template const binder_fd_object* Parcel::readObject<binder_fd_object>(size_t *objects_offset) const; 1315 1316 template const binder_buffer_object* Parcel::readObject<binder_buffer_object>(size_t *objects_offset) const; 1317 1318 template const binder_fd_array_object* Parcel::readObject<binder_fd_array_object>(size_t *objects_offset) const; 1319 1320 bool Parcel::verifyBufferObject(const binder_buffer_object *buffer_obj, 1321 size_t size, uint32_t flags, size_t parent, 1322 size_t parentOffset) const { 1323 if (buffer_obj->length != size) { 1324 ALOGE("Buffer length %" PRIu64 " does not match expected size %zu.", 1325 static_cast<uint64_t>(buffer_obj->length), size); 1326 return false; 1327 } 1328 1329 if (buffer_obj->flags != flags) { 1330 ALOGE("Buffer flags 0x%02X do not match expected flags 0x%02X.", buffer_obj->flags, flags); 1331 return false; 1332 } 1333 1334 if (flags & BINDER_BUFFER_FLAG_HAS_PARENT) { 1335 if (buffer_obj->parent != parent) { 1336 ALOGE("Buffer parent %" PRIu64 " does not match expected parent %zu.", 1337 static_cast<uint64_t>(buffer_obj->parent), parent); 1338 return false; 1339 } 1340 if (buffer_obj->parent_offset != parentOffset) { 1341 ALOGE("Buffer parent offset %" PRIu64 " does not match expected offset %zu.", 1342 static_cast<uint64_t>(buffer_obj->parent_offset), parentOffset); 1343 return false; 1344 } 1345 } 1346 1347 return true; 1348 } 1349 1350 status_t Parcel::readBuffer(size_t buffer_size, size_t *buffer_handle, 1351 uint32_t flags, size_t parent, size_t parentOffset, 1352 const void **buffer_out) const { 1353 1354 const binder_buffer_object* buffer_obj = readObject<binder_buffer_object>(buffer_handle); 1355 1356 if (buffer_obj == nullptr || buffer_obj->hdr.type != BINDER_TYPE_PTR) { 1357 return BAD_VALUE; 1358 } 1359 1360 if (!verifyBufferObject(buffer_obj, buffer_size, flags, parent, parentOffset)) { 1361 return BAD_VALUE; 1362 } 1363 1364 // in read side, always use .buffer and .length. 1365 *buffer_out = reinterpret_cast<void*>(buffer_obj->buffer); 1366 1367 return OK; 1368 } 1369 1370 status_t Parcel::readNullableBuffer(size_t buffer_size, size_t *buffer_handle, 1371 const void **buffer_out) const 1372 { 1373 return readBuffer(buffer_size, buffer_handle, 1374 0 /* flags */, 0 /* parent */, 0 /* parentOffset */, 1375 buffer_out); 1376 } 1377 1378 status_t Parcel::readBuffer(size_t buffer_size, size_t *buffer_handle, 1379 const void **buffer_out) const 1380 { 1381 status_t status = readNullableBuffer(buffer_size, buffer_handle, buffer_out); 1382 if (status == OK && *buffer_out == nullptr) { 1383 return UNEXPECTED_NULL; 1384 } 1385 return status; 1386 } 1387 1388 1389 status_t Parcel::readEmbeddedBuffer(size_t buffer_size, 1390 size_t *buffer_handle, 1391 size_t parent_buffer_handle, 1392 size_t parent_offset, 1393 const void **buffer_out) const 1394 { 1395 status_t status = readNullableEmbeddedBuffer(buffer_size, buffer_handle, 1396 parent_buffer_handle, 1397 parent_offset, buffer_out); 1398 if (status == OK && *buffer_out == nullptr) { 1399 return UNEXPECTED_NULL; 1400 } 1401 return status; 1402 } 1403 1404 status_t Parcel::readNullableEmbeddedBuffer(size_t buffer_size, 1405 size_t *buffer_handle, 1406 size_t parent_buffer_handle, 1407 size_t parent_offset, 1408 const void **buffer_out) const 1409 { 1410 return readBuffer(buffer_size, buffer_handle, BINDER_BUFFER_FLAG_HAS_PARENT, 1411 parent_buffer_handle, parent_offset, buffer_out); 1412 } 1413 1414 status_t Parcel::readEmbeddedNativeHandle(size_t parent_buffer_handle, 1415 size_t parent_offset, 1416 const native_handle_t **handle) const 1417 { 1418 status_t status = readNullableEmbeddedNativeHandle(parent_buffer_handle, parent_offset, handle); 1419 if (status == OK && *handle == nullptr) { 1420 return UNEXPECTED_NULL; 1421 } 1422 return status; 1423 } 1424 1425 status_t Parcel::readNullableNativeHandleNoDup(const native_handle_t **handle, 1426 bool embedded, 1427 size_t parent_buffer_handle, 1428 size_t parent_offset) const 1429 { 1430 status_t status; 1431 uint64_t nativeHandleSize; 1432 size_t fdaParent; 1433 1434 status = readUint64(&nativeHandleSize); 1435 if (status != OK || nativeHandleSize == 0) { 1436 *handle = nullptr; 1437 return status; 1438 } 1439 1440 if (nativeHandleSize < sizeof(native_handle_t)) { 1441 ALOGE("Received a native_handle_t size that was too small."); 1442 return BAD_VALUE; 1443 } 1444 1445 if (embedded) { 1446 status = readNullableEmbeddedBuffer(nativeHandleSize, &fdaParent, 1447 parent_buffer_handle, parent_offset, 1448 reinterpret_cast<const void**>(handle)); 1449 } else { 1450 status = readNullableBuffer(nativeHandleSize, &fdaParent, 1451 reinterpret_cast<const void**>(handle)); 1452 } 1453 1454 if (status != OK) { 1455 return status; 1456 } 1457 1458 if (*handle == nullptr) { 1459 // null handle already read above 1460 ALOGE("Expecting non-null handle buffer"); 1461 return BAD_VALUE; 1462 } 1463 1464 int numFds = (*handle)->numFds; 1465 int numInts = (*handle)->numInts; 1466 1467 if (numFds < 0 || numFds > NATIVE_HANDLE_MAX_FDS) { 1468 ALOGE("Received native_handle with invalid number of fds."); 1469 return BAD_VALUE; 1470 } 1471 1472 if (numInts < 0 || numInts > NATIVE_HANDLE_MAX_INTS) { 1473 ALOGE("Received native_handle with invalid number of ints."); 1474 return BAD_VALUE; 1475 } 1476 1477 if (nativeHandleSize != (sizeof(native_handle_t) + ((numFds + numInts) * sizeof(int)))) { 1478 ALOGE("Size of native_handle doesn't match."); 1479 return BAD_VALUE; 1480 } 1481 1482 const binder_fd_array_object* fd_array_obj = readObject<binder_fd_array_object>(); 1483 1484 if (fd_array_obj == nullptr || fd_array_obj->hdr.type != BINDER_TYPE_FDA) { 1485 ALOGE("Can't find file-descriptor array object."); 1486 return BAD_VALUE; 1487 } 1488 1489 if (static_cast<int>(fd_array_obj->num_fds) != numFds) { 1490 ALOGE("Number of native handles does not match."); 1491 return BAD_VALUE; 1492 } 1493 1494 if (fd_array_obj->parent != fdaParent) { 1495 ALOGE("Parent handle of file-descriptor array not correct."); 1496 return BAD_VALUE; 1497 } 1498 1499 if (fd_array_obj->parent_offset != offsetof(native_handle_t, data)) { 1500 ALOGE("FD array object not properly offset in parent."); 1501 return BAD_VALUE; 1502 } 1503 1504 return OK; 1505 } 1506 1507 status_t Parcel::readNullableEmbeddedNativeHandle(size_t parent_buffer_handle, 1508 size_t parent_offset, 1509 const native_handle_t **handle) const 1510 { 1511 return readNullableNativeHandleNoDup(handle, true /* embedded */, parent_buffer_handle, 1512 parent_offset); 1513 } 1514 1515 status_t Parcel::readNativeHandleNoDup(const native_handle_t **handle) const 1516 { 1517 status_t status = readNullableNativeHandleNoDup(handle); 1518 if (status == OK && *handle == nullptr) { 1519 return UNEXPECTED_NULL; 1520 } 1521 return status; 1522 } 1523 1524 status_t Parcel::readNullableNativeHandleNoDup(const native_handle_t **handle) const 1525 { 1526 return readNullableNativeHandleNoDup(handle, false /* embedded */); 1527 } 1528 1529 void Parcel::closeFileDescriptors() 1530 { 1531 size_t i = mObjectsSize; 1532 if (i > 0) { 1533 //ALOGI("Closing file descriptors for %zu objects...", i); 1534 } 1535 while (i > 0) { 1536 i--; 1537 const flat_binder_object* flat 1538 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 1539 if (flat->hdr.type == BINDER_TYPE_FD) { 1540 //ALOGI("Closing fd: %ld", flat->handle); 1541 close(flat->handle); 1542 } 1543 } 1544 } 1545 1546 uintptr_t Parcel::ipcData() const 1547 { 1548 return reinterpret_cast<uintptr_t>(mData); 1549 } 1550 1551 size_t Parcel::ipcDataSize() const 1552 { 1553 return mDataSize > mDataPos ? mDataSize : mDataPos; 1554 } 1555 1556 uintptr_t Parcel::ipcObjects() const 1557 { 1558 return reinterpret_cast<uintptr_t>(mObjects); 1559 } 1560 1561 size_t Parcel::ipcObjectsCount() const 1562 { 1563 return mObjectsSize; 1564 } 1565 1566 #define BUFFER_ALIGNMENT_BYTES 8 1567 size_t Parcel::ipcBufferSize() const 1568 { 1569 size_t totalBuffersSize = 0; 1570 // Add size for BINDER_TYPE_PTR 1571 size_t i = mObjectsSize; 1572 while (i > 0) { 1573 i--; 1574 const binder_buffer_object* buffer 1575 = reinterpret_cast<binder_buffer_object*>(mData+mObjects[i]); 1576 if (buffer->hdr.type == BINDER_TYPE_PTR) { 1577 /* The binder kernel driver requires each buffer to be 8-byte 1578 * aligned */ 1579 size_t alignedSize = (buffer->length + (BUFFER_ALIGNMENT_BYTES - 1)) 1580 & ~(BUFFER_ALIGNMENT_BYTES - 1); 1581 if (alignedSize > SIZE_MAX - totalBuffersSize) { 1582 ALOGE("ipcBuffersSize(): invalid buffer sizes."); 1583 return 0; 1584 } 1585 totalBuffersSize += alignedSize; 1586 } 1587 } 1588 return totalBuffersSize; 1589 } 1590 1591 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, 1592 const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie) 1593 { 1594 binder_size_t minOffset = 0; 1595 freeDataNoInit(); 1596 mError = NO_ERROR; 1597 mData = const_cast<uint8_t*>(data); 1598 mDataSize = mDataCapacity = dataSize; 1599 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid()); 1600 mDataPos = 0; 1601 ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos); 1602 mObjects = const_cast<binder_size_t*>(objects); 1603 mObjectsSize = mObjectsCapacity = objectsCount; 1604 mNextObjectHint = 0; 1605 clearCache(); 1606 mOwner = relFunc; 1607 mOwnerCookie = relCookie; 1608 for (size_t i = 0; i < mObjectsSize; i++) { 1609 binder_size_t offset = mObjects[i]; 1610 if (offset < minOffset) { 1611 ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n", 1612 __func__, (uint64_t)offset, (uint64_t)minOffset); 1613 mObjectsSize = 0; 1614 break; 1615 } 1616 minOffset = offset + sizeof(flat_binder_object); 1617 } 1618 scanForFds(); 1619 } 1620 1621 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const 1622 { 1623 to << "Parcel("; 1624 1625 if (errorCheck() != NO_ERROR) { 1626 const status_t err = errorCheck(); 1627 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\""; 1628 } else if (dataSize() > 0) { 1629 const uint8_t* DATA = data(); 1630 to << indent << HexDump(DATA, dataSize()) << dedent; 1631 const binder_size_t* OBJS = objects(); 1632 const size_t N = objectsCount(); 1633 for (size_t i=0; i<N; i++) { 1634 const flat_binder_object* flat 1635 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]); 1636 if (flat->hdr.type == BINDER_TYPE_PTR) { 1637 const binder_buffer_object* buffer 1638 = reinterpret_cast<const binder_buffer_object*>(DATA+OBJS[i]); 1639 HexDump bufferDump((const uint8_t*)buffer->buffer, (size_t)buffer->length); 1640 bufferDump.setSingleLineCutoff(0); 1641 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << " (buffer size " << buffer->length << "):"; 1642 to << indent << bufferDump << dedent; 1643 } else { 1644 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": " 1645 << TypeCode(flat->hdr.type & 0x7f7f7f00) 1646 << " = " << flat->binder; 1647 } 1648 } 1649 } else { 1650 to << "NULL"; 1651 } 1652 1653 to << ")"; 1654 } 1655 1656 void Parcel::releaseObjects() 1657 { 1658 const sp<ProcessState> proc(ProcessState::self()); 1659 size_t i = mObjectsSize; 1660 uint8_t* const data = mData; 1661 binder_size_t* const objects = mObjects; 1662 while (i > 0) { 1663 i--; 1664 const flat_binder_object* flat 1665 = reinterpret_cast<flat_binder_object*>(data+objects[i]); 1666 release_object(proc, *flat, this); 1667 } 1668 } 1669 1670 void Parcel::acquireObjects() 1671 { 1672 const sp<ProcessState> proc(ProcessState::self()); 1673 size_t i = mObjectsSize; 1674 uint8_t* const data = mData; 1675 binder_size_t* const objects = mObjects; 1676 while (i > 0) { 1677 i--; 1678 const binder_object_header* flat 1679 = reinterpret_cast<binder_object_header*>(data+objects[i]); 1680 acquire_object(proc, *flat, this); 1681 } 1682 } 1683 1684 void Parcel::freeData() 1685 { 1686 freeDataNoInit(); 1687 initState(); 1688 } 1689 1690 void Parcel::freeDataNoInit() 1691 { 1692 if (mOwner) { 1693 LOG_ALLOC("Parcel %p: freeing other owner data", this); 1694 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid()); 1695 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 1696 } else { 1697 LOG_ALLOC("Parcel %p: freeing allocated data", this); 1698 releaseObjects(); 1699 if (mData) { 1700 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity); 1701 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 1702 if (mDataCapacity <= gParcelGlobalAllocSize) { 1703 gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity; 1704 } else { 1705 gParcelGlobalAllocSize = 0; 1706 } 1707 if (gParcelGlobalAllocCount > 0) { 1708 gParcelGlobalAllocCount--; 1709 } 1710 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 1711 free(mData); 1712 } 1713 if (mObjects) free(mObjects); 1714 } 1715 } 1716 1717 status_t Parcel::growData(size_t len) 1718 { 1719 if (len > INT32_MAX) { 1720 // don't accept size_t values which may have come from an 1721 // inadvertent conversion from a negative int. 1722 return BAD_VALUE; 1723 } 1724 if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow 1725 if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow 1726 size_t newSize = ((mDataSize+len)*3)/2; 1727 return continueWrite(newSize); 1728 } 1729 1730 status_t Parcel::restartWrite(size_t desired) 1731 { 1732 if (desired > INT32_MAX) { 1733 // don't accept size_t values which may have come from an 1734 // inadvertent conversion from a negative int. 1735 return BAD_VALUE; 1736 } 1737 1738 if (mOwner) { 1739 freeData(); 1740 return continueWrite(desired); 1741 } 1742 1743 uint8_t* data = (uint8_t*)realloc(mData, desired); 1744 if (!data && desired > mDataCapacity) { 1745 mError = NO_MEMORY; 1746 return NO_MEMORY; 1747 } 1748 1749 releaseObjects(); 1750 1751 if (data) { 1752 LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired); 1753 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 1754 gParcelGlobalAllocSize += desired; 1755 gParcelGlobalAllocSize -= mDataCapacity; 1756 if (!mData) { 1757 gParcelGlobalAllocCount++; 1758 } 1759 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 1760 mData = data; 1761 mDataCapacity = desired; 1762 } 1763 1764 mDataSize = mDataPos = 0; 1765 ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize); 1766 ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos); 1767 1768 free(mObjects); 1769 mObjects = nullptr; 1770 mObjectsSize = mObjectsCapacity = 0; 1771 mNextObjectHint = 0; 1772 mHasFds = false; 1773 clearCache(); 1774 mFdsKnown = true; 1775 mAllowFds = true; 1776 1777 return NO_ERROR; 1778 } 1779 1780 status_t Parcel::continueWrite(size_t desired) 1781 { 1782 if (desired > INT32_MAX) { 1783 // don't accept size_t values which may have come from an 1784 // inadvertent conversion from a negative int. 1785 return BAD_VALUE; 1786 } 1787 1788 // If shrinking, first adjust for any objects that appear 1789 // after the new data size. 1790 size_t objectsSize = mObjectsSize; 1791 if (desired < mDataSize) { 1792 if (desired == 0) { 1793 objectsSize = 0; 1794 } else { 1795 while (objectsSize > 0) { 1796 if (mObjects[objectsSize-1] < desired) 1797 break; 1798 objectsSize--; 1799 } 1800 } 1801 } 1802 1803 if (mOwner) { 1804 // If the size is going to zero, just release the owner's data. 1805 if (desired == 0) { 1806 freeData(); 1807 return NO_ERROR; 1808 } 1809 1810 // If there is a different owner, we need to take 1811 // posession. 1812 uint8_t* data = (uint8_t*)malloc(desired); 1813 if (!data) { 1814 mError = NO_MEMORY; 1815 return NO_MEMORY; 1816 } 1817 binder_size_t* objects = nullptr; 1818 1819 if (objectsSize) { 1820 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t)); 1821 if (!objects) { 1822 free(data); 1823 1824 mError = NO_MEMORY; 1825 return NO_MEMORY; 1826 } 1827 1828 // Little hack to only acquire references on objects 1829 // we will be keeping. 1830 size_t oldObjectsSize = mObjectsSize; 1831 mObjectsSize = objectsSize; 1832 acquireObjects(); 1833 mObjectsSize = oldObjectsSize; 1834 } 1835 1836 if (mData) { 1837 memcpy(data, mData, mDataSize < desired ? mDataSize : desired); 1838 } 1839 if (objects && mObjects) { 1840 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t)); 1841 } 1842 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid()); 1843 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie); 1844 mOwner = nullptr; 1845 1846 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired); 1847 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 1848 gParcelGlobalAllocSize += desired; 1849 gParcelGlobalAllocCount++; 1850 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 1851 1852 mData = data; 1853 mObjects = objects; 1854 mDataSize = (mDataSize < desired) ? mDataSize : desired; 1855 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 1856 mDataCapacity = desired; 1857 mObjectsSize = mObjectsCapacity = objectsSize; 1858 mNextObjectHint = 0; 1859 1860 clearCache(); 1861 } else if (mData) { 1862 if (objectsSize < mObjectsSize) { 1863 // Need to release refs on any objects we are dropping. 1864 const sp<ProcessState> proc(ProcessState::self()); 1865 for (size_t i=objectsSize; i<mObjectsSize; i++) { 1866 const flat_binder_object* flat 1867 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]); 1868 if (flat->hdr.type == BINDER_TYPE_FD) { 1869 // will need to rescan because we may have lopped off the only FDs 1870 mFdsKnown = false; 1871 } 1872 release_object(proc, *flat, this); 1873 } 1874 1875 if (objectsSize == 0) { 1876 free(mObjects); 1877 mObjects = nullptr; 1878 } else { 1879 binder_size_t* objects = 1880 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t)); 1881 if (objects) { 1882 mObjects = objects; 1883 } 1884 } 1885 mObjectsSize = objectsSize; 1886 mNextObjectHint = 0; 1887 1888 clearCache(); 1889 } 1890 1891 // We own the data, so we can just do a realloc(). 1892 if (desired > mDataCapacity) { 1893 uint8_t* data = (uint8_t*)realloc(mData, desired); 1894 if (data) { 1895 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity, 1896 desired); 1897 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 1898 gParcelGlobalAllocSize += desired; 1899 gParcelGlobalAllocSize -= mDataCapacity; 1900 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 1901 mData = data; 1902 mDataCapacity = desired; 1903 } else { 1904 mError = NO_MEMORY; 1905 return NO_MEMORY; 1906 } 1907 } else { 1908 if (mDataSize > desired) { 1909 mDataSize = desired; 1910 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 1911 } 1912 if (mDataPos > desired) { 1913 mDataPos = desired; 1914 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos); 1915 } 1916 } 1917 1918 } else { 1919 // This is the first data. Easy! 1920 uint8_t* data = (uint8_t*)malloc(desired); 1921 if (!data) { 1922 mError = NO_MEMORY; 1923 return NO_MEMORY; 1924 } 1925 1926 if(!(mDataCapacity == 0 && mObjects == nullptr 1927 && mObjectsCapacity == 0)) { 1928 ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired); 1929 } 1930 1931 LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired); 1932 pthread_mutex_lock(&gParcelGlobalAllocSizeLock); 1933 gParcelGlobalAllocSize += desired; 1934 gParcelGlobalAllocCount++; 1935 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock); 1936 1937 mData = data; 1938 mDataSize = mDataPos = 0; 1939 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize); 1940 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos); 1941 mDataCapacity = desired; 1942 } 1943 1944 return NO_ERROR; 1945 } 1946 1947 void Parcel::initState() 1948 { 1949 LOG_ALLOC("Parcel %p: initState", this); 1950 mError = NO_ERROR; 1951 mData = nullptr; 1952 mDataSize = 0; 1953 mDataCapacity = 0; 1954 mDataPos = 0; 1955 ALOGV("initState Setting data size of %p to %zu", this, mDataSize); 1956 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos); 1957 mObjects = nullptr; 1958 mObjectsSize = 0; 1959 mObjectsCapacity = 0; 1960 mNextObjectHint = 0; 1961 mHasFds = false; 1962 mFdsKnown = true; 1963 mAllowFds = true; 1964 mOwner = nullptr; 1965 clearCache(); 1966 1967 // racing multiple init leads only to multiple identical write 1968 if (gMaxFds == 0) { 1969 struct rlimit result; 1970 if (!getrlimit(RLIMIT_NOFILE, &result)) { 1971 gMaxFds = (size_t)result.rlim_cur; 1972 //ALOGI("parcel fd limit set to %zu", gMaxFds); 1973 } else { 1974 ALOGW("Unable to getrlimit: %s", strerror(errno)); 1975 gMaxFds = 1024; 1976 } 1977 } 1978 } 1979 1980 void Parcel::scanForFds() const 1981 { 1982 bool hasFds = false; 1983 for (size_t i=0; i<mObjectsSize; i++) { 1984 const flat_binder_object* flat 1985 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]); 1986 if (flat->hdr.type == BINDER_TYPE_FD) { 1987 hasFds = true; 1988 break; 1989 } 1990 } 1991 mHasFds = hasFds; 1992 mFdsKnown = true; 1993 } 1994 1995 } // namespace hardware 1996 } // namespace android 1997