/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include <chrono>
#include <fstream>
#include <thread>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <android-base/properties.h>
#include <android-base/result-gmock.h>
#include <android-base/strings.h>
#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/Functional.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/RpcServer.h>
#include <binder/RpcSession.h>
#include <binder/Status.h>
#include <binder/unique_fd.h>
#include <utils/Flattenable.h>

#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "../binder_module.h"

#define ARRAY_SIZE(array) (sizeof array / sizeof array[0])

using namespace android;
using namespace android::binder::impl;
using namespace std::string_literals;
using namespace std::chrono_literals;
using android::base::testing::HasValue;
using android::base::testing::Ok;
using android::binder::Status;
using android::binder::unique_fd;
using testing::ExplainMatchResult;
using testing::Matcher;
using testing::Not;
using testing::WithParamInterface;

// e.g. EXPECT_THAT(expr, StatusEq(OK)) << "additional message";
MATCHER_P(StatusEq, expected, (negation ? "not " : "") + statusToString(expected)) {
    *result_listener << statusToString(arg);
    return expected == arg;
}

static ::testing::AssertionResult IsPageAligned(void *buf) {
    if (((unsigned long)buf & ((unsigned long)getpagesize() - 1)) == 0)
        return ::testing::AssertionSuccess();
    else
        return ::testing::AssertionFailure() << buf << " is not page aligned";
}

static testing::Environment* binder_env;
static char *binderservername;
static char *binderserversuffix;
static char binderserverarg[] = "--binderserver";

static constexpr int kSchedPolicy = SCHED_RR;
static constexpr int kSchedPriority = 7;
static constexpr int kSchedPriorityMore = 8;
static constexpr int kKernelThreads = 17; // anything different than the default

static String16 binderLibTestServiceName = String16("test.binderLib");

enum BinderLibTestTranscationCode {
    BINDER_LIB_TEST_NOP_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
    BINDER_LIB_TEST_REGISTER_SERVER,
    BINDER_LIB_TEST_ADD_SERVER,
    BINDER_LIB_TEST_ADD_POLL_SERVER,
    BINDER_LIB_TEST_USE_CALLING_GUARD_TRANSACTION,
    BINDER_LIB_TEST_CALL_BACK,
    BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF,
    BINDER_LIB_TEST_DELAYED_CALL_BACK,
    BINDER_LIB_TEST_NOP_CALL_BACK,
    BINDER_LIB_TEST_GET_SELF_TRANSACTION,
    BINDER_LIB_TEST_GET_ID_TRANSACTION,
    BINDER_LIB_TEST_INDIRECT_TRANSACTION,
    BINDER_LIB_TEST_SET_ERROR_TRANSACTION,
    BINDER_LIB_TEST_GET_STATUS_TRANSACTION,
    BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION,
    BINDER_LIB_TEST_LINK_DEATH_TRANSACTION,
    BINDER_LIB_TEST_WRITE_FILE_TRANSACTION,
    BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION,
    BINDER_LIB_TEST_EXIT_TRANSACTION,
    BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION,
    BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION,
    BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION,
    BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION,
    BINDER_LIB_TEST_GET_SCHEDULING_POLICY,
    BINDER_LIB_TEST_NOP_TRANSACTION_WAIT,
    BINDER_LIB_TEST_GETPID,
    BINDER_LIB_TEST_GETUID,
    BINDER_LIB_TEST_ECHO_VECTOR,
    BINDER_LIB_TEST_GET_NON_BLOCKING_FD,
    BINDER_LIB_TEST_REJECT_OBJECTS,
    BINDER_LIB_TEST_CAN_GET_SID,
    BINDER_LIB_TEST_GET_MAX_THREAD_COUNT,
    BINDER_LIB_TEST_SET_MAX_THREAD_COUNT,
    BINDER_LIB_TEST_IS_THREADPOOL_STARTED,
    BINDER_LIB_TEST_LOCK_UNLOCK,
    BINDER_LIB_TEST_PROCESS_LOCK,
    BINDER_LIB_TEST_UNLOCK_AFTER_MS,
    BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK
};

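// Forks and execs the test server binary (binderservername) with the given
// argument. On execv() failure the child writes -errno into the pipe; the
// parent treats EOF on the pipe (write end closed by a successful exec) as
// success and returns the child's pid, otherwise a negative status.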
pid_t start_server_process(int arg2, bool usePoll = false)
{
    int ret;
    pid_t pid;
    status_t status;
    int pipefd[2];
    char stri[16];
    char strpipefd1[16];
    char usepoll[2];
    char *childargv[] = {
        binderservername,
        binderserverarg,
        stri,
        strpipefd1,
        usepoll,
        binderserversuffix,
        nullptr
    };

    ret = pipe(pipefd);
    if (ret < 0)
        return ret;

    snprintf(stri, sizeof(stri), "%d", arg2);
    snprintf(strpipefd1, sizeof(strpipefd1), "%d", pipefd[1]);
    snprintf(usepoll, sizeof(usepoll), "%d", usePoll ? 1 : 0);

    pid = fork();
    if (pid == -1)
        return pid;
    if (pid == 0) {
        prctl(PR_SET_PDEATHSIG, SIGHUP);
        close(pipefd[0]);
        execv(binderservername, childargv);
        status = -errno;
        write(pipefd[1], &status, sizeof(status));
        fprintf(stderr, "execv failed, %s\n", strerror(errno));
        _exit(EXIT_FAILURE);
    }
    close(pipefd[1]);
    ret = read(pipefd[0], &status, sizeof(status));
    //printf("pipe read returned %d, status %d\n", ret, status);
    close(pipefd[0]);
    if (ret == sizeof(status)) {
        ret = status;
    } else {
        kill(pid, SIGKILL);
        if (ret >= 0) {
            ret = NO_INIT;
        }
    }
    if (ret < 0) {
        wait(nullptr);
        return ret;
    }
    return pid;
}

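// Sends BINDER_LIB_TEST_GET_ID_TRANSACTION to the given service and returns
// the id it reports, annotating any error with whether the parcel targeted an
// RPC or a kernel binder server.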
android::base::Result<int32_t> GetId(sp<IBinder> service) {
    using android::base::Error;
    Parcel data, reply;
    data.markForBinder(service);
    const char *prefix = data.isForRpc() ? "On RPC server, " : "On binder server, ";
    status_t status = service->transact(BINDER_LIB_TEST_GET_ID_TRANSACTION, data, &reply);
    if (status != OK)
        return Error(status) << prefix << "transact(GET_ID): " << statusToString(status);
    int32_t result = 0;
    status = reply.readInt32(&result);
    if (status != OK) return Error(status) << prefix << "readInt32: " << statusToString(status);
    return result;
}

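// Global test environment: forks the server process once for the whole suite,
// looks up its registered service, and asks it to exit (and reaps it) in
// TearDown().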
class BinderLibTestEnv : public ::testing::Environment {
    public:
        BinderLibTestEnv() {}
        sp<IBinder> getServer(void) {
            return m_server;
        }

    private:
        virtual void SetUp() {
            m_serverpid = start_server_process(0);
            //printf("m_serverpid %d\n", m_serverpid);
            ASSERT_GT(m_serverpid, 0);

            sp<IServiceManager> sm = defaultServiceManager();
            //printf("%s: pid %d, get service\n", __func__, m_pid);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
            m_server = sm->getService(binderLibTestServiceName);
#pragma clang diagnostic pop
            ASSERT_TRUE(m_server != nullptr);
            //printf("%s: pid %d, get service done\n", __func__, m_pid);
        }
        virtual void TearDown() {
            status_t ret;
            Parcel data, reply;
            int exitStatus;
            pid_t pid;

            //printf("%s: pid %d\n", __func__, m_pid);
            if (m_server != nullptr) {
                ret = m_server->transact(BINDER_LIB_TEST_GET_STATUS_TRANSACTION, data, &reply);
                EXPECT_EQ(0, ret);
                ret = m_server->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
                EXPECT_EQ(0, ret);
            }
            if (m_serverpid > 0) {
                //printf("wait for %d\n", m_pids[i]);
                pid = wait(&exitStatus);
                EXPECT_EQ(m_serverpid, pid);
                EXPECT_TRUE(WIFEXITED(exitStatus));
                EXPECT_EQ(0, WEXITSTATUS(exitStatus));
            }
        }

        pid_t m_serverpid;
        sp<IBinder> m_server;
};

class BinderLibTest : public ::testing::Test {
    public:
        virtual void SetUp() {
            m_server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
            IPCThreadState::self()->restoreCallingWorkSource(0);
        }
        virtual void TearDown() {
        }
    protected:
        sp<IBinder> addServerEtc(int32_t *idPtr, int code)
        {
            int32_t id;
            Parcel data, reply;

            EXPECT_THAT(m_server->transact(code, data, &reply), StatusEq(NO_ERROR));

            sp<IBinder> binder = reply.readStrongBinder();
            EXPECT_NE(nullptr, binder);
            EXPECT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
            if (idPtr)
                *idPtr = id;
            return binder;
        }

        sp<IBinder> addServer(int32_t *idPtr = nullptr)
        {
            return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_SERVER);
        }

        sp<IBinder> addPollServer(int32_t *idPtr = nullptr)
        {
            return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_POLL_SERVER);
        }

        void waitForReadData(int fd, int timeout_ms) {
            int ret;
            pollfd pfd = pollfd();

            pfd.fd = fd;
            pfd.events = POLLIN;
            ret = poll(&pfd, 1, timeout_ms);
            EXPECT_EQ(1, ret);
        }

        sp<IBinder> m_server;
};

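// A Parcel wrapped in a simple framing protocol so it can be nested inside
// another Parcel: MARK_START ('BTBS'), payload length, payload bytes, then
// MARK_END ('BTBE'). The Parcel* constructor consumes one such frame from
// |source| and is only valid if the frame was well formed.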
class BinderLibTestBundle : public Parcel
{
    public:
        BinderLibTestBundle(void) {}
        explicit BinderLibTestBundle(const Parcel *source) : m_isValid(false) {
            int32_t mark;
            int32_t bundleLen;
            size_t pos;

            if (source->readInt32(&mark))
                return;
            if (mark != MARK_START)
                return;
            if (source->readInt32(&bundleLen))
                return;
            pos = source->dataPosition();
            if (Parcel::appendFrom(source, pos, bundleLen))
                return;
            source->setDataPosition(pos + bundleLen);
            if (source->readInt32(&mark))
                return;
            if (mark != MARK_END)
                return;
            m_isValid = true;
            setDataPosition(0);
        }
        void appendTo(Parcel *dest) {
            dest->writeInt32(MARK_START);
            dest->writeInt32(dataSize());
            dest->appendFrom(this, 0, dataSize());
            dest->writeInt32(MARK_END);
        };
        bool isValid(void) {
            return m_isValid;
        }
    private:
        enum {
            MARK_START  = B_PACK_CHARS('B','T','B','S'),
            MARK_END    = B_PACK_CHARS('B','T','B','E'),
        };
        bool m_isValid;
};

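// One-shot event built on a pthread mutex and condition variable. waitEvent()
// blocks with a timeout (in seconds) until triggerEvent() is called, and the
// triggering thread is recorded for later inspection.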
class BinderLibTestEvent
{
    public:
        BinderLibTestEvent(void)
            : m_eventTriggered(false)
        {
            pthread_mutex_init(&m_waitMutex, nullptr);
            pthread_cond_init(&m_waitCond, nullptr);
        }
        int waitEvent(int timeout_s)
        {
            int ret;
            pthread_mutex_lock(&m_waitMutex);
            if (!m_eventTriggered) {
                struct timespec ts;
                clock_gettime(CLOCK_REALTIME, &ts);
                ts.tv_sec += timeout_s;
                pthread_cond_timedwait(&m_waitCond, &m_waitMutex, &ts);
            }
            ret = m_eventTriggered ? NO_ERROR : TIMED_OUT;
            pthread_mutex_unlock(&m_waitMutex);
            return ret;
        }
        pthread_t getTriggeringThread()
        {
            return m_triggeringThread;
        }
    protected:
        void triggerEvent(void) {
            pthread_mutex_lock(&m_waitMutex);
            pthread_cond_signal(&m_waitCond);
            m_eventTriggered = true;
            m_triggeringThread = pthread_self();
            pthread_mutex_unlock(&m_waitMutex);
        };
    private:
        pthread_mutex_t m_waitMutex;
        pthread_cond_t m_waitCond;
        bool m_eventTriggered;
        pthread_t m_triggeringThread;
};

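// Callback binder handed to the test server. BINDER_LIB_TEST_CALL_BACK stores
// the received status in m_result and signals the event;
// BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF additionally checks that successive
// transaction buffers are page aligned / tightly packed.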
class BinderLibTestCallBack : public BBinder, public BinderLibTestEvent
{
    public:
        BinderLibTestCallBack()
            : m_result(NOT_ENOUGH_DATA)
            , m_prev_end(nullptr)
        {
        }
        status_t getResult(void)
        {
            return m_result;
        }

    private:
        virtual status_t onTransact(uint32_t code,
                                    const Parcel& data, Parcel* reply,
                                    uint32_t flags = 0)
        {
            (void)reply;
            (void)flags;
            switch(code) {
            case BINDER_LIB_TEST_CALL_BACK: {
                status_t status = data.readInt32(&m_result);
                if (status != NO_ERROR) {
                    m_result = status;
                }
                triggerEvent();
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF: {
                sp<IBinder> server;
                int ret;
                const uint8_t *buf = data.data();
                size_t size = data.dataSize();
                if (m_prev_end) {
                    /* 64-bit kernel needs at most 8 bytes to align buffer end */
                    EXPECT_LE((size_t)(buf - m_prev_end), (size_t)8);
                } else {
                    EXPECT_TRUE(IsPageAligned((void *)buf));
                }

                m_prev_end = buf + size + data.objectsCount() * sizeof(binder_size_t);

                if (size > 0) {
                    server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
                    ret = server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION,
                                           data, reply);
                    EXPECT_EQ(NO_ERROR, ret);
                }
                return NO_ERROR;
            }
            default:
                return UNKNOWN_TRANSACTION;
            }
        }

        status_t m_result;
        const uint8_t *m_prev_end;
};

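// Death recipient that simply signals its event when binderDied() is delivered.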
class TestDeathRecipient : public IBinder::DeathRecipient, public BinderLibTestEvent
{
    private:
        virtual void binderDied(const wp<IBinder>& who) {
            (void)who;
            triggerEvent();
        };
};

TEST_F(BinderLibTest, CannotUseBinderAfterFork) {
    // EXPECT_DEATH works by forking the process
    EXPECT_DEATH({ ProcessState::self(); }, "libbinder ProcessState can not be used after fork");
}

TEST_F(BinderLibTest, AddManagerToManager) {
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = IInterface::asBinder(sm);
    EXPECT_EQ(NO_ERROR, sm->addService(String16("binderLibTest-manager"), binder));
}

TEST_F(BinderLibTest, RegisterForNotificationsFailure) {
    auto sm = defaultServiceManager();
    using LocalRegistrationCallback = IServiceManager::LocalRegistrationCallback;
    class LocalRegistrationCallbackImpl : public virtual LocalRegistrationCallback {
        void onServiceRegistration(const String16&, const sp<IBinder>&) override {}
        virtual ~LocalRegistrationCallbackImpl() {}
    };
    sp<LocalRegistrationCallback> cb = sp<LocalRegistrationCallbackImpl>::make();

    EXPECT_EQ(BAD_VALUE, sm->registerForNotifications(String16("ValidName"), nullptr));
    EXPECT_EQ(UNKNOWN_ERROR, sm->registerForNotifications(String16("InvalidName!$"), cb));
}

TEST_F(BinderLibTest, UnregisterForNotificationsFailure) {
    auto sm = defaultServiceManager();
    using LocalRegistrationCallback = IServiceManager::LocalRegistrationCallback;
    class LocalRegistrationCallbackImpl : public virtual LocalRegistrationCallback {
        void onServiceRegistration(const String16&, const sp<IBinder>&) override {}
        virtual ~LocalRegistrationCallbackImpl() {}
    };
    sp<LocalRegistrationCallback> cb = sp<LocalRegistrationCallbackImpl>::make();

    EXPECT_EQ(OK, sm->registerForNotifications(String16("ValidName"), cb));

    EXPECT_EQ(BAD_VALUE, sm->unregisterForNotifications(String16("ValidName"), nullptr));
    EXPECT_EQ(BAD_VALUE, sm->unregisterForNotifications(String16("AnotherValidName"), cb));
    EXPECT_EQ(BAD_VALUE, sm->unregisterForNotifications(String16("InvalidName!!!"), cb));
}

TEST_F(BinderLibTest, WasParceled) {
    auto binder = sp<BBinder>::make();
    EXPECT_FALSE(binder->wasParceled());
    Parcel data;
    data.writeStrongBinder(binder);
    EXPECT_TRUE(binder->wasParceled());
}

TEST_F(BinderLibTest, NopTransaction) {
    Parcel data, reply;
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, NopTransactionOneway) {
    Parcel data, reply;
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply, TF_ONE_WAY),
                StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, NopTransactionClear) {
    Parcel data, reply;
    // make sure it accepts the transaction flag
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply, TF_CLEAR_BUF),
                StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, Freeze) {
    Parcel data, reply, replypid;
    std::ifstream freezer_file("/sys/fs/cgroup/uid_0/cgroup.freeze");

    // Pass test on devices where the cgroup v2 freezer is not supported
    if (freezer_file.fail()) {
        GTEST_SKIP();
        return;
    }

    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GETPID, data, &replypid), StatusEq(NO_ERROR));
    int32_t pid = replypid.readInt32();
    for (int i = 0; i < 10; i++) {
        EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION_WAIT, data, &reply, TF_ONE_WAY));
    }

    // Pass test on devices where BINDER_FREEZE ioctl is not supported
    int ret = IPCThreadState::self()->freeze(pid, false, 0);
    if (ret == -EINVAL) {
        GTEST_SKIP();
        return;
    }
    EXPECT_EQ(NO_ERROR, ret);

    EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, true, 0));

    // b/268232063 - succeeds ~0.08% of the time
    {
        auto ret = IPCThreadState::self()->freeze(pid, true, 0);
        EXPECT_TRUE(ret == -EAGAIN || ret == OK);
    }

    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, true, 1000));
    EXPECT_EQ(FAILED_TRANSACTION, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));

    uint32_t sync_received, async_received;

    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->getProcessFreezeInfo(pid, &sync_received,
                &async_received));

    EXPECT_EQ(sync_received, 1u);
    EXPECT_EQ(async_received, 0u);

    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, false, 0));
    EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
}

TEST_F(BinderLibTest, SetError) {
    int32_t testValue[] = { 0, -123, 123 };
    for (size_t i = 0; i < ARRAY_SIZE(testValue); i++) {
        Parcel data, reply;
        data.writeInt32(testValue[i]);
        EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_SET_ERROR_TRANSACTION, data, &reply),
                    StatusEq(testValue[i]));
    }
}

TEST_F(BinderLibTest, GetId) {
    EXPECT_THAT(GetId(m_server), HasValue(0));
}

TEST_F(BinderLibTest, PtrSize) {
    int32_t ptrsize;
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
    EXPECT_THAT(reply.readInt32(&ptrsize), StatusEq(NO_ERROR));
    RecordProperty("TestPtrSize", sizeof(void *));
    RecordProperty("ServerPtrSize", ptrsize);
}

TEST_F(BinderLibTest, IndirectGetId2)
{
    int32_t id;
    int32_t count;
    Parcel data, reply;
    int32_t serverId[3];

    data.writeInt32(ARRAY_SIZE(serverId));
    for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
        sp<IBinder> server;
        BinderLibTestBundle datai;

        server = addServer(&serverId[i]);
        ASSERT_TRUE(server != nullptr);
        data.writeStrongBinder(server);
        data.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
        datai.appendTo(&data);
    }

    ASSERT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    ASSERT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
    EXPECT_EQ(0, id);

    ASSERT_THAT(reply.readInt32(&count), StatusEq(NO_ERROR));
    EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);

    for (size_t i = 0; i < (size_t)count; i++) {
        BinderLibTestBundle replyi(&reply);
        EXPECT_TRUE(replyi.isValid());
        EXPECT_THAT(replyi.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(serverId[i], id);
        EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
    }

    EXPECT_EQ(reply.dataSize(), reply.dataPosition());
}

TEST_F(BinderLibTest, IndirectGetId3)
{
    int32_t id;
    int32_t count;
    Parcel data, reply;
    int32_t serverId[3];

    data.writeInt32(ARRAY_SIZE(serverId));
    for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
        sp<IBinder> server;
        BinderLibTestBundle datai;
        BinderLibTestBundle datai2;

        server = addServer(&serverId[i]);
        ASSERT_TRUE(server != nullptr);
        data.writeStrongBinder(server);
        data.writeInt32(BINDER_LIB_TEST_INDIRECT_TRANSACTION);

        datai.writeInt32(1);
        datai.writeStrongBinder(m_server);
        datai.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
        datai2.appendTo(&datai);

        datai.appendTo(&data);
    }

    ASSERT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    ASSERT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
    EXPECT_EQ(0, id);

    ASSERT_THAT(reply.readInt32(&count), StatusEq(NO_ERROR));
    EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);

    for (size_t i = 0; i < (size_t)count; i++) {
        int32_t counti;

        BinderLibTestBundle replyi(&reply);
        EXPECT_TRUE(replyi.isValid());
        EXPECT_THAT(replyi.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(serverId[i], id);

        ASSERT_THAT(replyi.readInt32(&counti), StatusEq(NO_ERROR));
        EXPECT_EQ(1, counti);

        BinderLibTestBundle replyi2(&replyi);
        EXPECT_TRUE(replyi2.isValid());
        EXPECT_THAT(replyi2.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(0, id);
        EXPECT_EQ(replyi2.dataSize(), replyi2.dataPosition());

        EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
    }

    EXPECT_EQ(reply.dataSize(), reply.dataPosition());
}

TEST_F(BinderLibTest, CallBack)
{
    Parcel data, reply;
    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    data.writeStrongBinder(callBack);
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_CALL_BACK, data, &reply, TF_ONE_WAY),
                StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->getResult(), StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, BinderCallContextGuard) {
    sp<IBinder> binder = addServer();
    Parcel data, reply;
    EXPECT_THAT(binder->transact(BINDER_LIB_TEST_USE_CALLING_GUARD_TRANSACTION, data, &reply),
                StatusEq(DEAD_OBJECT));
}

TEST_F(BinderLibTest, AddServer)
{
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
}

TEST_F(BinderLibTest, DeathNotificationStrongRef)
{
    sp<IBinder> sbinder;

    sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();

    {
        sp<IBinder> binder = addServer();
        ASSERT_TRUE(binder != nullptr);
        EXPECT_THAT(binder->linkToDeath(testDeathRecipient), StatusEq(NO_ERROR));
        sbinder = binder;
    }
    {
        Parcel data, reply;
        EXPECT_THAT(sbinder->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY),
                    StatusEq(OK));
    }
    IPCThreadState::self()->flushCommands();
    EXPECT_THAT(testDeathRecipient->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(sbinder->unlinkToDeath(testDeathRecipient), StatusEq(DEAD_OBJECT));
}

TEST_F(BinderLibTest, DeathNotificationMultiple)
{
    status_t ret;
    const int clientcount = 2;
    sp<IBinder> target;
    sp<IBinder> linkedclient[clientcount];
    sp<BinderLibTestCallBack> callBack[clientcount];
    sp<IBinder> passiveclient[clientcount];

    target = addServer();
    ASSERT_TRUE(target != nullptr);
    for (int i = 0; i < clientcount; i++) {
        {
            Parcel data, reply;

            linkedclient[i] = addServer();
            ASSERT_TRUE(linkedclient[i] != nullptr);
            callBack[i] = new BinderLibTestCallBack();
            data.writeStrongBinder(target);
            data.writeStrongBinder(callBack[i]);
            EXPECT_THAT(linkedclient[i]->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data,
                                                  &reply, TF_ONE_WAY),
                        StatusEq(NO_ERROR));
        }
        {
            Parcel data, reply;

            passiveclient[i] = addServer();
            ASSERT_TRUE(passiveclient[i] != nullptr);
            data.writeStrongBinder(target);
            EXPECT_THAT(passiveclient[i]->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data,
                                                   &reply, TF_ONE_WAY),
                        StatusEq(NO_ERROR));
        }
    }
    {
        Parcel data, reply;
        ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
        EXPECT_EQ(0, ret);
    }

    for (int i = 0; i < clientcount; i++) {
        EXPECT_THAT(callBack[i]->waitEvent(5), StatusEq(NO_ERROR));
        EXPECT_THAT(callBack[i]->getResult(), StatusEq(NO_ERROR));
    }
}

TEST_F(BinderLibTest, DeathNotificationThread)
{
    status_t ret;
    sp<BinderLibTestCallBack> callback;
    sp<IBinder> target = addServer();
    ASSERT_TRUE(target != nullptr);
    sp<IBinder> client = addServer();
    ASSERT_TRUE(client != nullptr);

    sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();

    EXPECT_THAT(target->linkToDeath(testDeathRecipient), StatusEq(NO_ERROR));

    {
        Parcel data, reply;
        ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
        EXPECT_EQ(0, ret);
    }

    /* Make sure it's dead */
    testDeathRecipient->waitEvent(5);

    /* Now, pass the ref to another process and ask that process to
     * call linkToDeath() on it, and wait for a response. This tests
     * two things:
     * 1) You still get death notifications when calling linkToDeath()
     *    on a ref that is already dead when it was passed to you.
     * 2) That death notifications are not directly pushed to the thread
     *    registering them, but to the threadpool (proc workqueue) instead.
     *
     * 2) is tested because the thread handling BINDER_LIB_TEST_DEATH_TRANSACTION
     * is blocked on a condition variable waiting for the death notification to be
     * called; therefore, that thread is not available for handling proc work.
     * So, if the death notification was pushed to the thread workqueue, the callback
     * would never be called, and the test would timeout and fail.
     *
     * Note that we can't do this part of the test from this thread itself, because
     * the binder driver would only push death notifications to the thread if
     * it is a looper thread, which this thread is not.
     *
     * See b/23525545 for details.
     */
    {
        Parcel data, reply;

        callback = new BinderLibTestCallBack();
        data.writeStrongBinder(target);
        data.writeStrongBinder(callback);
        EXPECT_THAT(client->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data, &reply,
                                     TF_ONE_WAY),
                    StatusEq(NO_ERROR));
    }

    EXPECT_THAT(callback->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(callback->getResult(), StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, PassFile) {
    int ret;
    int pipefd[2];
    uint8_t buf[1] = { 0 };
    uint8_t write_value = 123;

    ret = pipe2(pipefd, O_NONBLOCK);
    ASSERT_EQ(0, ret);

    {
        Parcel data, reply;
        uint8_t writebuf[1] = { write_value };

        EXPECT_THAT(data.writeFileDescriptor(pipefd[1], true), StatusEq(NO_ERROR));

        EXPECT_THAT(data.writeInt32(sizeof(writebuf)), StatusEq(NO_ERROR));

        EXPECT_THAT(data.write(writebuf, sizeof(writebuf)), StatusEq(NO_ERROR));

        EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_WRITE_FILE_TRANSACTION, data, &reply),
                    StatusEq(NO_ERROR));
    }

    ret = read(pipefd[0], buf, sizeof(buf));
    EXPECT_EQ(sizeof(buf), (size_t)ret);
    EXPECT_EQ(write_value, buf[0]);

    waitForReadData(pipefd[0], 5000); /* wait for other process to close pipe */

    ret = read(pipefd[0], buf, sizeof(buf));
    EXPECT_EQ(0, ret);

    close(pipefd[0]);
}

TEST_F(BinderLibTest, PassParcelFileDescriptor) {
    const int datasize = 123;
    std::vector<uint8_t> writebuf(datasize);
    for (size_t i = 0; i < writebuf.size(); ++i) {
        writebuf[i] = i;
    }

    unique_fd read_end, write_end;
    {
        int pipefd[2];
        ASSERT_EQ(0, pipe2(pipefd, O_NONBLOCK));
        read_end.reset(pipefd[0]);
        write_end.reset(pipefd[1]);
    }
    {
        Parcel data;
        EXPECT_EQ(NO_ERROR, data.writeDupParcelFileDescriptor(write_end.get()));
        write_end.reset();
        EXPECT_EQ(NO_ERROR, data.writeInt32(datasize));
        EXPECT_EQ(NO_ERROR, data.write(writebuf.data(), datasize));

        Parcel reply;
        EXPECT_EQ(NO_ERROR,
                  m_server->transact(BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION, data,
                                     &reply));
    }
    std::vector<uint8_t> readbuf(datasize);
    EXPECT_EQ(datasize, read(read_end.get(), readbuf.data(), datasize));
    EXPECT_EQ(writebuf, readbuf);

    waitForReadData(read_end.get(), 5000); /* wait for other process to close pipe */

    EXPECT_EQ(0, read(read_end.get(), readbuf.data(), datasize));
}

TEST_F(BinderLibTest, PromoteLocal) {
    sp<IBinder> strong = new BBinder();
    wp<IBinder> weak = strong;
    sp<IBinder> strong_from_weak = weak.promote();
    EXPECT_TRUE(strong != nullptr);
    EXPECT_EQ(strong, strong_from_weak);
    strong = nullptr;
    strong_from_weak = nullptr;
    strong_from_weak = weak.promote();
    EXPECT_TRUE(strong_from_weak == nullptr);
}

TEST_F(BinderLibTest, LocalGetExtension) {
    sp<BBinder> binder = new BBinder();
    sp<IBinder> ext = new BBinder();
    binder->setExtension(ext);
    EXPECT_EQ(ext, binder->getExtension());
}

TEST_F(BinderLibTest, RemoteGetExtension) {
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    sp<IBinder> extension;
    EXPECT_EQ(NO_ERROR, server->getExtension(&extension));
    ASSERT_NE(nullptr, extension.get());

    EXPECT_EQ(NO_ERROR, extension->pingBinder());
}

TEST_F(BinderLibTest, CheckHandleZeroBinderHighBitsZeroCookie) {
    Parcel data, reply;

    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GET_SELF_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    const flat_binder_object *fb = reply.readObject(false);
    ASSERT_TRUE(fb != nullptr);
    EXPECT_EQ(BINDER_TYPE_HANDLE, fb->hdr.type);
    EXPECT_EQ(m_server, ProcessState::self()->getStrongProxyForHandle(fb->handle));
    EXPECT_EQ((binder_uintptr_t)0, fb->cookie);
    EXPECT_EQ((uint64_t)0, (uint64_t)fb->binder >> 32);
}

TEST_F(BinderLibTest, FreedBinder) {
    status_t ret;

    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    __u32 freedHandle;
    wp<IBinder> keepFreedBinder;
    {
        Parcel data, reply;
        ASSERT_THAT(server->transact(BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION, data, &reply),
                    StatusEq(NO_ERROR));
        struct flat_binder_object *freed = (struct flat_binder_object *)(reply.data());
        freedHandle = freed->handle;
        /* Add a weak ref to the freed binder so the driver does not
         * delete its reference to it - otherwise the transaction
         * fails regardless of whether the driver is fixed.
         */
        keepFreedBinder = reply.readStrongBinder();
    }
    IPCThreadState::self()->flushCommands();
    {
        Parcel data, reply;
        data.writeStrongBinder(server);
        /* Replace original handle with handle to the freed binder */
        struct flat_binder_object *strong = (struct flat_binder_object *)(data.data());
        __u32 oldHandle = strong->handle;
        strong->handle = freedHandle;
        ret = server->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data, &reply);
        /* Returns DEAD_OBJECT (-32) if target crashes and
         * FAILED_TRANSACTION if the driver rejects the invalid
         * object.
         */
        EXPECT_EQ((status_t)FAILED_TRANSACTION, ret);
        /* Restore original handle so parcel destructor does not use
         * the wrong handle.
         */
        strong->handle = oldHandle;
    }
}

TEST_F(BinderLibTest, CheckNoHeaderMappedInUser) {
    Parcel data, reply;
    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    for (int i = 0; i < 2; i++) {
        BinderLibTestBundle datai;
        datai.appendFrom(&data, 0, data.dataSize());

        data.freeData();
        data.writeInt32(1);
        data.writeStrongBinder(callBack);
        data.writeInt32(BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF);

        datai.appendTo(&data);
    }
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, OnewayQueueing)
{
    Parcel data, data2;

    sp<IBinder> pollServer = addPollServer();

    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    data.writeStrongBinder(callBack);
    data.writeInt32(500000); // delay in us before calling back

    sp<BinderLibTestCallBack> callBack2 = new BinderLibTestCallBack();
    data2.writeStrongBinder(callBack2);
    data2.writeInt32(0); // delay in us

    EXPECT_THAT(pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data, nullptr, TF_ONE_WAY),
                StatusEq(NO_ERROR));

    // The delay ensures that this second transaction will end up on the async_todo list
    // (for a single-threaded server)
    EXPECT_THAT(pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data2, nullptr, TF_ONE_WAY),
                StatusEq(NO_ERROR));

    // The server will ensure that the two transactions are handled in the expected order;
    // If the ordering is not as expected, an error will be returned through the callbacks.
    EXPECT_THAT(callBack->waitEvent(2), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->getResult(), StatusEq(NO_ERROR));

    EXPECT_THAT(callBack2->waitEvent(2), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack2->getResult(), StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, WorkSourceUnsetByDefault)
{
    status_t ret;
    Parcel data, reply;
    data.writeInterfaceToken(binderLibTestServiceName);
    ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    EXPECT_EQ(-1, reply.readInt32());
    EXPECT_EQ(NO_ERROR, ret);
}

TEST_F(BinderLibTest, WorkSourceSet)
{
    status_t ret;
    Parcel data, reply;
    IPCThreadState::self()->clearCallingWorkSource();
    int64_t previousWorkSource = IPCThreadState::self()->setCallingWorkSourceUid(100);
    data.writeInterfaceToken(binderLibTestServiceName);
    ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    EXPECT_EQ(100, reply.readInt32());
    EXPECT_EQ(-1, previousWorkSource);
    EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, ret);
}

TEST_F(BinderLibTest, WorkSourceSetWithoutPropagation)
{
    status_t ret;
    Parcel data, reply;

    IPCThreadState::self()->setCallingWorkSourceUidWithoutPropagation(100);
    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());

    data.writeInterfaceToken(binderLibTestServiceName);
    ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    EXPECT_EQ(-1, reply.readInt32());
    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, ret);
}

TEST_F(BinderLibTest, WorkSourceCleared)
{
    status_t ret;
    Parcel data, reply;

    IPCThreadState::self()->setCallingWorkSourceUid(100);
    int64_t token = IPCThreadState::self()->clearCallingWorkSource();
    int32_t previousWorkSource = (int32_t)token;
    data.writeInterfaceToken(binderLibTestServiceName);
    ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);

    EXPECT_EQ(-1, reply.readInt32());
    EXPECT_EQ(100, previousWorkSource);
    EXPECT_EQ(NO_ERROR, ret);
}

TEST_F(BinderLibTest, WorkSourceRestored)
{
    status_t ret;
    Parcel data, reply;

    IPCThreadState::self()->setCallingWorkSourceUid(100);
    int64_t token = IPCThreadState::self()->clearCallingWorkSource();
    IPCThreadState::self()->restoreCallingWorkSource(token);

    data.writeInterfaceToken(binderLibTestServiceName);
    ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);

    EXPECT_EQ(100, reply.readInt32());
    EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, ret);
}

TEST_F(BinderLibTest, PropagateFlagSet)
{
    IPCThreadState::self()->clearPropagateWorkSource();
    IPCThreadState::self()->setCallingWorkSourceUid(100);
    EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
}

TEST_F(BinderLibTest, PropagateFlagCleared)
{
    IPCThreadState::self()->setCallingWorkSourceUid(100);
    IPCThreadState::self()->clearPropagateWorkSource();
    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
}

TEST_F(BinderLibTest, PropagateFlagRestored)
{
    int token = IPCThreadState::self()->setCallingWorkSourceUid(100);
    IPCThreadState::self()->restoreCallingWorkSource(token);

    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
}

TEST_F(BinderLibTest, WorkSourcePropagatedForAllFollowingBinderCalls)
{
    IPCThreadState::self()->setCallingWorkSourceUid(100);

    Parcel data, reply;
    status_t ret;
    data.writeInterfaceToken(binderLibTestServiceName);
    ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    EXPECT_EQ(NO_ERROR, ret);

    Parcel data2, reply2;
    status_t ret2;
    data2.writeInterfaceToken(binderLibTestServiceName);
    ret2 = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data2, &reply2);
    EXPECT_EQ(100, reply2.readInt32());
    EXPECT_EQ(NO_ERROR, ret2);
}

TEST_F(BinderLibTest, SchedPolicySet) {
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    Parcel data, reply;
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply),
                StatusEq(NO_ERROR));

    int policy = reply.readInt32();
    int priority = reply.readInt32();

    EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
    EXPECT_EQ(kSchedPriority, priority);
}

TEST_F(BinderLibTest, InheritRt) {
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    const struct sched_param param {
        .sched_priority = kSchedPriorityMore,
    };
    EXPECT_EQ(0, sched_setscheduler(getpid(), SCHED_RR, &param));

    Parcel data, reply;
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply),
                StatusEq(NO_ERROR));

    int policy = reply.readInt32();
    int priority = reply.readInt32();

    EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
    EXPECT_EQ(kSchedPriorityMore, priority);
}

TEST_F(BinderLibTest, VectorSent) {
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    std::vector<uint64_t> const testValue = { std::numeric_limits<uint64_t>::max(), 0, 200 };
    data.writeUint64Vector(testValue);

    EXPECT_THAT(server->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply), StatusEq(NO_ERROR));
    std::vector<uint64_t> readValue;
    EXPECT_THAT(reply.readUint64Vector(&readValue), StatusEq(OK));
    EXPECT_EQ(readValue, testValue);
}

TEST_F(BinderLibTest, FileDescriptorRemainsNonBlocking) {
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    Parcel reply;
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_NON_BLOCKING_FD, {} /*data*/, &reply),
                StatusEq(NO_ERROR));
    unique_fd fd;
    EXPECT_THAT(reply.readUniqueFileDescriptor(&fd), StatusEq(OK));

    const int result = fcntl(fd.get(), F_GETFL);
    ASSERT_NE(result, -1);
    EXPECT_EQ(result & O_NONBLOCK, O_NONBLOCK);
}

// see ProcessState.cpp BINDER_VM_SIZE = 1MB.
// This value is not exposed, but some code in the framework relies on being able to use
// buffers near the cap size.
constexpr size_t kSizeBytesAlmostFull = 950'000;
constexpr size_t kSizeBytesOverFull = 1'050'000;

TEST_F(BinderLibTest, GargantuanVectorSent) {
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    for (size_t i = 0; i < 10; i++) {
        // a slight variation in size is used to consider certain possible caching implementations
        const std::vector<uint64_t> testValue((kSizeBytesAlmostFull + i) / sizeof(uint64_t), 42);

        Parcel data, reply;
        data.writeUint64Vector(testValue);
        EXPECT_THAT(server->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply), StatusEq(NO_ERROR))
                << i;
        std::vector<uint64_t> readValue;
        EXPECT_THAT(reply.readUint64Vector(&readValue), StatusEq(OK));
        EXPECT_EQ(readValue, testValue);
    }
}

TEST_F(BinderLibTest, LimitExceededVectorSent) {
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
    const std::vector<uint64_t> testValue(kSizeBytesOverFull / sizeof(uint64_t), 42);

    Parcel data, reply;
    data.writeUint64Vector(testValue);
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply),
                StatusEq(FAILED_TRANSACTION));
}

TEST_F(BinderLibTest, BufRejected) {
    Parcel data, reply;
    uint32_t buf;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    binder_buffer_object obj {
        .hdr = { .type = BINDER_TYPE_PTR },
        .flags = 0,
        .buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
        .length = 4,
    };
    data.setDataCapacity(1024);
    // Write a bogus object at offset 0 to get an entry in the offset table
    data.writeFileDescriptor(0);
    EXPECT_EQ(data.objectsCount(), 1u);
    uint8_t *parcelData = const_cast<uint8_t*>(data.data());
    // And now, overwrite it with the buffer object
    memcpy(parcelData, &obj, sizeof(obj));
    data.setDataSize(sizeof(obj));

    EXPECT_EQ(data.objectsCount(), 1u);

    // The kernel should reject this transaction (if it's correct); if it does
    // not, the server implementation should return an error if it finds an
    // object in the received Parcel.
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_REJECT_OBJECTS, data, &reply),
                Not(StatusEq(NO_ERROR)));
}

TEST_F(BinderLibTest, WeakRejected) {
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    auto binder = sp<BBinder>::make();
    wp<BBinder> wpBinder(binder);
    flat_binder_object obj{
            .hdr = {.type = BINDER_TYPE_WEAK_BINDER},
            .flags = 0,
            .binder = reinterpret_cast<uintptr_t>(wpBinder.get_refs()),
            .cookie = reinterpret_cast<uintptr_t>(wpBinder.unsafe_get()),
    };
    data.setDataCapacity(1024);
    // Write a bogus object at offset 0 to get an entry in the offset table
    data.writeFileDescriptor(0);
    EXPECT_EQ(data.objectsCount(), 1u);
    uint8_t *parcelData = const_cast<uint8_t *>(data.data());
    // And now, overwrite it with the weak binder
    memcpy(parcelData, &obj, sizeof(obj));
    data.setDataSize(sizeof(obj));

    // a previous bug caused other objects to be released an extra time, so we
    // test with an object that libbinder will actually try to release
    EXPECT_EQ(OK, data.writeStrongBinder(sp<BBinder>::make()));

    EXPECT_EQ(data.objectsCount(), 2u);

    // send it many times, since previous error was memory corruption, make it
    // more likely that the server crashes
    for (size_t i = 0; i < 100; i++) {
        EXPECT_THAT(server->transact(BINDER_LIB_TEST_REJECT_OBJECTS, data, &reply),
                    StatusEq(BAD_VALUE));
    }

    EXPECT_THAT(server->pingBinder(), StatusEq(NO_ERROR));
}

TEST_F(BinderLibTest, GotSid) {
    sp<IBinder> server = addServer();

    Parcel data;
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_CAN_GET_SID, data, nullptr), StatusEq(OK));
}

struct TooManyFdsFlattenable : Flattenable<TooManyFdsFlattenable> {
    TooManyFdsFlattenable(size_t fdCount) : mFdCount(fdCount) {}

    // Flattenable protocol
    size_t getFlattenedSize() const {
        // Return a valid non-zero size here so we don't get an unintended
        // BAD_VALUE from Parcel::write
        return 16;
    }
    size_t getFdCount() const { return mFdCount; }
    status_t flatten(void *& /*buffer*/, size_t & /*size*/, int *&fds, size_t &count) const {
        for (size_t i = 0; i < count; i++) {
            fds[i] = STDIN_FILENO;
        }
        return NO_ERROR;
    }
    status_t unflatten(void const *& /*buffer*/, size_t & /*size*/, int const *& /*fds*/,
                       size_t & /*count*/) {
        /* This doesn't get called */
        return NO_ERROR;
    }

    size_t mFdCount;
};

TEST_F(BinderLibTest, TooManyFdsFlattenable) {
    rlimit origNofile;
    int ret = getrlimit(RLIMIT_NOFILE, &origNofile);
    ASSERT_EQ(0, ret);

    // Restore the original file limits when the test finishes
    auto guardUnguard = make_scope_guard([&]() { setrlimit(RLIMIT_NOFILE, &origNofile); });

    rlimit testNofile = {1024, 1024};
    ret = setrlimit(RLIMIT_NOFILE, &testNofile);
    ASSERT_EQ(0, ret);

    Parcel parcel;
    // Try to write more file descriptors than supported by the OS
    TooManyFdsFlattenable tooManyFds1(1024);
    EXPECT_THAT(parcel.write(tooManyFds1), StatusEq(-EMFILE));

    // Try to write more file descriptors than the internal limit
    TooManyFdsFlattenable tooManyFds2(1025);
    EXPECT_THAT(parcel.write(tooManyFds2), StatusEq(BAD_VALUE));
}

TEST(ServiceNotifications, Unregister) {
    auto sm = defaultServiceManager();
    using LocalRegistrationCallback = IServiceManager::LocalRegistrationCallback;
    class LocalRegistrationCallbackImpl : public virtual LocalRegistrationCallback {
        void onServiceRegistration(const String16 &, const sp<IBinder> &) override {}
        virtual ~LocalRegistrationCallbackImpl() {}
    };
    sp<LocalRegistrationCallback> cb = sp<LocalRegistrationCallbackImpl>::make();

    EXPECT_EQ(sm->registerForNotifications(String16("RogerRafa"), cb), OK);
    EXPECT_EQ(sm->unregisterForNotifications(String16("RogerRafa"), cb), OK);
}

TEST_F(BinderLibTest,ThreadPoolAvailableThreads)1392 TEST_F(BinderLibTest, ThreadPoolAvailableThreads) {
1393     Parcel data, reply;
1394     sp<IBinder> server = addServer();
1395     ASSERT_TRUE(server != nullptr);
1396     EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_MAX_THREAD_COUNT, data, &reply),
1397                 StatusEq(NO_ERROR));
1398     int32_t replyi = reply.readInt32();
1399     // see getThreadPoolMaxTotalThreadCount for why there is a race
1400     EXPECT_TRUE(replyi == kKernelThreads + 1 || replyi == kKernelThreads + 2) << replyi;
1401 
1402     EXPECT_THAT(server->transact(BINDER_LIB_TEST_PROCESS_LOCK, data, &reply), NO_ERROR);
1403 
1404     /*
1405      * This will use all threads in the pool but one. There are actually kKernelThreads+2
1406      * available in the other process (startThreadPool, joinThreadPool, + the kernel-
1407      * started threads from setThreadPoolMaxThreadCount
1408      *
1409      * Adding one more will cause it to deadlock.
1410      */
1411     std::vector<std::thread> ts;
1412     for (size_t i = 0; i < kKernelThreads + 1; i++) {
1413         ts.push_back(std::thread([&] {
1414             Parcel local_reply;
1415             EXPECT_THAT(server->transact(BINDER_LIB_TEST_LOCK_UNLOCK, data, &local_reply),
1416                         NO_ERROR);
1417         }));
1418     }
1419 
1420     // Make sure all of the above calls have been queued in parallel. Otherwise, most of
1421     // the time, the call below would preempt them (presumably because we already have the
1422     // scheduler timeslice + the scheduler hint).
1423     sleep(1);
1424 
1425     data.writeInt32(1000);
1426     // Give a chance for all threads to be used (kKernelThreads + 1 threads in use)
1427     EXPECT_THAT(server->transact(BINDER_LIB_TEST_UNLOCK_AFTER_MS, data, &reply), NO_ERROR);
1428 
1429     for (auto &t : ts) {
1430         t.join();
1431     }
1432 
1433     EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_MAX_THREAD_COUNT, data, &reply),
1434                 StatusEq(NO_ERROR));
1435     replyi = reply.readInt32();
1436     EXPECT_EQ(replyi, kKernelThreads + 2);
1437 }
1438 
1439 TEST_F(BinderLibTest, ThreadPoolStarted) {
1440     Parcel data, reply;
1441     sp<IBinder> server = addServer();
1442     ASSERT_TRUE(server != nullptr);
1443     EXPECT_THAT(server->transact(BINDER_LIB_TEST_IS_THREADPOOL_STARTED, data, &reply), NO_ERROR);
1444     EXPECT_TRUE(reply.readBool());
1445 }
1446 
1447 size_t epochMillis() {
1448     using std::chrono::duration_cast;
1449     using std::chrono::milliseconds;
1450     using std::chrono::seconds;
1451     using std::chrono::system_clock;
1452     return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
1453 }
1454 
1455 TEST_F(BinderLibTest, HangingServices) {
1456     Parcel data, reply;
1457     sp<IBinder> server = addServer();
1458     ASSERT_TRUE(server != nullptr);
1459     int32_t delay = 1000; // ms
1460     data.writeInt32(delay);
1461     // b/266537959 - must take the timestamp before taking the lock, since the unlock
1462     // countdown is started in the remote process.
1463     size_t epochMsBefore = epochMillis();
1464     EXPECT_THAT(server->transact(BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK, data, &reply), NO_ERROR);
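    // The remote process now holds its block mutex and has armed a timer thread to
    // release it after `delay` ms, so each LOCK_UNLOCK call below parks one of its
    // pool threads until that unlock happens.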
1465     std::vector<std::thread> ts;
1466     for (size_t i = 0; i < kKernelThreads + 1; i++) {
1467         ts.push_back(std::thread([&] {
1468             Parcel local_reply;
1469             EXPECT_THAT(server->transact(BINDER_LIB_TEST_LOCK_UNLOCK, data, &local_reply),
1470                         NO_ERROR);
1471         }));
1472     }
1473 
1474     for (auto &t : ts) {
1475         t.join();
1476     }
1477     size_t epochMsAfter = epochMillis();
1478 
1479     // All pool threads were blocked, so the calls only finished after the 1s delay had passed.
1480     EXPECT_GE(epochMsAfter, epochMsBefore + delay);
1481 }
1482 
1483 TEST_F(BinderLibTest, BinderProxyCount) {
1484     Parcel data, reply;
1485     sp<IBinder> server = addServer();
1486     ASSERT_NE(server, nullptr);
1487 
1488     uint32_t initialCount = BpBinder::getBinderProxyCount();
1489     size_t iterations = 100;
1490     {
1491         uint32_t count = initialCount;
1492         std::vector<sp<IBinder> > proxies;
1493         sp<IBinder> proxy;
1494         // Create binder proxies and verify the count.
1495         for (size_t i = 0; i < iterations; i++) {
1496             ASSERT_THAT(server->transact(BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION, data, &reply),
1497                         StatusEq(NO_ERROR));
1498             proxies.push_back(reply.readStrongBinder());
1499             EXPECT_EQ(BpBinder::getBinderProxyCount(), ++count);
1500         }
1501         // Remove every other one and verify the count.
1502         auto it = proxies.begin();
1503         for (size_t i = 0; it != proxies.end(); i++) {
1504             if (i % 2 == 0) {
1505                 it = proxies.erase(it);
1506                 EXPECT_EQ(BpBinder::getBinderProxyCount(), --count);
1507             }
1508         }
1509     }
1510     EXPECT_EQ(BpBinder::getBinderProxyCount(), initialCount);
1511 }
1512 
1513 static constexpr int kBpCountHighWatermark = 20;
1514 static constexpr int kBpCountLowWatermark = 10;
1515 static constexpr int kBpCountWarningWatermark = 15;
1516 static constexpr int kInvalidUid = -1;
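// As exercised below: the warning callback is expected to fire when a uid's proxy
// count climbs past kBpCountWarningWatermark, the limit callback when it climbs past
// kBpCountHighWatermark, and neither fires again until the count has dropped back
// below kBpCountLowWatermark.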
1517 
1518 TEST_F(BinderLibTest, BinderProxyCountCallback) {
1519     Parcel data, reply;
1520     sp<IBinder> server = addServer();
1521     ASSERT_NE(server, nullptr);
1522 
1523     BpBinder::enableCountByUid();
1524     EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GETUID, data, &reply), StatusEq(NO_ERROR));
1525     int32_t uid = reply.readInt32();
1526     ASSERT_NE(uid, kInvalidUid);
1527 
1528     uint32_t initialCount = BpBinder::getBinderProxyCount();
1529     {
1530         uint32_t count = initialCount;
1531         BpBinder::setBinderProxyCountWatermarks(kBpCountHighWatermark,
1532                                                 kBpCountLowWatermark,
1533                                                 kBpCountWarningWatermark);
1534         int limitCallbackUid = kInvalidUid;
1535         int warningCallbackUid = kInvalidUid;
1536         BpBinder::setBinderProxyCountEventCallback([&](int uid) { limitCallbackUid = uid; },
1537                                                    [&](int uid) { warningCallbackUid = uid; });
1538 
1539         std::vector<sp<IBinder> > proxies;
1540         auto createProxyOnce = [&](int expectedWarningCallbackUid, int expectedLimitCallbackUid) {
1541             warningCallbackUid = limitCallbackUid = kInvalidUid;
1542             ASSERT_THAT(server->transact(BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION, data, &reply),
1543                         StatusEq(NO_ERROR));
1544             proxies.push_back(reply.readStrongBinder());
1545             EXPECT_EQ(BpBinder::getBinderProxyCount(), ++count);
1546             EXPECT_EQ(warningCallbackUid, expectedWarningCallbackUid);
1547             EXPECT_EQ(limitCallbackUid, expectedLimitCallbackUid);
1548         };
1549         auto removeProxyOnce = [&](int expectedWarningCallbackUid, int expectedLimitCallbackUid) {
1550             warningCallbackUid = limitCallbackUid = kInvalidUid;
1551             proxies.pop_back();
1552             EXPECT_EQ(BpBinder::getBinderProxyCount(), --count);
1553             EXPECT_EQ(warningCallbackUid, expectedWarningCallbackUid);
1554             EXPECT_EQ(limitCallbackUid, expectedLimitCallbackUid);
1555         };
1556 
1557         // Test the increment/decrement of the binder proxies.
1558         for (int i = 1; i <= kBpCountWarningWatermark; i++) {
1559             createProxyOnce(kInvalidUid, kInvalidUid);
1560         }
1561         createProxyOnce(uid, kInvalidUid); // Warning callback should have been triggered.
1562         for (int i = kBpCountWarningWatermark + 2; i <= kBpCountHighWatermark; i++) {
1563             createProxyOnce(kInvalidUid, kInvalidUid);
1564         }
1565         createProxyOnce(kInvalidUid, uid); // Limit callback should have been triggered.
1566         createProxyOnce(kInvalidUid, kInvalidUid);
1567         for (int i = kBpCountHighWatermark + 2; i >= kBpCountHighWatermark; i--) {
1568             removeProxyOnce(kInvalidUid, kInvalidUid);
1569         }
1570         createProxyOnce(kInvalidUid, kInvalidUid);
1571 
1572         // Go down below the low watermark.
1573         for (int i = kBpCountHighWatermark; i >= kBpCountLowWatermark; i--) {
1574             removeProxyOnce(kInvalidUid, kInvalidUid);
1575         }
1576         for (int i = kBpCountLowWatermark; i <= kBpCountWarningWatermark; i++) {
1577             createProxyOnce(kInvalidUid, kInvalidUid);
1578         }
1579         createProxyOnce(uid, kInvalidUid); // Warning callback should have been triggered.
1580         for (int i = kBpCountWarningWatermark + 2; i <= kBpCountHighWatermark; i++) {
1581             createProxyOnce(kInvalidUid, kInvalidUid);
1582         }
1583         createProxyOnce(kInvalidUid, uid); // Limit callback should have been triggered.
1584         createProxyOnce(kInvalidUid, kInvalidUid);
1585         for (int i = kBpCountHighWatermark + 2; i >= kBpCountHighWatermark; i--) {
1586             removeProxyOnce(kInvalidUid, kInvalidUid);
1587         }
1588         createProxyOnce(kInvalidUid, kInvalidUid);
1589     }
1590     EXPECT_EQ(BpBinder::getBinderProxyCount(), initialCount);
1591 }
1592 
1593 class BinderLibRpcTestBase : public BinderLibTest {
1594 public:
1595     void SetUp() override {
1596         if (!base::GetBoolProperty("ro.debuggable", false)) {
1597             GTEST_SKIP() << "Binder RPC is only enabled on debuggable builds, skipping test on "
1598                             "non-debuggable builds.";
1599         }
1600         BinderLibTest::SetUp();
1601     }
1602 
1603     std::tuple<unique_fd, unsigned int> CreateSocket() {
1604         auto rpcServer = RpcServer::make();
1605         EXPECT_NE(nullptr, rpcServer);
1606         if (rpcServer == nullptr) return {};
1607         unsigned int port;
1608         if (status_t status = rpcServer->setupInetServer("127.0.0.1", 0, &port); status != OK) {
1609             ADD_FAILURE() << "setupInetServer failed: " << statusToString(status);
1610             return {};
1611         }
1612         return {rpcServer->releaseServer(), port};
1613     }
1614 };
1615 
1616 class BinderLibRpcTest : public BinderLibRpcTestBase {};
1617 
1618 // e.g. EXPECT_THAT(expr, Debuggable(StatusEq(...)))
1619 // If device is debuggable AND not on user builds, expects matcher.
1620 // Otherwise expects INVALID_OPERATION.
1621 // Debuggable + non user builds is necessary but not sufficient for setRpcClientDebug to work.
1622 static Matcher<status_t> Debuggable(const Matcher<status_t> &matcher) {
1623     bool isDebuggable = android::base::GetBoolProperty("ro.debuggable", false) &&
1624             android::base::GetProperty("ro.build.type", "") != "user";
1625     return isDebuggable ? matcher : StatusEq(INVALID_OPERATION);
1626 }
1627 
1628 TEST_F(BinderLibRpcTest, SetRpcClientDebug) {
1629     auto binder = addServer();
1630     ASSERT_TRUE(binder != nullptr);
1631     auto [socket, port] = CreateSocket();
1632     ASSERT_TRUE(socket.ok());
1633     EXPECT_THAT(binder->setRpcClientDebug(std::move(socket), sp<BBinder>::make()),
1634                 Debuggable(StatusEq(OK)));
1635 }
1636 
1637 // Tests for multiple RpcServer's on the same binder object.
1638 TEST_F(BinderLibRpcTest, SetRpcClientDebugTwice) {
1639     auto binder = addServer();
1640     ASSERT_TRUE(binder != nullptr);
1641 
1642     auto [socket1, port1] = CreateSocket();
1643     ASSERT_TRUE(socket1.ok());
1644     auto keepAliveBinder1 = sp<BBinder>::make();
1645     EXPECT_THAT(binder->setRpcClientDebug(std::move(socket1), keepAliveBinder1),
1646                 Debuggable(StatusEq(OK)));
1647 
1648     auto [socket2, port2] = CreateSocket();
1649     ASSERT_TRUE(socket2.ok());
1650     auto keepAliveBinder2 = sp<BBinder>::make();
1651     EXPECT_THAT(binder->setRpcClientDebug(std::move(socket2), keepAliveBinder2),
1652                 Debuggable(StatusEq(OK)));
1653 }
1654 
1655 // Negative tests for RPC APIs on IBinder. Call should fail in the same way on both remote and
1656 // local binders.
1657 class BinderLibRpcTestP : public BinderLibRpcTestBase, public WithParamInterface<bool> {
1658 public:
1659     sp<IBinder> GetService() {
1660         return GetParam() ? sp<IBinder>(addServer()) : sp<IBinder>(sp<BBinder>::make());
1661     }
1662     static std::string ParamToString(const testing::TestParamInfo<ParamType> &info) {
1663         return info.param ? "remote" : "local";
1664     }
1665 };
1666 
1667 TEST_P(BinderLibRpcTestP, SetRpcClientDebugNoFd) {
1668     auto binder = GetService();
1669     ASSERT_TRUE(binder != nullptr);
1670     EXPECT_THAT(binder->setRpcClientDebug(unique_fd(), sp<BBinder>::make()),
1671                 Debuggable(StatusEq(BAD_VALUE)));
1672 }
1673 
1674 TEST_P(BinderLibRpcTestP, SetRpcClientDebugNoKeepAliveBinder) {
1675     auto binder = GetService();
1676     ASSERT_TRUE(binder != nullptr);
1677     auto [socket, port] = CreateSocket();
1678     ASSERT_TRUE(socket.ok());
1679     EXPECT_THAT(binder->setRpcClientDebug(std::move(socket), nullptr),
1680                 Debuggable(StatusEq(UNEXPECTED_NULL)));
1681 }
1682 INSTANTIATE_TEST_SUITE_P(BinderLibTest, BinderLibRpcTestP, testing::Bool(),
1683                          BinderLibRpcTestP::ParamToString);
1684 
1685 class BinderLibTestService : public BBinder {
1686 public:
1687     explicit BinderLibTestService(int32_t id, bool exitOnDestroy = true)
1688           : m_id(id),
1689             m_nextServerId(id + 1),
1690             m_serverStartRequested(false),
1691             m_callback(nullptr),
1692             m_exitOnDestroy(exitOnDestroy) {
1693         pthread_mutex_init(&m_serverWaitMutex, nullptr);
1694         pthread_cond_init(&m_serverWaitCond, nullptr);
1695     }
1696     ~BinderLibTestService() {
1697         if (m_exitOnDestroy) exit(EXIT_SUCCESS);
1698     }
1699 
1700     void processPendingCall() {
1701         if (m_callback != nullptr) {
1702             Parcel data;
1703             data.writeInt32(NO_ERROR);
1704             m_callback->transact(BINDER_LIB_TEST_CALL_BACK, data, nullptr, TF_ONE_WAY);
1705             m_callback = nullptr;
1706         }
1707     }
1708 
1709     virtual status_t onTransact(uint32_t code, const Parcel &data, Parcel *reply,
1710                                 uint32_t flags = 0) {
1711         // TODO(b/182914638): also check getCallingUid() for RPC
1712         if (!data.isForRpc() && getuid() != (uid_t)IPCThreadState::self()->getCallingUid()) {
1713             return PERMISSION_DENIED;
1714         }
1715         switch (code) {
1716             case BINDER_LIB_TEST_REGISTER_SERVER: {
1717                 sp<IBinder> binder;
1718                 /*id =*/data.readInt32();
1719                 binder = data.readStrongBinder();
1720                 if (binder == nullptr) {
1721                     return BAD_VALUE;
1722                 }
1723 
1724                 if (m_id != 0) return INVALID_OPERATION;
1725 
1726                 pthread_mutex_lock(&m_serverWaitMutex);
1727                 if (m_serverStartRequested) {
1728                     m_serverStartRequested = false;
1729                     m_serverStarted = binder;
1730                     pthread_cond_signal(&m_serverWaitCond);
1731                 }
1732                 pthread_mutex_unlock(&m_serverWaitMutex);
1733                 return NO_ERROR;
1734             }
1735             case BINDER_LIB_TEST_ADD_POLL_SERVER:
1736             case BINDER_LIB_TEST_ADD_SERVER: {
1737                 int ret;
1738                 int serverid;
1739 
1740                 if (m_id != 0) {
1741                     return INVALID_OPERATION;
1742                 }
1743                 pthread_mutex_lock(&m_serverWaitMutex);
1744                 if (m_serverStartRequested) {
1745                     ret = -EBUSY;
1746                 } else {
1747                     serverid = m_nextServerId++;
1748                     m_serverStartRequested = true;
1749                     bool usePoll = code == BINDER_LIB_TEST_ADD_POLL_SERVER;
1750 
1751                     pthread_mutex_unlock(&m_serverWaitMutex);
1752                     ret = start_server_process(serverid, usePoll);
1753                     pthread_mutex_lock(&m_serverWaitMutex);
1754                 }
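                // start_server_process is expected to return the spawned server's
                // pid (> 0) on success; zero or a negative value means the child
                // process could not be started.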
1755                 if (ret > 0) {
1756                     if (m_serverStartRequested) {
1757                         struct timespec ts;
1758                         clock_gettime(CLOCK_REALTIME, &ts);
1759                         ts.tv_sec += 5;
1760                         ret = pthread_cond_timedwait(&m_serverWaitCond, &m_serverWaitMutex, &ts);
1761                     }
1762                     if (m_serverStartRequested) {
1763                         m_serverStartRequested = false;
1764                         ret = -ETIMEDOUT;
1765                     } else {
1766                         reply->writeStrongBinder(m_serverStarted);
1767                         reply->writeInt32(serverid);
1768                         m_serverStarted = nullptr;
1769                         ret = NO_ERROR;
1770                     }
1771                 } else if (ret >= 0) {
1772                     m_serverStartRequested = false;
1773                     ret = UNKNOWN_ERROR;
1774                 }
1775                 pthread_mutex_unlock(&m_serverWaitMutex);
1776                 return ret;
1777             }
1778             case BINDER_LIB_TEST_USE_CALLING_GUARD_TRANSACTION: {
1779                 IPCThreadState::SpGuard spGuard{
1780                         .address = __builtin_frame_address(0),
1781                         .context = "GuardInBinderTransaction",
1782                 };
1783                 const IPCThreadState::SpGuard *origGuard =
1784                         IPCThreadState::self()->pushGetCallingSpGuard(&spGuard);
1785 
1786                 // if the guard works, this should abort
1787                 (void)IPCThreadState::self()->getCallingPid();
1788 
1789                 IPCThreadState::self()->restoreGetCallingSpGuard(origGuard);
1790                 return NO_ERROR;
1791             }
1792 
1793             case BINDER_LIB_TEST_GETPID:
1794                 reply->writeInt32(getpid());
1795                 return NO_ERROR;
1796             case BINDER_LIB_TEST_GETUID:
1797                 reply->writeInt32(getuid());
1798                 return NO_ERROR;
1799             case BINDER_LIB_TEST_NOP_TRANSACTION_WAIT:
1800                 usleep(5000);
1801                 [[fallthrough]];
1802             case BINDER_LIB_TEST_NOP_TRANSACTION:
1803                 // oneway error codes should be ignored
1804                 if (flags & TF_ONE_WAY) {
1805                     return UNKNOWN_ERROR;
1806                 }
1807                 return NO_ERROR;
1808             case BINDER_LIB_TEST_DELAYED_CALL_BACK: {
1809                 // Note: this transaction is only designed for use with a
1810                 // poll() server. See comments around epoll_wait().
1811                 if (m_callback != nullptr) {
1812                     // A callback was already pending; this means that
1813                     // we received a second call while still processing
1814                     // the first one. Fail the test.
1815                     sp<IBinder> callback = data.readStrongBinder();
1816                     Parcel data2;
1817                     data2.writeInt32(UNKNOWN_ERROR);
1818 
1819                     callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, nullptr, TF_ONE_WAY);
1820                 } else {
1821                     m_callback = data.readStrongBinder();
1822                     int32_t delayUs = data.readInt32();
1823                     /*
1824                      * It's necessary that we sleep here, so the next
1825                      * transaction the caller makes will be queued to
1826                      * the async queue.
1827                      */
1828                     usleep(delayUs);
1829 
1830                     /*
1831                      * Now when we return, libbinder will tell the kernel
1832                      * we are done with this transaction, and the kernel
1833                      * can move the queued transaction to either the
1834                      * thread todo worklist (for kernels without the fix),
1835                      * or the proc todo worklist. In case of the former,
1836                      * the next outbound call will pick up the pending
1837                      * transaction, which leads to undesired reentrant
1838                      * behavior. This is caught in the if() branch above.
1839                      */
1840                 }
1841 
1842                 return NO_ERROR;
1843             }
1844             case BINDER_LIB_TEST_NOP_CALL_BACK: {
1845                 Parcel data2, reply2;
1846                 sp<IBinder> binder;
1847                 binder = data.readStrongBinder();
1848                 if (binder == nullptr) {
1849                     return BAD_VALUE;
1850                 }
1851                 data2.writeInt32(NO_ERROR);
1852                 binder->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
1853                 return NO_ERROR;
1854             }
1855             case BINDER_LIB_TEST_GET_SELF_TRANSACTION:
1856                 reply->writeStrongBinder(this);
1857                 return NO_ERROR;
1858             case BINDER_LIB_TEST_GET_ID_TRANSACTION:
1859                 reply->writeInt32(m_id);
1860                 return NO_ERROR;
1861             case BINDER_LIB_TEST_INDIRECT_TRANSACTION: {
1862                 int32_t count;
1863                 uint32_t indirect_code;
1864                 sp<IBinder> binder;
1865 
1866                 count = data.readInt32();
1867                 reply->writeInt32(m_id);
1868                 reply->writeInt32(count);
1869                 for (int i = 0; i < count; i++) {
1870                     binder = data.readStrongBinder();
1871                     if (binder == nullptr) {
1872                         return BAD_VALUE;
1873                     }
1874                     indirect_code = data.readInt32();
1875                     BinderLibTestBundle data2(&data);
1876                     if (!data2.isValid()) {
1877                         return BAD_VALUE;
1878                     }
1879                     BinderLibTestBundle reply2;
1880                     binder->transact(indirect_code, data2, &reply2);
1881                     reply2.appendTo(reply);
1882                 }
1883                 return NO_ERROR;
1884             }
1885             case BINDER_LIB_TEST_SET_ERROR_TRANSACTION:
1886                 reply->setError(data.readInt32());
1887                 return NO_ERROR;
1888             case BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION:
1889                 reply->writeInt32(sizeof(void *));
1890                 return NO_ERROR;
1891             case BINDER_LIB_TEST_GET_STATUS_TRANSACTION:
1892                 return NO_ERROR;
1893             case BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION:
1894                 m_strongRef = data.readStrongBinder();
1895                 return NO_ERROR;
1896             case BINDER_LIB_TEST_LINK_DEATH_TRANSACTION: {
1897                 int ret;
1898                 Parcel data2, reply2;
1899                 sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();
1900                 sp<IBinder> target;
1901                 sp<IBinder> callback;
1902 
1903                 target = data.readStrongBinder();
1904                 if (target == nullptr) {
1905                     return BAD_VALUE;
1906                 }
1907                 callback = data.readStrongBinder();
1908                 if (callback == nullptr) {
1909                     return BAD_VALUE;
1910                 }
1911                 ret = target->linkToDeath(testDeathRecipient);
1912                 if (ret == NO_ERROR) ret = testDeathRecipient->waitEvent(5);
1913                 data2.writeInt32(ret);
1914                 callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
1915                 return NO_ERROR;
1916             }
1917             case BINDER_LIB_TEST_WRITE_FILE_TRANSACTION: {
1918                 int ret;
1919                 int32_t size;
1920                 const void *buf;
1921                 int fd;
1922 
1923                 fd = data.readFileDescriptor();
1924                 if (fd < 0) {
1925                     return BAD_VALUE;
1926                 }
1927                 ret = data.readInt32(&size);
1928                 if (ret != NO_ERROR) {
1929                     return ret;
1930                 }
1931                 buf = data.readInplace(size);
1932                 if (buf == nullptr) {
1933                     return BAD_VALUE;
1934                 }
1935                 ret = write(fd, buf, size);
1936                 if (ret != size) return UNKNOWN_ERROR;
1937                 return NO_ERROR;
1938             }
1939             case BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION: {
1940                 int ret;
1941                 int32_t size;
1942                 const void *buf;
1943                 unique_fd fd;
1944 
1945                 ret = data.readUniqueParcelFileDescriptor(&fd);
1946                 if (ret != NO_ERROR) {
1947                     return ret;
1948                 }
1949                 ret = data.readInt32(&size);
1950                 if (ret != NO_ERROR) {
1951                     return ret;
1952                 }
1953                 buf = data.readInplace(size);
1954                 if (buf == nullptr) {
1955                     return BAD_VALUE;
1956                 }
1957                 ret = write(fd.get(), buf, size);
1958                 if (ret != size) return UNKNOWN_ERROR;
1959                 return NO_ERROR;
1960             }
1961             case BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION:
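                // Schedule SIGALRM in 10 seconds; its default disposition
                // terminates the process, giving callers a delayed exit.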
1962                 alarm(10);
1963                 return NO_ERROR;
1964             case BINDER_LIB_TEST_EXIT_TRANSACTION:
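                // Wait for every child server we spawned to exit before exiting
                // ourselves.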
1965                 while (wait(nullptr) != -1 || errno != ECHILD)
1966                     ;
1967                 exit(EXIT_SUCCESS);
1968             case BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION: {
1969                 sp<IBinder> binder = new BBinder();
1970                 reply->writeStrongBinder(binder);
1971                 return NO_ERROR;
1972             }
1973             case BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION: {
1974                 data.enforceInterface(binderLibTestServiceName);
1975                 reply->writeInt32(IPCThreadState::self()->getCallingWorkSourceUid());
1976                 return NO_ERROR;
1977             }
1978             case BINDER_LIB_TEST_GET_SCHEDULING_POLICY: {
1979                 int policy = 0;
1980                 sched_param param;
1981                 if (0 != pthread_getschedparam(pthread_self(), &policy, &param)) {
1982                     return UNKNOWN_ERROR;
1983                 }
1984                 reply->writeInt32(policy);
1985                 reply->writeInt32(param.sched_priority);
1986                 return NO_ERROR;
1987             }
1988             case BINDER_LIB_TEST_ECHO_VECTOR: {
1989                 std::vector<uint64_t> vector;
1990                 auto err = data.readUint64Vector(&vector);
1991                 if (err != NO_ERROR) return err;
1992                 reply->writeUint64Vector(vector);
1993                 return NO_ERROR;
1994             }
1995             case BINDER_LIB_TEST_GET_NON_BLOCKING_FD: {
1996                 std::array<int, 2> sockets;
1997                 const bool created = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sockets.data()) == 0;
1998                 if (!created) {
1999                     ALOGE("Could not create socket pair");
2000                     return UNKNOWN_ERROR;
2001                 }
2002 
2003                 const int result = fcntl(sockets[0], F_SETFL, O_NONBLOCK);
2004                 if (result != 0) {
2005                     ALOGE("Could not make socket non-blocking: %s", strerror(errno));
2006                     return UNKNOWN_ERROR;
2007                 }
2008                 unique_fd out(sockets[0]);
2009                 status_t writeResult = reply->writeUniqueFileDescriptor(out);
2010                 if (writeResult != NO_ERROR) {
2011                     ALOGE("Could not write unique_fd");
2012                     return writeResult;
2013                 }
2014                 close(sockets[1]); // we don't need the other side of the fd
2015                 return NO_ERROR;
2016             }
2017             case BINDER_LIB_TEST_REJECT_OBJECTS: {
2018                 return data.objectsCount() == 0 ? BAD_VALUE : NO_ERROR;
2019             }
2020             case BINDER_LIB_TEST_CAN_GET_SID: {
2021                 return IPCThreadState::self()->getCallingSid() == nullptr ? BAD_VALUE : NO_ERROR;
2022             }
2023             case BINDER_LIB_TEST_GET_MAX_THREAD_COUNT: {
2024                 reply->writeInt32(ProcessState::self()->getThreadPoolMaxTotalThreadCount());
2025                 return NO_ERROR;
2026             }
2027             case BINDER_LIB_TEST_IS_THREADPOOL_STARTED: {
2028                 reply->writeBool(ProcessState::self()->isThreadPoolStarted());
2029                 return NO_ERROR;
2030             }
2031             case BINDER_LIB_TEST_PROCESS_LOCK: {
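                // Deliberately left locked; the mutex is released later by an
                // UNLOCK_AFTER_MS transaction, so LOCK_UNLOCK callers block until then.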
2032                 m_blockMutex.lock();
2033                 return NO_ERROR;
2034             }
2035             case BINDER_LIB_TEST_LOCK_UNLOCK: {
2036                 std::lock_guard<std::mutex> _l(m_blockMutex);
2037                 return NO_ERROR;
2038             }
2039             case BINDER_LIB_TEST_UNLOCK_AFTER_MS: {
2040                 int32_t ms = data.readInt32();
2041                 return unlockInMs(ms);
2042             }
2043             case BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK: {
2044                 m_blockMutex.lock();
2045                 sp<BinderLibTestService> thisService = this;
2046                 int32_t value = data.readInt32();
2047                 // start a local thread to unlock after the requested delay (value ms)
2048                 std::thread t([=] { thisService->unlockInMs(value); });
2049                 t.detach();
2050                 return NO_ERROR;
2051             }
2052             default:
2053                 return UNKNOWN_TRANSACTION;
2054         };
2055     }
2056 
2057     status_t unlockInMs(int32_t ms) {
2058         usleep(ms * 1000);
2059         m_blockMutex.unlock();
2060         return NO_ERROR;
2061     }
2062 
2063 private:
2064     int32_t m_id;
2065     int32_t m_nextServerId;
2066     pthread_mutex_t m_serverWaitMutex;
2067     pthread_cond_t m_serverWaitCond;
2068     bool m_serverStartRequested;
2069     sp<IBinder> m_serverStarted;
2070     sp<IBinder> m_strongRef;
2071     sp<IBinder> m_callback;
2072     bool m_exitOnDestroy;
2073     std::mutex m_blockMutex;
2074 };
2075 
2076 int run_server(int index, int readypipefd, bool usePoll)
2077 {
2078     binderLibTestServiceName += String16(binderserversuffix);
2079 
2080     // Testing to make sure that transactions we serve can use the getCalling* APIs,
2081     // even though we don't use them here.
2082     IPCThreadState::SpGuard spGuard{
2083             .address = __builtin_frame_address(0),
2084             .context = "main server thread",
2085     };
2086     (void)IPCThreadState::self()->pushGetCallingSpGuard(&spGuard);
2087 
2088     status_t ret;
2089     sp<IServiceManager> sm = defaultServiceManager();
2090     BinderLibTestService* testServicePtr;
2091     {
2092         sp<BinderLibTestService> testService = new BinderLibTestService(index);
2093 
2094         testService->setMinSchedulerPolicy(kSchedPolicy, kSchedPriority);
2095 
2096         testService->setInheritRt(true);
2097 
2098         /*
2099          * Normally the extension would also contain real functionality, but here
2100          * we are only testing the extension mechanism.
2101          */
2102         testService->setExtension(new BBinder());
2103 
2104         // Required for test "BufRejected"
2105         testService->setRequestingSid(true);
2106 
2107         /*
2108          * We need this below, but can't hold a sp<> because it prevents the
2109          * node from being cleaned up automatically. It's safe in this case
2110          * because of how the tests are written.
2111          */
2112         testServicePtr = testService.get();
2113 
2114         if (index == 0) {
2115             ret = sm->addService(binderLibTestServiceName, testService);
2116         } else {
2117 #pragma clang diagnostic push
2118 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2119             sp<IBinder> server = sm->getService(binderLibTestServiceName);
2120 #pragma clang diagnostic pop
2121             Parcel data, reply;
2122             data.writeInt32(index);
2123             data.writeStrongBinder(testService);
2124 
2125             ret = server->transact(BINDER_LIB_TEST_REGISTER_SERVER, data, &reply);
2126         }
2127     }
2128     write(readypipefd, &ret, sizeof(ret));
2129     close(readypipefd);
2130     //printf("%s: ret %d\n", __func__, ret);
2131     if (ret)
2132         return 1;
2133     //printf("%s: joinThreadPool\n", __func__);
2134     if (usePoll) {
2135         int fd;
2136         struct epoll_event ev;
2137         int epoll_fd;
2138         IPCThreadState::self()->setupPolling(&fd);
2139         if (fd < 0) {
2140             return 1;
2141         }
2142         IPCThreadState::self()->flushCommands(); // flush BC_ENTER_LOOPER
2143 
2144         epoll_fd = epoll_create1(EPOLL_CLOEXEC);
2145         if (epoll_fd == -1) {
2146             return 1;
2147         }
2148 
2149         ev.events = EPOLLIN;
2150         if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == -1) {
2151             return 1;
2152         }
2153 
2154         while (1) {
2155              /*
2156               * We simulate a single-threaded process using the binder poll
2157               * interface; besides handling binder commands, it can also
2158               * issue outgoing transactions, by storing a callback in
2159               * m_callback.
2160               *
2161               * processPendingCall() will then issue that transaction.
2162               */
2163              struct epoll_event events[1];
2164              int numEvents = epoll_wait(epoll_fd, events, 1, 1000);
2165              if (numEvents < 0) {
2166                  if (errno == EINTR) {
2167                      continue;
2168                  }
2169                  return 1;
2170              }
2171              if (numEvents > 0) {
2172                  IPCThreadState::self()->handlePolledCommands();
2173                  IPCThreadState::self()->flushCommands(); // flush BC_FREE_BUFFER
2174                  testServicePtr->processPendingCall();
2175              }
2176         }
2177     } else {
2178         ProcessState::self()->setThreadPoolMaxThreadCount(kKernelThreads);
2179         ProcessState::self()->startThreadPool();
2180         IPCThreadState::self()->joinThreadPool();
2181     }
2182     //printf("%s: joinThreadPool returned\n", __func__);
2183     return 1; /* joinThreadPool should not return */
2184 }
2185 
2186 int main(int argc, char** argv) {
2187     if (argc == 4 && !strcmp(argv[1], "--servername")) {
2188         binderservername = argv[2];
2189     } else {
2190         binderservername = argv[0];
2191     }
2192 
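    // When re-executed with the --binderserver flag (presumably by
    // start_server_process), the remaining arguments are: server id, ready-pipe fd,
    // use-poll flag, and the parent test run's service-name suffix.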
2193     if (argc == 6 && !strcmp(argv[1], binderserverarg)) {
2194         binderserversuffix = argv[5];
2195         return run_server(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]) == 1);
2196     }
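    // Running as the gtest host process: suffix the service name with our pid so
    // concurrent runs of this binary register distinct services.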
2197     binderserversuffix = new char[16];
2198     snprintf(binderserversuffix, 16, "%d", getpid());
2199     binderLibTestServiceName += String16(binderserversuffix);
2200 
2201     ::testing::InitGoogleTest(&argc, argv);
2202     binder_env = AddGlobalTestEnvironment(new BinderLibTestEnv());
2203     ProcessState::self()->startThreadPool();
2204     return RUN_ALL_TESTS();
2205 }
2206