#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>

#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>
19
20 using namespace std;
21 using namespace android;
22
// Transaction codes understood by BinderWorkerService.
enum BinderWorkerServiceCode {
  BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
};
26
// Fatal assertion: on failure prints the enclosing function, line and the
// stringified condition to stderr, then exits the whole process.
// Wrapped in do { } while (0) so it behaves as a single statement.
#define ASSERT(cond) \
  do { \
    if (!(cond)) { \
      cerr << __func__ << ":" << __LINE__ << " condition:" << #cond \
           << " failed\n" \
           << endl; \
      exit(EXIT_FAILURE); \
    } \
  } while (0)
36
// Client-side proxies to the server worker services, filled in by each
// worker process in worker_fx().
vector<sp<IBinder> > workers;

// the ratio that the service is synced on the same cpu beyond
// GOOD_SYNC_MIN is considered as good
#define GOOD_SYNC_MIN (0.6)

// Number of significant digits used when dumping latency figures.
#define DUMP_PRESICION 2

// Root of the kernel debugfs tracing directory.
string trace_path = "/sys/kernel/debug/tracing";

// the default value
int no_process = 2;    // total worker processes (one server + one client per pair)
int iterations = 100;  // iterations per client; each does 2 transactions
int payload_size = 16; // parcel payload size in bytes
int no_inherent = 0;   // transactions observed without priority inheritance
int no_sync = 0;       // transactions where callee ran on a different cpu
int verbose = 0;       // -v: dump per-thread scheduling info
int trace;             // -trace: stop kernel tracing on a missed deadline
55
traceIsOn()56 bool traceIsOn() {
57 fstream file;
58 file.open(trace_path + "/tracing_on", ios::in);
59 char on;
60 file >> on;
61 file.close();
62 return on == '1';
63 }
64
traceStop()65 void traceStop() {
66 ofstream file;
67 file.open(trace_path + "/tracing_on", ios::out | ios::trunc);
68 file << '0' << endl;
69 file.close();
70 }
71
// the deadline latency that we are interested in (microseconds);
// a transaction slower than this counts as a miss (see Results).
uint64_t deadline_us = 2500;
74
// Returns the scheduling priority of the calling thread
// (0 for SCHED_OTHER, 1..99 for the realtime policies).
int thread_pri() {
  struct sched_param param;
  int policy;
  ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
  return param.sched_priority;
}
81
thread_dump(const char * prefix)82 void thread_dump(const char* prefix) {
83 struct sched_param param;
84 int policy;
85 if (!verbose) return;
86 cout << "--------------------------------------------------" << endl;
87 cout << setw(12) << left << prefix << " pid: " << getpid()
88 << " tid: " << gettid() << " cpu: " << sched_getcpu() << endl;
89 ASSERT(!pthread_getschedparam(pthread_self(), &policy, ¶m));
90 string s = (policy == SCHED_OTHER)
91 ? "SCHED_OTHER"
92 : (policy == SCHED_FIFO)
93 ? "SCHED_FIFO"
94 : (policy == SCHED_RR) ? "SCHED_RR" : "???";
95 cout << setw(12) << left << s << param.sched_priority << endl;
96 return;
97 }
98
// Minimal in-process binder service. It answers BINDER_NOP transactions
// with two flags telling the caller whether its scheduling priority and
// its cpu were inherited/synced by the callee thread.
class BinderWorkerService : public BBinder {
 public:
  BinderWorkerService() {
  }
  ~BinderWorkerService() {
  }
  virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
                              uint32_t flags = 0) {
    (void)flags;
    (void)data;
    (void)reply;
    switch (code) {
      // The transaction format is like
      //
      // data[in]: int32: caller priority
      // int32: caller cpu
      //
      // reply[out]: int32: 1 if caller's priority != callee's priority
      // int32: 1 if caller's cpu != callee's cpu
      //
      // note the caller cpu read here is not always correct
      // there're still chances that the caller got switched out
      // right after it read the cpu number and still before the transaction.
      case BINDER_NOP: {
        thread_dump("binder");
        int priority = thread_pri();
        int priority_caller = data.readInt32();
        int h = 0, s = 0;
        // A priority mismatch means priority inheritance did not happen.
        if (priority_caller != priority) {
          h++;
          if (verbose) {
            cout << "err priority_caller:" << priority_caller
                 << ", priority:" << priority << endl;
          }
        }
        // Only meaningful for the max-priority (FIFO) caller: check
        // whether the transaction stayed on the caller's cpu.
        if (priority == sched_get_priority_max(SCHED_FIFO)) {
          int cpu = sched_getcpu();
          int cpu_caller = data.readInt32();
          if (cpu != cpu_caller) {
            s++;
          }
        }
        reply->writeInt32(h);
        reply->writeInt32(s);
        return NO_ERROR;
      }
      default:
        return UNKNOWN_TRANSACTION;
    };
  }
};
150
151 class Pipe {
152 int m_readFd;
153 int m_writeFd;
Pipe(int readFd,int writeFd)154 Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {
155 }
156 Pipe(const Pipe&) = delete;
157 Pipe& operator=(const Pipe&) = delete;
158 Pipe& operator=(const Pipe&&) = delete;
159
160 public:
Pipe(Pipe && rval)161 Pipe(Pipe&& rval) noexcept {
162 m_readFd = rval.m_readFd;
163 m_writeFd = rval.m_writeFd;
164 rval.m_readFd = 0;
165 rval.m_writeFd = 0;
166 }
~Pipe()167 ~Pipe() {
168 if (m_readFd) close(m_readFd);
169 if (m_writeFd) close(m_writeFd);
170 }
signal()171 void signal() {
172 bool val = true;
173 int error = write(m_writeFd, &val, sizeof(val));
174 ASSERT(error >= 0);
175 };
wait()176 void wait() {
177 bool val = false;
178 int error = read(m_readFd, &val, sizeof(val));
179 ASSERT(error >= 0);
180 }
181 template <typename T>
send(const T & v)182 void send(const T& v) {
183 int error = write(m_writeFd, &v, sizeof(T));
184 ASSERT(error >= 0);
185 }
186 template <typename T>
recv(T & v)187 void recv(T& v) {
188 int error = read(m_readFd, &v, sizeof(T));
189 ASSERT(error >= 0);
190 }
createPipePair()191 static tuple<Pipe, Pipe> createPipePair() {
192 int a[2];
193 int b[2];
194
195 int error1 = pipe(a);
196 int error2 = pipe(b);
197 ASSERT(error1 >= 0);
198 ASSERT(error2 >= 0);
199
200 return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
201 }
202 };
203
// High-resolution timestamp used to measure per-transaction latency.
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Tick;

// Returns the current time.
static inline Tick tickNow() {
  return std::chrono::high_resolution_clock::now();
}

// Returns the elapsed time from |sta| to |end| in nanoseconds.
// Takes const references: neither timestamp is modified (the previous
// non-const refs also rejected temporaries for no reason).
static inline uint64_t tickNano(const Tick& sta, const Tick& end) {
  return uint64_t(
      std::chrono::duration_cast<std::chrono::nanoseconds>(end - sta).count());
}
213
214 struct Results {
215 uint64_t m_best = 0xffffffffffffffffULL;
216 uint64_t m_worst = 0;
217 uint64_t m_transactions = 0;
218 uint64_t m_total_time = 0;
219 uint64_t m_miss = 0;
220 bool tracing;
ResultsResults221 explicit Results(bool _tracing) : tracing(_tracing) {
222 }
miss_deadlineResults223 inline bool miss_deadline(uint64_t nano) {
224 return nano > deadline_us * 1000;
225 }
add_timeResults226 void add_time(uint64_t nano) {
227 m_best = min(nano, m_best);
228 m_worst = max(nano, m_worst);
229 m_transactions += 1;
230 m_total_time += nano;
231 if (miss_deadline(nano)) m_miss++;
232 if (miss_deadline(nano) && tracing) {
233 // There might be multiple process pair running the test concurrently
234 // each may execute following statements and only the first one actually
235 // stop the trace and any traceStop() afterthen has no effect.
236 traceStop();
237 cout << endl;
238 cout << "deadline triggered: halt & stop trace" << endl;
239 cout << "log:" + trace_path + "/trace" << endl;
240 cout << endl;
241 exit(1);
242 }
243 }
dumpResults244 void dump() {
245 double best = (double)m_best / 1.0E6;
246 double worst = (double)m_worst / 1.0E6;
247 double average = (double)m_total_time / m_transactions / 1.0E6;
248 // FIXME: libjson?
249 int W = DUMP_PRESICION + 2;
250 cout << setprecision(DUMP_PRESICION) << "{ \"avg\":" << setw(W) << left
251 << average << ",\"wst\":" << setw(W) << left << worst
252 << ",\"bst\":" << setw(W) << left << best << ",\"miss\":" << left
253 << m_miss << ",\"meetR\":" << left << setprecision(DUMP_PRESICION + 3)
254 << (1.0 - (double)m_miss / m_transactions) << "}";
255 }
256 };
257
generateServiceName(int num)258 String16 generateServiceName(int num) {
259 char num_str[32];
260 snprintf(num_str, sizeof(num_str), "%d", num);
261 String16 serviceName = String16("binderWorker") + String16(num_str);
262 return serviceName;
263 }
264
// Fill |data| with a payload of |sz| bytes: the caller's |priority| and
// |cpu| first, then int32 zero padding.
// NOTE on the bookkeeping: two int32s are written up front but sz is only
// decremented once; that is compensated by the strict '>' in the loop
// condition, so for sz a multiple of sizeof(uint32_t) exactly sz bytes end
// up written. Do not "fix" one side without the other.
static void parcel_fill(Parcel& data, int sz, int priority, int cpu) {
  ASSERT(sz >= (int)sizeof(uint32_t) * 2);
  data.writeInt32(priority);
  data.writeInt32(cpu);
  sz -= sizeof(uint32_t);
  while (sz > (int)sizeof(uint32_t)) {
    data.writeInt32(0);
    sz -= sizeof(uint32_t);
  }
}
275
// Arguments handed to the one-shot FIFO thread (thread_start).
typedef struct {
  void* result;  // Results* that receives the transaction latency
  int target;    // index into the global workers vector
} thread_priv_t;
280
// Entry point of the one-shot FIFO thread: performs a single BINDER_NOP
// transaction against workers[priv->target], records its latency into the
// Results pointed to by priv->result, and folds the reply's
// inheritance/sync counters into the globals.
// The creator joins immediately after pthread_create, so the
// unsynchronized updates of no_inherent/no_sync do not race with it.
static void* thread_start(void* p) {
  thread_priv_t* priv = (thread_priv_t*)p;
  int target = priv->target;
  Results* results_fifo = (Results*)priv->result;
  Parcel data, reply;
  Tick sta, end;

  parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
  thread_dump("fifo-caller");

  // Time just the binder round trip.
  sta = tickNow();
  status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
  ASSERT(ret == NO_ERROR);
  end = tickNow();
  results_fifo->add_time(tickNano(sta, end));

  no_inherent += reply.readInt32();
  no_sync += reply.readInt32();
  return nullptr;
}
301
302 // create a fifo thread to transact and wait it to finished
thread_transaction(int target,Results * results_fifo)303 static void thread_transaction(int target, Results* results_fifo) {
304 thread_priv_t thread_priv;
305 void* dummy;
306 pthread_t thread;
307 pthread_attr_t attr;
308 struct sched_param param;
309 thread_priv.target = target;
310 thread_priv.result = results_fifo;
311 ASSERT(!pthread_attr_init(&attr));
312 ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
313 param.sched_priority = sched_get_priority_max(SCHED_FIFO);
314 ASSERT(!pthread_attr_setschedparam(&attr, ¶m));
315 ASSERT(!pthread_create(&thread, &attr, &thread_start, &thread_priv));
316 ASSERT(!pthread_join(thread, &dummy));
317 }
318
319 #define is_client(_num) ((_num) >= (no_process / 2))
320
// Body executed by each forked worker process (server or client), |num| in
// [0, no_process). Registers its own binder service, handshakes with the
// master over |p|, then (clients only) runs the measurement loop: each
// iteration does one transaction from a FIFO thread and one from a normal
// thread. Exits with the number of transactions that showed no priority
// inheritance.
void worker_fx(int num, int no_process, int iterations, int payload_size,
               Pipe p) {
  int dummy;
  Results results_other(false), results_fifo(trace);

  // Create BinderWorkerService and for go.
  ProcessState::self()->startThreadPool();
  sp<IServiceManager> serviceMgr = defaultServiceManager();
  sp<BinderWorkerService> service = new BinderWorkerService;
  serviceMgr->addService(generateServiceName(num), service);
  // init done
  p.signal();
  // wait for kick-off
  p.wait();

  // If client/server pairs, then half the workers are
  // servers and half are clients
  int server_count = no_process / 2;

  // Grab proxies to every server service.
  for (int i = 0; i < server_count; i++) {
    // self service is in-process so just skip
    if (num == i) continue;
    workers.push_back(serviceMgr->getService(generateServiceName(i)));
  }

  // Client for each pair iterates here
  // each iterations contains exatcly 2 transactions
  for (int i = 0; is_client(num) && i < iterations; i++) {
    Parcel data, reply;
    Tick sta, end;
    // the target is paired to make it easier to diagnose
    int target = num % server_count;

    // 1. transaction by fifo thread
    thread_transaction(target, &results_fifo);
    parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
    thread_dump("other-caller");

    // 2. transaction by other thread
    sta = tickNow();
    ASSERT(NO_ERROR == workers[target]->transact(BINDER_NOP, data, &reply));
    end = tickNow();
    results_other.add_time(tickNano(sta, end));

    no_inherent += reply.readInt32();
    no_sync += reply.readInt32();
  }
  // Signal completion to master and wait.
  p.signal();
  p.wait();

  p.send(&dummy);
  // wait for kill
  p.wait();
  // Client for each pair dump here
  if (is_client(num)) {
    int no_trans = iterations * 2;
    double sync_ratio = (1.0 - (double)no_sync / no_trans);
    // FIXME: libjson?
    cout << "\"P" << (num - server_count) << "\":{\"SYNC\":\""
         << ((sync_ratio > GOOD_SYNC_MIN) ? "GOOD" : "POOR") << "\","
         << "\"S\":" << (no_trans - no_sync) << ",\"I\":" << no_trans << ","
         << "\"R\":" << sync_ratio << "," << endl;

    cout << " \"other_ms\":";
    results_other.dump();
    cout << "," << endl;
    cout << " \"fifo_ms\": ";
    results_fifo.dump();
    cout << endl;
    cout << "}," << endl;
  }
  exit(no_inherent);
}
395
// Fork one worker process and return the parent's end of its pipe.
// In the child this call never returns: worker_fx() runs the worker and
// exits the process.
Pipe make_process(int num, int iterations, int no_process, int payload_size) {
  auto pipe_pair = Pipe::createPipePair();
  pid_t pid = fork();
  if (pid) {
    // parent
    return move(get<0>(pipe_pair));
  } else {
    // child
    thread_dump(is_client(num) ? "client" : "server");
    worker_fx(num, no_process, iterations, payload_size,
              move(get<1>(pipe_pair)));
    // never get here
    return move(get<0>(pipe_pair));
  }
}
411
wait_all(vector<Pipe> & v)412 void wait_all(vector<Pipe>& v) {
413 for (size_t i = 0; i < v.size(); i++) {
414 v[i].wait();
415 }
416 }
417
signal_all(vector<Pipe> & v)418 void signal_all(vector<Pipe>& v) {
419 for (size_t i = 0; i < v.size(); i++) {
420 v[i].signal();
421 }
422 }
423
424 // This test is modified from binderThroughputTest.cpp
main(int argc,char ** argv)425 int main(int argc, char** argv) {
426 for (int i = 1; i < argc; i++) {
427 if (string(argv[i]) == "-i") {
428 iterations = atoi(argv[i + 1]);
429 i++;
430 continue;
431 }
432 if (string(argv[i]) == "-pair") {
433 no_process = 2 * atoi(argv[i + 1]);
434 i++;
435 continue;
436 }
437 if (string(argv[i]) == "-deadline_us") {
438 deadline_us = atoi(argv[i + 1]);
439 i++;
440 continue;
441 }
442 if (string(argv[i]) == "-v") {
443 verbose = 1;
444 }
445 // The -trace argument is used like that:
446 //
447 // First start trace with atrace command as usual
448 // >atrace --async_start sched freq
449 //
450 // then use schd-dbg with -trace arguments
451 //./schd-dbg -trace -deadline_us 2500
452 //
453 // This makes schd-dbg to stop trace once it detects a transaction
454 // duration over the deadline. By writing '0' to
455 // /sys/kernel/debug/tracing and halt the process. The tracelog is
456 // then available on /sys/kernel/debug/trace
457 if (string(argv[i]) == "-trace") {
458 trace = 1;
459 }
460 }
461 if (trace && !traceIsOn()) {
462 cout << "trace is not running" << endl;
463 cout << "check " << trace_path + "/tracing_on" << endl;
464 cout << "use atrace --async_start first" << endl;
465 exit(-1);
466 }
467 vector<Pipe> pipes;
468 thread_dump("main");
469 // FIXME: libjson?
470 cout << "{" << endl;
471 cout << "\"cfg\":{\"pair\":" << (no_process / 2)
472 << ",\"iterations\":" << iterations << ",\"deadline_us\":" << deadline_us
473 << "}," << endl;
474
475 // the main process fork 2 processes for each pairs
476 // 1 server + 1 client
477 // each has a pipe to communicate with
478 for (int i = 0; i < no_process; i++) {
479 pipes.push_back(make_process(i, iterations, no_process, payload_size));
480 }
481 // wait for init done
482 wait_all(pipes);
483 // kick-off iterations
484 signal_all(pipes);
485 // wait for completion
486 wait_all(pipes);
487 // start to send result
488 signal_all(pipes);
489 for (int i = 0; i < no_process; i++) {
490 int status;
491 // kill
492 pipes[i].signal();
493 wait(&status);
494 // the exit status is number of transactions without priority inheritance
495 // detected in the child process
496 no_inherent += status;
497 }
498 // FIXME: libjson?
499 cout << "\"inheritance\": " << (no_inherent == 0 ? "\"PASS\"" : "\"FAIL\"")
500 << endl;
501 cout << "}" << endl;
502 return -no_inherent;
503 }
504