#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <string>
#include <cstring>
#include <cstdlib>
#include <cstdio>

#include <chrono>
#include <iostream>
#include <vector>
#include <tuple>

#include <unistd.h>
#include <sys/wait.h>

using namespace std;
using namespace android;

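// Transaction codes understood by BinderWorkerService; BINDER_NOP is an empty
// round trip used to measure raw binder IPC latency.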
enum BinderWorkerServiceCode {
    BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
};

#define ASSERT_TRUE(cond) \
do { \
    if (!(cond)) {\
       cerr << __func__ << ":" << __LINE__ << " condition:" << #cond << " failed\n" << endl; \
       exit(EXIT_FAILURE); \
    } \
} while (0)

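// Minimal binder service: every worker registers one of these and answers
// BINDER_NOP with an empty reply, so each transaction measures only binder
// round-trip overhead.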
class BinderWorkerService : public BBinder
{
public:
    BinderWorkerService() {}
    ~BinderWorkerService() {}
    virtual status_t onTransact(uint32_t code,
                                const Parcel& data, Parcel* reply,
                                uint32_t flags = 0) {
        (void)flags;
        (void)data;
        (void)reply;
        switch (code) {
        case BINDER_NOP:
            return NO_ERROR;
        default:
            return UNKNOWN_TRANSACTION;
        };
    }
};

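// One endpoint of a bidirectional parent/child channel built from two
// anonymous pipes; createPipePair() returns the two matched endpoints so the
// driver and each worker can signal()/wait() on each other and exchange
// ProcResults.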
class Pipe {
    int m_readFd;
    int m_writeFd;
    Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {}
    Pipe(const Pipe &) = delete;
    Pipe& operator=(const Pipe &) = delete;
    Pipe& operator=(const Pipe &&) = delete;
public:
    Pipe(Pipe&& rval) noexcept {
        m_readFd = rval.m_readFd;
        m_writeFd = rval.m_writeFd;
        rval.m_readFd = 0;
        rval.m_writeFd = 0;
    }
    ~Pipe() {
        if (m_readFd)
            close(m_readFd);
        if (m_writeFd)
            close(m_writeFd);
    }
    void signal() {
        bool val = true;
        int error = write(m_writeFd, &val, sizeof(val));
        ASSERT_TRUE(error >= 0);
    };
    void wait() {
        bool val = false;
        int error = read(m_readFd, &val, sizeof(val));
        ASSERT_TRUE(error >= 0);
    }
    template <typename T> void send(const T& v) {
        int error = write(m_writeFd, &v, sizeof(T));
        ASSERT_TRUE(error >= 0);
    }
    template <typename T> void recv(T& v) {
        int error = read(m_readFd, &v, sizeof(T));
        ASSERT_TRUE(error >= 0);
    }
    static tuple<Pipe, Pipe> createPipePair() {
        int a[2];
        int b[2];

        int error1 = pipe(a);
        int error2 = pipe(b);
        ASSERT_TRUE(error1 >= 0);
        ASSERT_TRUE(error2 >= 0);

        return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
    }
};

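// Latency histogram parameters: 128 buckets spanning 0..50 ms of
// per-transaction round-trip time (~0.39 ms per bucket).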
static const uint32_t num_buckets = 128;
static const uint64_t max_time_bucket = 50ull * 1000000;
static const uint64_t time_per_bucket = max_time_bucket / num_buckets;
static constexpr float time_per_bucket_ms = time_per_bucket / 1.0E6;

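// Per-process latency statistics: best/worst/total times plus a bucketed
// histogram that dump() walks to report approximate 50/90/95/99th
// percentiles in milliseconds.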
struct ProcResults {
    uint64_t m_best = max_time_bucket;
    uint64_t m_worst = 0;
    uint32_t m_buckets[num_buckets] = {0};
    uint64_t m_transactions = 0;
    uint64_t m_total_time = 0;

    void add_time(uint64_t time) {
        m_buckets[min(time, max_time_bucket - 1) / time_per_bucket] += 1;
        m_best = min(time, m_best);
        m_worst = max(time, m_worst);
        m_transactions += 1;
        m_total_time += time;
    }
    static ProcResults combine(const ProcResults& a, const ProcResults& b) {
        ProcResults ret;
        for (uint32_t i = 0; i < num_buckets; i++) {
            ret.m_buckets[i] = a.m_buckets[i] + b.m_buckets[i];
        }
        ret.m_worst = max(a.m_worst, b.m_worst);
        ret.m_best = min(a.m_best, b.m_best);
        ret.m_transactions = a.m_transactions + b.m_transactions;
        ret.m_total_time = a.m_total_time + b.m_total_time;
        return ret;
    }
    void dump() {
        double best = (double)m_best / 1.0E6;
        double worst = (double)m_worst / 1.0E6;
        double average = (double)m_total_time / m_transactions / 1.0E6;
        cout << "average:" << average << "ms worst:" << worst << "ms best:" << best << "ms" << endl;

        uint64_t cur_total = 0;
        for (uint32_t i = 0; i < num_buckets; i++) {
            float cur_time = time_per_bucket_ms * i + 0.5f * time_per_bucket_ms;
            if ((cur_total < 0.5f * m_transactions) && (cur_total + m_buckets[i] >= 0.5f * m_transactions)) {
                cout << "50%: " << cur_time << " ";
            }
            if ((cur_total < 0.9f * m_transactions) && (cur_total + m_buckets[i] >= 0.9f * m_transactions)) {
                cout << "90%: " << cur_time << " ";
            }
            if ((cur_total < 0.95f * m_transactions) && (cur_total + m_buckets[i] >= 0.95f * m_transactions)) {
                cout << "95%: " << cur_time << " ";
            }
            if ((cur_total < 0.99f * m_transactions) && (cur_total + m_buckets[i] >= 0.99f * m_transactions)) {
                cout << "99%: " << cur_time << " ";
            }
            cur_total += m_buckets[i];
        }
        cout << endl;
    }
};

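// Service-manager name for worker `num`, e.g. "binderWorker3".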
String16 generateServiceName(int num)
{
    char num_str[32];
    snprintf(num_str, sizeof(num_str), "%d", num);
    String16 serviceName = String16("binderWorker") + String16(num_str);
    return serviceName;
}

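// Body of each forked worker: registers its own BinderWorkerService, looks up
// the other workers' services, and, when acting as a client, times
// `iterations` BINDER_NOP transactions, sending its ProcResults back over the
// pipe. With cs_pair the first half of the workers only serve; the second half
// only drive transactions at their paired server.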
void worker_fx(
    int num,
    int worker_count,
    int iterations,
    int payload_size,
    bool cs_pair,
    Pipe p)
{
    // Create BinderWorkerService and wait for go.
    ProcessState::self()->startThreadPool();
    sp<IServiceManager> serviceMgr = defaultServiceManager();
    sp<BinderWorkerService> service = new BinderWorkerService;
    serviceMgr->addService(generateServiceName(num), service);

    srand(num);
    p.signal();
    p.wait();

    // If client/server pairs, then half the workers are
    // servers and half are clients.
    int server_count = cs_pair ? worker_count / 2 : worker_count;

    // Get references to the other binder services.
    cout << "Created BinderWorker" << num << endl;
    vector<sp<IBinder> > workers;
    for (int i = 0; i < server_count; i++) {
        if (num == i)
            continue;
        workers.push_back(serviceMgr->getService(generateServiceName(i)));
    }

    // Run the benchmark if client.
    ProcResults results;
    chrono::time_point<chrono::high_resolution_clock> start, end;
    for (int i = 0; (!cs_pair || num >= server_count) && i < iterations; i++) {
        Parcel data, reply;
        int target = cs_pair ? num % server_count : rand() % workers.size();
        int sz = payload_size;

        // Fill the parcel with zeroed 32-bit words until the requested
        // payload size is reached.
        while (sz > (int)sizeof(uint32_t)) {
            data.writeInt32(0);
            sz -= sizeof(uint32_t);
        }
        start = chrono::high_resolution_clock::now();
        status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
        end = chrono::high_resolution_clock::now();

        uint64_t cur_time = uint64_t(chrono::duration_cast<chrono::nanoseconds>(end - start).count());
        results.add_time(cur_time);

        if (ret != NO_ERROR) {
           cout << "thread " << num << " failed " << ret << " i: " << i << endl;
           exit(EXIT_FAILURE);
        }
    }

    // Signal completion to master and wait.
    p.signal();
    p.wait();

    // Send results to master and wait for go to exit.
    p.send(results);
    p.wait();

    exit(EXIT_SUCCESS);
}

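// Fork one worker process. The parent keeps one end of the pipe pair for
// driving the worker; the child runs worker_fx() with the other end and
// never returns.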
Pipe make_worker(int num, int iterations, int worker_count, int payload_size, bool cs_pair)
{
    auto pipe_pair = Pipe::createPipePair();
    pid_t pid = fork();
    if (pid) {
        /* parent */
        return move(get<0>(pipe_pair));
    } else {
        /* child */
        worker_fx(num, worker_count, iterations, payload_size, cs_pair, move(get<1>(pipe_pair)));
        /* never get here */
        return move(get<0>(pipe_pair));
    }
}

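// Broadcast helpers: block until every worker has signalled, or signal every
// worker to proceed to its next phase.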
void wait_all(vector<Pipe>& v)
{
    for (size_t i = 0; i < v.size(); i++) {
        v[i].wait();
    }
}

void signal_all(vector<Pipe>& v)
{
    for (size_t i = 0; i < v.size(); i++) {
        v[i].signal();
    }
}

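// Forks `workers` processes, synchronizes them through pipes, measures overall
// throughput while they run, then gathers and prints per-transaction latency
// statistics before reaping the children.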
int main(int argc, char *argv[])
{
    int workers = 2;
    int iterations = 10000;
    int payload_size = 0;
    bool cs_pair = false;
    vector<Pipe> pipes;

    // Parse arguments.
    for (int i = 1; i < argc; i++) {
        if (string(argv[i]) == "-w") {
            workers = atoi(argv[i+1]);
            i++;
            continue;
        }
        if (string(argv[i]) == "-i") {
            iterations = atoi(argv[i+1]);
            i++;
            continue;
        }
        if (string(argv[i]) == "-s") {
            payload_size = atoi(argv[i+1]);
            i++;
            continue;
        }
        if (string(argv[i]) == "-p") {
            // Client/server pairs instead of spreading
            // requests to all workers. If true, half
            // the workers become clients and half servers.
            cs_pair = true;
        }
    }

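    // Example invocation (binary name assumed): `binderThroughputTest -w 4 -i 100000 -s 64 -p`
    // runs 4 workers as 2 client/server pairs, 100000 iterations per client,
    // with a requested payload of 64 bytes per transaction.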
    // Create all the workers and wait for them to spawn.
    for (int i = 0; i < workers; i++) {
        pipes.push_back(make_worker(i, iterations, workers, payload_size, cs_pair));
    }
    wait_all(pipes);

    // Run the workers and wait for completion.
    chrono::time_point<chrono::high_resolution_clock> start, end;
    cout << "waiting for workers to complete" << endl;
    start = chrono::high_resolution_clock::now();
    signal_all(pipes);
    wait_all(pipes);
    end = chrono::high_resolution_clock::now();

    // Calculate overall throughput.
    double iterations_per_sec = double(iterations * workers) / (chrono::duration_cast<chrono::nanoseconds>(end - start).count() / 1.0E9);
    cout << "iterations per sec: " << iterations_per_sec << endl;

    // Collect all results from the workers.
    cout << "collecting results" << endl;
    signal_all(pipes);
    ProcResults tot_results;
    for (int i = 0; i < workers; i++) {
        ProcResults tmp_results;
        pipes[i].recv(tmp_results);
        tot_results = ProcResults::combine(tot_results, tmp_results);
    }
    tot_results.dump();

    // Kill all the workers.
    cout << "killing workers" << endl;
    signal_all(pipes);
    for (int i = 0; i < workers; i++) {
        int status;
        wait(&status);
        if (status != 0) {
            cout << "nonzero child status " << status << endl;
        }
    }
    return 0;
}