/* Copyright 2008 The Android Open Source Project
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "binder.h"

#define MAX_BIO_SIZE (1 << 30)

#define TRACE 0

#define LOG_TAG "Binder"
#include <cutils/log.h>
void bio_init_from_txn(struct binder_io *io, struct binder_transaction_data *txn);

#if TRACE
void hexdump(void *_data, size_t len)
{
    unsigned char *data = _data;
    size_t count;

    for (count = 0; count < len; count++) {
        if ((count & 15) == 0)
            fprintf(stderr,"%04zu:", count);
        fprintf(stderr," %02x %c", *data,
                (*data < 32) || (*data > 126) ? '.' : *data);
        data++;
        if ((count & 15) == 15)
            fprintf(stderr,"\n");
    }
    if ((count & 15) != 0)
        fprintf(stderr,"\n");
}

void binder_dump_txn(struct binder_transaction_data *txn)
{
    struct flat_binder_object *obj;
    binder_size_t *offs = (binder_size_t *)(uintptr_t)txn->data.ptr.offsets;
    size_t count = txn->offsets_size / sizeof(binder_size_t);

    fprintf(stderr," target %016"PRIx64" cookie %016"PRIx64" code %08x flags %08x\n",
            (uint64_t)txn->target.ptr, (uint64_t)txn->cookie, txn->code, txn->flags);
    fprintf(stderr," pid %8d uid %8d data %"PRIu64" offs %"PRIu64"\n",
            txn->sender_pid, txn->sender_euid, (uint64_t)txn->data_size, (uint64_t)txn->offsets_size);
    hexdump((void *)(uintptr_t)txn->data.ptr.buffer, txn->data_size);
    while (count--) {
        obj = (struct flat_binder_object *) (((char*)(uintptr_t)txn->data.ptr.buffer) + *offs++);
        fprintf(stderr," - type %08x flags %08x ptr %016"PRIx64" cookie %016"PRIx64"\n",
                obj->type, obj->flags, (uint64_t)obj->binder, (uint64_t)obj->cookie);
    }
}

#define NAME(n) case n: return #n
const char *cmd_name(uint32_t cmd)
{
    switch(cmd) {
        NAME(BR_NOOP);
        NAME(BR_TRANSACTION_COMPLETE);
        NAME(BR_INCREFS);
        NAME(BR_ACQUIRE);
        NAME(BR_RELEASE);
        NAME(BR_DECREFS);
        NAME(BR_TRANSACTION);
        NAME(BR_REPLY);
        NAME(BR_FAILED_REPLY);
        NAME(BR_DEAD_REPLY);
        NAME(BR_DEAD_BINDER);
    default: return "???";
    }
}
#else
#define hexdump(a,b) do{} while (0)
#define binder_dump_txn(txn)  do{} while (0)
#endif

#define BIO_F_SHARED    0x01  /* buffer is owned by the driver; free with BC_FREE_BUFFER */
#define BIO_F_OVERFLOW  0x02  /* ran out of space */
#define BIO_F_IOERROR   0x04
#define BIO_F_MALLOCED  0x08  /* needs to be free()'d */

struct binder_state
{
    int fd;
    void *mapped;
    size_t mapsize;
};

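/* Open /dev/binder, check that the driver speaks our protocol version, and
 * map 'mapsize' bytes of kernel buffer space for incoming transaction data.
 * Returns NULL (with the fd closed and the state freed) on any failure.
 * servicemanager, for example, calls this with a 128*1024 byte map. */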
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr, "binder: driver version differs from user space\n");
        goto fail_map;    /* the fd is already open and must be closed */
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

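/* Undo binder_open: unmap the transaction buffer, close the driver fd,
 * and free the state. */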
void binder_close(struct binder_state *bs)
{
    munmap(bs->mapped, bs->mapsize);
    close(bs->fd);
    free(bs);
}

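/* Ask the driver to make this process the context manager, i.e. the owner
 * of handle 0 (in practice, the service manager). Returns the raw ioctl
 * result. */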
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

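/* Push a block of binder commands to the driver: a write-only
 * BINDER_WRITE_READ with an empty read side. */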
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

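/* Send a BC_REPLY for the transaction whose buffer is 'buffer_to_free',
 * releasing that buffer back to the driver with BC_FREE_BUFFER in the same
 * write. A non-zero 'status' sends only the status code (TF_STATUS_CODE)
 * instead of the reply payload. */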
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}

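/* Walk a command stream returned by the driver. Incoming BR_TRANSACTIONs are
 * dispatched to 'func' and answered via binder_send_reply; a BR_REPLY fills
 * in 'bio' (if given) and makes the function return 0. Returns 1 when the
 * buffer is exhausted without a reply, and a negative value on error. */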
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr," %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

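/* binder_acquire/binder_release send BC_ACQUIRE / BC_RELEASE to take or drop
 * a strong reference on the remote object named by 'target'. */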
void binder_acquire(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_ACQUIRE;
    cmd[1] = target;
    binder_write(bs, cmd, sizeof(cmd));
}

void binder_release(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_RELEASE;
    cmd[1] = target;
    binder_write(bs, cmd, sizeof(cmd));
}

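/* Register 'death' for death notification on 'target': when the remote
 * process dies, binder_parse receives BR_DEAD_BINDER carrying this cookie
 * and calls death->func. */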
void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death)
{
    struct {
        uint32_t cmd;
        struct binder_handle_cookie payload;
    } __attribute__((packed)) data;

    data.cmd = BC_REQUEST_DEATH_NOTIFICATION;
    data.payload.handle = target;
    data.payload.cookie = (uintptr_t) death;
    binder_write(bs, &data, sizeof(data));
}

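/* Perform a synchronous transaction: send 'msg' to handle 'target' with the
 * given method 'code', then read from the driver until binder_parse sees the
 * BR_REPLY and fills in 'reply'. Returns 0 on success and -1 on failure,
 * marking 'reply' with BIO_F_IOERROR. */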
int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    if (msg->flags & BIO_F_OVERFLOW) {
        fprintf(stderr,"binder: txn buffer overflow\n");
        goto fail;
    }

    writebuf.cmd = BC_TRANSACTION;
    writebuf.txn.target.handle = target;
    writebuf.txn.code = code;
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;

    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;

    hexdump(msg->data0, msg->data - msg->data0);
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
            goto fail;
        }

        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0;
        if (res < 0) goto fail;
    }

fail:
    memset(reply, 0, sizeof(*reply));
    reply->flags |= BIO_F_IOERROR;
    return -1;
}

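/* Main server loop: tell the driver this thread has entered the looper
 * (BC_ENTER_LOOPER), then block in BINDER_WRITE_READ and hand every batch of
 * commands to binder_parse with 'func' as the transaction handler. */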
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

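/* Point a binder_io at the data and offsets buffers of a received
 * transaction. The buffer is owned by the driver, so BIO_F_SHARED is set and
 * binder_done later releases it with BC_FREE_BUFFER. */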
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;
}

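/* Carve a caller-supplied buffer into an offsets area (room for 'maxoffs'
 * object offsets) followed by a data area, and reset the read/write
 * cursors. */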
void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);

    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n;
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}

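/* Reserve 'size' bytes (rounded up to a 4-byte boundary) in the data area,
 * or return NULL and set BIO_F_OVERFLOW if there is not enough room. */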
static void *bio_alloc(struct binder_io *bio, size_t size)
{
    size = (size + 3) & (~3);
    if (size > bio->data_avail) {
        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    } else {
        void *ptr = bio->data;
        bio->data += size;
        bio->data_avail -= size;
        return ptr;
    }
}

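/* Release a driver-owned reply buffer (BIO_F_SHARED) back to the kernel with
 * BC_FREE_BUFFER once the caller is finished reading it. */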
void binder_done(struct binder_state *bs,
                 struct binder_io *msg,
                 struct binder_io *reply)
{
    struct {
        uint32_t cmd;
        uintptr_t buffer;
    } __attribute__((packed)) data;

    if (reply->flags & BIO_F_SHARED) {
        data.cmd = BC_FREE_BUFFER;
        data.buffer = (uintptr_t) reply->data0;
        binder_write(bs, &data, sizeof(data));
        reply->flags = 0;
    }
}

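/* Allocate space for a flat_binder_object in the data area and record its
 * offset in the offsets area so the driver knows where the object lives. */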
static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = bio_alloc(bio, sizeof(*obj));

    if (obj && bio->offs_avail) {
        bio->offs_avail--;
        *bio->offs++ = ((char*) obj) - ((char*) bio->data0);
        return obj;
    }

    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}

void bio_put_uint32(struct binder_io *bio, uint32_t n)
{
    uint32_t *ptr = bio_alloc(bio, sizeof(n));
    if (ptr)
        *ptr = n;
}

void bio_put_obj(struct binder_io *bio, void *ptr)
{
    struct flat_binder_object *obj;

    obj = bio_alloc_obj(bio);
    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_BINDER;
    obj->binder = (uintptr_t)ptr;
    obj->cookie = 0;
}

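/* Append a handle reference to the payload. A zero handle is laid down with
 * bio_alloc rather than bio_alloc_obj, i.e. without an offset entry, so the
 * driver treats it as plain data instead of translating it. */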
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;
    obj->handle = handle;
    obj->cookie = 0;
}

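/* Append a UTF-16 string: a 32-bit character count followed by the
 * characters and a terminating NUL. A NULL (or oversized) string is encoded
 * as 0xffffffff. bio_put_string16_x does the same for an 8-bit input,
 * widening each byte to 16 bits. */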
void bio_put_string16(struct binder_io *bio, const uint16_t *str)
{
    size_t len;
    uint16_t *ptr;

    if (!str) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    len = 0;
    while (str[len]) len++;

    if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    /* Note: The payload will carry 32bit size instead of size_t */
    bio_put_uint32(bio, (uint32_t) len);
    len = (len + 1) * sizeof(uint16_t);
    ptr = bio_alloc(bio, len);
    if (ptr)
        memcpy(ptr, str, len);
}

void bio_put_string16_x(struct binder_io *bio, const char *_str)
{
    unsigned char *str = (unsigned char*) _str;
    size_t len;
    uint16_t *ptr;

    if (!str) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    len = strlen(_str);

    if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    /* Note: The payload will carry 32bit size instead of size_t */
    bio_put_uint32(bio, len);
    ptr = bio_alloc(bio, (len + 1) * sizeof(uint16_t));
    if (!ptr)
        return;

    while (*str)
        *ptr++ = *str++;
    *ptr++ = 0;
}

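/* Consume 'size' bytes (rounded up to a 4-byte boundary) from the data area,
 * returning a pointer to them, or NULL (and BIO_F_OVERFLOW) if the payload
 * is exhausted. The typed readers below are built on this. */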
static void *bio_get(struct binder_io *bio, size_t size)
{
    size = (size + 3) & (~3);

    if (bio->data_avail < size){
        bio->data_avail = 0;
        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    } else {
        void *ptr = bio->data;
        bio->data += size;
        bio->data_avail -= size;
        return ptr;
    }
}

uint32_t bio_get_uint32(struct binder_io *bio)
{
    uint32_t *ptr = bio_get(bio, sizeof(*ptr));
    return ptr ? *ptr : 0;
}

uint16_t *bio_get_string16(struct binder_io *bio, size_t *sz)
{
    size_t len;

    /* Note: The payload will carry 32bit size instead of size_t */
    len = (size_t) bio_get_uint32(bio);
    if (sz)
        *sz = len;
    return bio_get(bio, (len + 1) * sizeof(uint16_t));
}

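/* Read a flat_binder_object at the current cursor, but only if the driver
 * recorded an offset for that position; otherwise flag an overflow. Used by
 * bio_get_ref to pull a handle out of a reply. */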
static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
    size_t n;
    size_t off = bio->data - bio->data0;

    /* TODO: be smarter about this? */
    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off)
            return bio_get(bio, sizeof(struct flat_binder_object));
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}

uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;
}