/*
**
** Copyright (C) 2008-2011, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#include <android/log.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "mapinfo.h"

extern int heaptracker_stacktrace(intptr_t*, size_t);
extern void *__real_malloc(size_t size);
extern void *__real_realloc(void *ptr, size_t size);
extern void *__real_calloc(int nmemb, size_t size);
extern void __real_free(void *ptr);
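
/*
 * The __wrap_*()/__real_*() pairs rely on the GNU linker's --wrap option:
 * with --wrap=malloc, references to malloc() resolve to __wrap_malloc() and
 * __real_malloc() resolves to the original allocator.  A hedged sketch of
 * the flags a build might pass (the snippet is illustrative, not taken from
 * any particular makefile):
 *
 *     LOCAL_LDFLAGS += -Wl,--wrap=malloc -Wl,--wrap=calloc \
 *                      -Wl,--wrap=realloc -Wl,--wrap=free
 */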

static mapinfo *milist;

#define MAX_BACKTRACE_DEPTH 15
#define ALLOCATION_TAG      0x1ee7d00d
#define BACKLOG_TAG         0xbabecafe
#define FREE_POISON         0xa5
#define BACKLOG_MAX         50
#define FRONT_GUARD         0xaa
#define FRONT_GUARD_LEN     (1<<4)
#define REAR_GUARD          0xbb
#define REAR_GUARD_LEN      (1<<4)
#define SCANNER_SLEEP_S     3

struct hdr {
    uint32_t tag;
    struct hdr *prev;
    struct hdr *next;
    intptr_t bt[MAX_BACKTRACE_DEPTH];
    int bt_depth;
    intptr_t freed_bt[MAX_BACKTRACE_DEPTH];
    int freed_bt_depth;
    size_t size;
    char front_guard[FRONT_GUARD_LEN];
} __attribute__((packed));

struct ftr {
    char rear_guard[REAR_GUARD_LEN];
} __attribute__((packed));
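
/*
 * Layout of one tracked allocation as obtained from __real_malloc()
 * (sketch, sizes not to scale):
 *
 *     [ struct hdr | user data (hdr->size bytes) | struct ftr ]
 *
 * user() converts a header into the pointer handed back to the caller,
 * meta() maps a user pointer back to its header, and to_ftr() locates the
 * rear guard just past the user data.
 */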

static inline struct ftr * to_ftr(struct hdr *hdr)
{
    return (struct ftr *)(((char *)(hdr + 1)) + hdr->size);
}

static inline void *user(struct hdr *hdr)
{
    return hdr + 1;
}

static inline struct hdr *meta(void *user)
{
    return ((struct hdr *)user) - 1;
}

extern int __android_log_vprint(int prio, const char *tag, const char *fmt, va_list ap);
static void default_log(const char *fmt, ...)
{
    va_list lst;
    va_start(lst, fmt);
    __android_log_vprint(ANDROID_LOG_ERROR, "DEBUG", fmt, lst);
    va_end(lst);
}

/* Override this for non-printf reporting */
void (*malloc_log)(const char *fmt, ...) = default_log;
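
/*
 * Illustrative override (a client-side sketch, not part of this tracker):
 * redirect reports to stderr by pointing the hook at a custom sink before
 * exercising the heap, e.g.
 *
 *     static void stderr_log(const char *fmt, ...)
 *     {
 *         va_list ap;
 *         va_start(ap, fmt);
 *         vfprintf(stderr, fmt, ap);
 *         va_end(ap);
 *     }
 *     ...
 *     malloc_log = stderr_log;
 */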

/* Call this at dlclose() to report and reclaim leaked memory */
void heaptracker_free_leaked_memory(void);
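
/*
 * Sketch of the intended flow when the tracked code is loaded with dlopen()
 * (handle and lookup are illustrative; the library destructor below also
 * runs this at unload):
 *
 *     void (*report)(void) =
 *         (void (*)(void))dlsym(handle, "heaptracker_free_leaked_memory");
 *     if (report)
 *         report();
 *     dlclose(handle);
 */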

static unsigned num;
static struct hdr *first;
static struct hdr *last;
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static unsigned backlog_num;
static struct hdr *backlog_first;
static struct hdr *backlog_last;
static pthread_rwlock_t backlog_lock = PTHREAD_RWLOCK_INITIALIZER;

void print_backtrace(const intptr_t *bt, int depth)
{
    mapinfo *mi;
    int cnt, rel_pc;
    intptr_t self_bt[MAX_BACKTRACE_DEPTH];

    if (!bt) {
        depth = heaptracker_stacktrace(self_bt, MAX_BACKTRACE_DEPTH);
        bt = self_bt;
    }

    malloc_log("*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n");
    for (cnt = 0; cnt < depth && cnt < MAX_BACKTRACE_DEPTH; cnt++) {
        mi = pc_to_mapinfo(milist, bt[cnt], &rel_pc);
        malloc_log("\t#%02d  pc %08x  %s\n", cnt,
                   mi ? rel_pc : bt[cnt],
                   mi ? mi->name : "(unknown)");
    }
}

static inline void init_front_guard(struct hdr *hdr)
{
    memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN);
}

static inline int is_front_guard_valid(struct hdr *hdr)
{
    unsigned i;
    for (i = 0; i < FRONT_GUARD_LEN; i++)
        if (hdr->front_guard[i] != FRONT_GUARD)
            return 0;
    return 1;
}

static inline void init_rear_guard(struct hdr *hdr)
{
    struct ftr *ftr = to_ftr(hdr);
    memset(ftr->rear_guard, REAR_GUARD, REAR_GUARD_LEN);
}

static inline int is_rear_guard_valid(struct hdr *hdr)
{
    unsigned i;
    int valid = 1;
    int first_mismatch = -1;
    struct ftr *ftr = to_ftr(hdr);
    for (i = 0; i < REAR_GUARD_LEN; i++) {
        if (ftr->rear_guard[i] != REAR_GUARD) {
            if (first_mismatch < 0)
                first_mismatch = i;
            valid = 0;
        }
        else if (first_mismatch >= 0) {
            malloc_log("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
            first_mismatch = -1;
        }
    }

    if (first_mismatch >= 0)
        malloc_log("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
    return valid;
}
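
/*
 * Live and backlogged (quarantined) allocations are kept on two intrusive
 * doubly linked lists: *last points at the most recently added header,
 * hdr->next walks toward older entries, and *first is the oldest.  __add()
 * and __del() work on either list; callers hold the matching rwlock.
 */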

static inline void __add(struct hdr *hdr, struct hdr **first, struct hdr **last)
{
    hdr->prev = 0;
    hdr->next = *last;
    if (*last)
        (*last)->prev = hdr;
    else
        *first = hdr;
    *last = hdr;
}

static inline int __del(struct hdr *hdr, struct hdr **first, struct hdr **last)
{
    if (hdr->prev)
        hdr->prev->next = hdr->next;
    else
        *last = hdr->next;
    if (hdr->next)
        hdr->next->prev = hdr->prev;
    else
        *first = hdr->prev;
    return 0;
}

static inline void add(struct hdr *hdr, size_t size)
{
    pthread_rwlock_wrlock(&lock);
    hdr->tag = ALLOCATION_TAG;
    hdr->size = size;
    init_front_guard(hdr);
    init_rear_guard(hdr);
    num++;
    __add(hdr, &first, &last);
    pthread_rwlock_unlock(&lock);
}

static inline int del(struct hdr *hdr)
{
    if (hdr->tag != ALLOCATION_TAG)
        return -1;

    pthread_rwlock_wrlock(&lock);
    __del(hdr, &first, &last);
    num--;
    pthread_rwlock_unlock(&lock);
    return 0;
}

static inline void poison(struct hdr *hdr)
{
    memset(user(hdr), FREE_POISON, hdr->size);
}

static int was_used_after_free(struct hdr *hdr)
{
    unsigned i;
    const char *data = (const char *)user(hdr);
    for (i = 0; i < hdr->size; i++)
        if (data[i] != FREE_POISON)
            return 1;
    return 0;
}

/* returns 1 if valid, *safe == 1 if safe to dump stack */
static inline int check_guards(struct hdr *hdr, int *safe)
{
    *safe = 1;
    if (!is_front_guard_valid(hdr)) {
        if (hdr->front_guard[0] == FRONT_GUARD) {
            malloc_log("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED FRONT GUARD\n",
                       user(hdr), hdr->size);
        } else {
            malloc_log("+++ ALLOCATION %p HAS A CORRUPTED FRONT GUARD "
                       "(NOT DUMPING STACKTRACE)\n", user(hdr));
            /* Allocation header is probably corrupt, do not print stack trace */
            *safe = 0;
        }
        return 0;
    }

    if (!is_rear_guard_valid(hdr)) {
        malloc_log("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED REAR GUARD\n",
                   user(hdr), hdr->size);
        return 0;
    }

    return 1;
}

/* returns 1 if valid, *safe == 1 if safe to dump stack */
static inline int __check_allocation(struct hdr *hdr, int *safe)
{
    int valid = 1;
    *safe = 1;

    if (hdr->tag != ALLOCATION_TAG && hdr->tag != BACKLOG_TAG) {
        malloc_log("+++ ALLOCATION %p HAS INVALID TAG %08x (NOT DUMPING STACKTRACE)\n",
                   user(hdr), hdr->tag);
        /* Allocation header is probably corrupt, do not dequeue or dump stack
         * trace.
         */
        *safe = 0;
        return 0;
    }

    if (hdr->tag == BACKLOG_TAG && was_used_after_free(hdr)) {
        malloc_log("+++ ALLOCATION %p SIZE %d WAS USED AFTER BEING FREED\n",
                   user(hdr), hdr->size);
        valid = 0;
        /* check the guards to see if it's safe to dump a stack trace */
        (void)check_guards(hdr, safe);
    }
    else
        valid = check_guards(hdr, safe);

    if (!valid && *safe) {
        malloc_log("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                   user(hdr), hdr->size);
        print_backtrace(hdr->bt, hdr->bt_depth);
        if (hdr->tag == BACKLOG_TAG) {
            malloc_log("+++ ALLOCATION %p SIZE %d FREED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
        }
    }

    return valid;
}

static inline int __del_and_check(struct hdr *hdr,
                                  struct hdr **first, struct hdr **last, unsigned *cnt,
                                  int *safe)
{
    int valid;
    valid = __check_allocation(hdr, safe);
    if (*safe) {
        (*cnt)--;
        __del(hdr, first, last);
    }
    return valid;
}

static inline void __del_from_backlog(struct hdr *hdr)
{
    int safe;
    (void)__del_and_check(hdr,
                          &backlog_first, &backlog_last, &backlog_num,
                          &safe);
    hdr->tag = 0; /* clear the tag */
}

static inline void del_from_backlog(struct hdr *hdr)
{
    pthread_rwlock_wrlock(&backlog_lock);
    __del_from_backlog(hdr);
    pthread_rwlock_unlock(&backlog_lock);
}

static inline int del_leak(struct hdr *hdr, int *safe)
{
    int valid;
    pthread_rwlock_wrlock(&lock);
    valid = __del_and_check(hdr,
                            &first, &last, &num,
                            safe);
    pthread_rwlock_unlock(&lock);
    return valid;
}
330 
add_to_backlog(struct hdr * hdr)331 static inline void add_to_backlog(struct hdr *hdr)
332 {
333     pthread_rwlock_wrlock(&backlog_lock);
334     hdr->tag = BACKLOG_TAG;
335     backlog_num++;
336     __add(hdr, &backlog_first, &backlog_last);
337     poison(hdr);
338     /* If we've exceeded the maximum backlog, clear it up */
339     while (backlog_num > BACKLOG_MAX) {
340         struct hdr *gone = backlog_first;
341         __del_from_backlog(gone);
342         __real_free(gone);
343     }
344     pthread_rwlock_unlock(&backlog_lock);
345 }
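
/*
 * Freed blocks are not returned to the system right away: they are filled
 * with FREE_POISON and parked on the backlog above, so that later writes
 * through stale pointers show up as poison damage in the scanner or on a
 * subsequent free()/realloc().  Only when the backlog grows past
 * BACKLOG_MAX is the oldest entry handed back to __real_free().
 */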

void* __wrap_malloc(size_t size)
{
//  malloc_tracker_log("%s: %s\n", __FILE__, __FUNCTION__);
    struct hdr *hdr = __real_malloc(sizeof(struct hdr) + size +
                                    sizeof(struct ftr));
    if (hdr) {
        hdr->bt_depth = heaptracker_stacktrace(
                            hdr->bt, MAX_BACKTRACE_DEPTH);
        add(hdr, size);
        return user(hdr);
    }
    return NULL;
}

void __wrap_free(void *ptr)
{
    struct hdr *hdr;
    if (!ptr) /* ignore free(NULL) */
        return;

    hdr = meta(ptr);

    if (del(hdr) < 0) {
        intptr_t bt[MAX_BACKTRACE_DEPTH];
        int depth;
        depth = heaptracker_stacktrace(bt, MAX_BACKTRACE_DEPTH);
        if (hdr->tag == BACKLOG_TAG) {
            malloc_log("+++ ALLOCATION %p SIZE %d BYTES MULTIPLY FREED!\n",
                       user(hdr), hdr->size);
            malloc_log("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(hdr->bt, hdr->bt_depth);
            /* hdr->freed_bt_depth should be nonzero here */
            malloc_log("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
            malloc_log("+++ ALLOCATION %p SIZE %d NOW BEING FREED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(bt, depth);
        }
        else {
            malloc_log("+++ ALLOCATION %p IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
                       user(hdr));
            print_backtrace(bt, depth);
            /* Leak here so that we do not crash */
            //__real_free(user(hdr));
        }
    }
    else {
        hdr->freed_bt_depth = heaptracker_stacktrace(hdr->freed_bt,
                                      MAX_BACKTRACE_DEPTH);
        add_to_backlog(hdr);
    }
}

void *__wrap_realloc(void *ptr, size_t size)
{
    struct hdr *hdr;

    if (!size) {
        __wrap_free(ptr);
        return NULL;
    }

    if (!ptr)
        return __wrap_malloc(size);

    hdr = meta(ptr);

//  malloc_log("%s: %s\n", __FILE__, __FUNCTION__);
    if (del(hdr) < 0) {
        intptr_t bt[MAX_BACKTRACE_DEPTH];
        int depth;
        depth = heaptracker_stacktrace(bt, MAX_BACKTRACE_DEPTH);
        if (hdr->tag == BACKLOG_TAG) {
            malloc_log("+++ REALLOCATION %p SIZE %d OF FREED MEMORY (SIZE %d)!\n",
                       user(hdr), size, hdr->size);
            malloc_log("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(hdr->bt, hdr->bt_depth);
            /* hdr->freed_bt_depth should be nonzero here */
            malloc_log("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
            malloc_log("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n",
                       user(hdr), hdr->size);
            print_backtrace(bt, depth);

            /* We take the memory out of the backlog and fall through so the
             * reallocation below succeeds.  Since we didn't really free it, we
             * can default to this behavior.
             */
            del_from_backlog(hdr);
        }
        else {
            malloc_log("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
                       user(hdr), size);
            print_backtrace(bt, depth);
            // just get a whole new allocation and leak the old one
            return __real_realloc(0, size);
            // return __real_realloc(user(hdr), size); // assuming it was allocated externally
        }
    }

    hdr = __real_realloc(hdr, sizeof(struct hdr) + size + sizeof(struct ftr));
    if (hdr) {
        hdr->bt_depth = heaptracker_stacktrace(hdr->bt, MAX_BACKTRACE_DEPTH);
        add(hdr, size);
        return user(hdr);
    }

    return NULL;
}

void *__wrap_calloc(int nmemb, size_t size)
{
//  malloc_tracker_log("%s: %s\n", __FILE__, __FUNCTION__);
    struct hdr *hdr;
    size_t __size = nmemb * size;
    hdr = __real_calloc(1, sizeof(struct hdr) + __size + sizeof(struct ftr));
    if (hdr) {
        hdr->bt_depth = heaptracker_stacktrace(
                            hdr->bt, MAX_BACKTRACE_DEPTH);
        add(hdr, __size);
        return user(hdr);
    }
    return NULL;
}
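
/*
 * Note: the nmemb * size product above is forwarded to __real_calloc()
 * without an overflow check, so a request large enough to wrap size_t
 * would be under-allocated rather than rejected.
 */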

void heaptracker_free_leaked_memory(void)
{
    struct hdr *del;

    if (num)
        malloc_log("+++ THERE ARE %d LEAKED ALLOCATIONS\n", num);

    while (last) {
        int safe;
        del = last;
        malloc_log("+++ DELETING %d BYTES OF LEAKED MEMORY AT %p (%d REMAINING)\n",
                del->size, user(del), num);
        if (del_leak(del, &safe)) {
            /* safe == 1, because the allocation is valid */
            malloc_log("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                        user(del), del->size);
            print_backtrace(del->bt, del->bt_depth);
        }
        __real_free(del);
    }

//  malloc_log("+++ DELETING %d BACKLOGGED ALLOCATIONS\n", backlog_num);
    while (backlog_last) {
        del = backlog_first;
        del_from_backlog(del);
        __real_free(del);
    }
}

static int check_list(struct hdr *list, pthread_rwlock_t *rwlock)
{
    struct hdr *hdr;
    int safe, num_checked;

    pthread_rwlock_rdlock(rwlock);
    num_checked = 0;
    hdr = list;
    while (hdr) {
        (void)__check_allocation(hdr, &safe);
        hdr = hdr->next;
        num_checked++;
    }
    pthread_rwlock_unlock(rwlock);

    return num_checked;
}

static pthread_t scanner_thread;
static pthread_cond_t scanner_cond = PTHREAD_COND_INITIALIZER;
static int scanner_stop;
static pthread_mutex_t scanner_lock = PTHREAD_MUTEX_INITIALIZER;

static void* scanner(void *data __attribute__((unused)))
{
    struct timespec ts;
    int num_checked, num_checked_backlog;

    while (1) {
        num_checked = check_list(last, &lock);
        num_checked_backlog = check_list(backlog_last, &backlog_lock);

//      malloc_log("@@@ scanned %d/%d allocs and %d/%d freed\n",
//                 num_checked, num,
//                 num_checked_backlog, backlog_num);

        pthread_mutex_lock(&scanner_lock);
        if (!scanner_stop) {
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += SCANNER_SLEEP_S;
            pthread_cond_timedwait(&scanner_cond, &scanner_lock, &ts);
        }
        if (scanner_stop) {
            pthread_mutex_unlock(&scanner_lock);
            break;
        }
        pthread_mutex_unlock(&scanner_lock);
    }

//  malloc_log("@@@ scanner thread exiting");
    return NULL;
}

static void init(void) __attribute__((constructor));
static void init(void)
{
//  malloc_log("@@@ start scanner thread");
    milist = init_mapinfo(getpid());
    pthread_create(&scanner_thread,
                   NULL,
                   scanner,
                   NULL);
}

static void deinit(void) __attribute__((destructor));
static void deinit(void)
{
//  malloc_log("@@@ signal stop to scanner thread");
    pthread_mutex_lock(&scanner_lock);
    scanner_stop = 1;
    pthread_cond_signal(&scanner_cond);
    pthread_mutex_unlock(&scanner_lock);
//  malloc_log("@@@ wait for scanner thread to exit");
    pthread_join(scanner_thread, NULL);
//  malloc_log("@@@ scanner thread stopped");

    heaptracker_free_leaked_memory();
    deinit_mapinfo(milist);
}