/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/system_properties.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>
#include <unwind.h>

#include "debug_stacktrace.h"
#include "malloc_debug_backtrace.h"
#include "malloc_debug_common.h"
#include "malloc_debug_disable.h"

#include "private/bionic_macros.h"
#include "private/libc_logging.h"
#include "private/ScopedPthreadMutexLocker.h"

// This file should be included in the build only when MALLOC_LEAK_CHECK,
// MALLOC_QEMU_INSTRUMENT, or both are defined.
#ifndef MALLOC_LEAK_CHECK
#error MALLOC_LEAK_CHECK is not defined.
#endif  // !MALLOC_LEAK_CHECK

extern int gMallocLeakZygoteChild;
extern HashTable* g_hash_table;
extern const MallocDebug* g_malloc_dispatch;

// =============================================================================
// stack trace functions
// =============================================================================

#define GUARD               0x48151642
#define DEBUG               0

// =============================================================================
// Structures
// =============================================================================

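// Every allocation tracked by the leak checker is preceded by an
// AllocationEntry header: a pointer back to the HashEntry that records the
// allocation's backtrace, plus a guard word used to detect corruption and to
// distinguish plain allocations (GUARD) from memalign'd ones (MEMALIGN_GUARD).
// The to_header/const_to_header helpers step back from a user pointer to
// this header.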
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
} __attribute__((aligned(MALLOC_ALIGNMENT)));

static inline AllocationEntry* to_header(void* mem) {
  return reinterpret_cast<AllocationEntry*>(mem) - 1;
}

static inline const AllocationEntry* const_to_header(const void* mem) {
  return reinterpret_cast<const AllocationEntry*>(mem) - 1;
}

// =============================================================================
// Hash Table functions
// =============================================================================

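// Hash a backtrace with a djb2-style multiply-by-33 loop. The frame addresses
// are shifted right by two first, since the low bits of code addresses are
// largely fixed by instruction alignment and carry little entropy.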
static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
    if (backtrace == NULL) return 0;

    uint32_t hash = 0;  // Unsigned, so the wrapping multiply is well defined.
    size_t i;
    for (i = 0 ; i < numEntries ; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}

static HashEntry* find_entry(HashTable* table, int slot,
                             uintptr_t* backtrace, size_t numEntries, size_t size) {
    HashEntry* entry = table->slots[slot];
    while (entry != NULL) {
        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
        /*
         * See if the entry matches exactly.  We compare the "size" field,
         * including the flag bits.
         */
        if (entry->size == size && entry->numEntries == numEntries &&
                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
            return entry;
        }

        entry = entry->next;
    }

    return NULL;
}

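// Look up (or create) the HashEntry for this backtrace/size pair. New entries
// are pushed onto the head of the slot's doubly-linked chain; the top bits of
// "size" are reserved for flags such as SIZE_FLAG_ZYGOTE_CHILD.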
static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    if (gMallocLeakZygoteChild) {
        size |= SIZE_FLAG_ZYGOTE_CHILD;
    }

    HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // Create a new entry, with the backtrace stored inline after it.
        entry = static_cast<HashEntry*>(g_malloc_dispatch->malloc(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
        if (!entry) {
            return NULL;
        }
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = g_hash_table->slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));

        g_hash_table->slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // We just added an entry, so bump the hashtable's entry count.
        g_hash_table->count++;
    }

    return entry;
}

// Walk the whole table to check that "entry" still points at a live HashEntry.
static int is_valid_entry(HashEntry* entry) {
  if (entry != NULL) {
    for (size_t i = 0; i < HASHTABLE_SIZE; ++i) {
      HashEntry* e1 = g_hash_table->slots[i];
      while (e1 != NULL) {
        if (e1 == entry) {
          return 1;
        }
        e1 = e1->next;
      }
    }
  }
  return 0;
}

static void remove_entry(HashEntry* entry) {
  HashEntry* prev = entry->prev;
  HashEntry* next = entry->next;

  if (prev != NULL) entry->prev->next = next;
  if (next != NULL) entry->next->prev = prev;

  if (prev == NULL) {
    // we are the head of the list. set the head to be next
    g_hash_table->slots[entry->slot] = entry->next;
  }

  // we just removed an entry, so decrease the hashtable's entry count
  g_hash_table->count--;
}

// =============================================================================
// malloc fill functions
// =============================================================================

#define CHK_FILL_FREE           0xef
#define CHK_SENTINEL_VALUE      0xeb

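// The fill_* family doesn't track allocations; it just writes recognizable
// byte patterns so that reads of uninitialized memory (CHK_SENTINEL_VALUE)
// or of freed memory (CHK_FILL_FREE) stand out in a debugger or crash dump.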
extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
    return g_malloc_dispatch->calloc(n_elements, elem_size);
}

extern "C" void* fill_malloc(size_t bytes) {
    void* buffer = g_malloc_dispatch->malloc(bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

extern "C" void fill_free(void* mem) {
    if (mem == NULL) {
        // free(NULL) is a no-op; bail out before poking at the block.
        return;
    }
    size_t bytes = g_malloc_dispatch->malloc_usable_size(mem);
    memset(mem, CHK_FILL_FREE, bytes);
    g_malloc_dispatch->free(mem);
}

extern "C" void* fill_realloc(void* mem, size_t bytes) {
    size_t oldSize = g_malloc_dispatch->malloc_usable_size(mem);
    void* newMem = g_malloc_dispatch->realloc(mem, bytes);
    if (newMem) {
        // If this is larger than before, fill the extra with our pattern.
        size_t newSize = g_malloc_dispatch->malloc_usable_size(newMem);
        if (newSize > oldSize) {
            memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(newMem)+oldSize), CHK_FILL_FREE, newSize-oldSize);
        }
    }
    return newMem;
}

extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
    void* buffer = g_malloc_dispatch->memalign(alignment, bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

extern "C" size_t fill_malloc_usable_size(const void* mem) {
    // Since we didn't allocate extra bytes before or after, we can
    // report the normal usable size here.
    return g_malloc_dispatch->malloc_usable_size(mem);
}

extern "C" struct mallinfo fill_mallinfo() {
  return g_malloc_dispatch->mallinfo();
}

extern "C" int fill_posix_memalign(void** memptr, size_t alignment, size_t size) {
  if (!powerof2(alignment)) {
    return EINVAL;
  }
  int saved_errno = errno;
  *memptr = fill_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != NULL) ? 0 : ENOMEM;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
extern "C" void* fill_pvalloc(size_t bytes) {
  size_t pagesize = getpagesize();
  size_t size = BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) { // Overflow
    return NULL;
  }
  return fill_memalign(pagesize, size);
}

extern "C" void* fill_valloc(size_t size) {
  return fill_memalign(getpagesize(), size);
}
#endif

// =============================================================================
// malloc leak functions
// =============================================================================

static uint32_t MEMALIGN_GUARD      = 0xA1A41520;

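// leak_malloc over-allocates by sizeof(AllocationEntry) and lays the block
// out as:
//
//   base -> [ AllocationEntry { entry, guard } ][ user memory ... ]
//                                                ^-- pointer returned to caller
//
// so that leak_free can recover the bookkeeping header with simple pointer
// arithmetic (see to_header).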
extern "C" void* leak_malloc(size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->malloc(bytes);
    }

    // Allocate enough space in front of the allocation to store the pointer
    // to the alloc structure. This makes freeing the structure really fast!

    // 1. allocate enough memory and include our header
    // 2. set the base pointer to be right after our header

    size_t size = bytes + sizeof(AllocationEntry);
    if (size < bytes) { // Overflow.
        errno = ENOMEM;
        return NULL;
    }

    void* base = g_malloc_dispatch->malloc(size);
    if (base != NULL) {
        ScopedPthreadMutexLocker locker(&g_hash_table->lock);

        uintptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = GET_BACKTRACE(backtrace, BACKTRACE_SIZE);

        AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
        header->entry = record_backtrace(backtrace, numEntries, bytes);
        header->guard = GUARD;

        // Now increment base to point just past our header; this works
        // because the header is MALLOC_ALIGNMENT-aligned.
        base = reinterpret_cast<AllocationEntry*>(base) + 1;
    }

    return base;
}

extern "C" void leak_free(void* mem) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->free(mem);
  }

  if (mem == NULL) {
    return;
  }

  ScopedPthreadMutexLocker locker(&g_hash_table->lock);

  // check the guard to make sure it is valid
  AllocationEntry* header = to_header(mem);

  if (header->guard != GUARD) {
    // could be a memaligned block
    if (header->guard == MEMALIGN_GUARD) {
      // For memaligned blocks, header->entry points to the memory
      // allocated through leak_malloc.
      header = to_header(header->entry);
    }
  }

  if (header->guard == GUARD || is_valid_entry(header->entry)) {
    // decrement the allocations
    HashEntry* entry = header->entry;
    entry->allocations--;
    if (entry->allocations <= 0) {
      remove_entry(entry);
      g_malloc_dispatch->free(entry);
    }

    // now free the memory!
    g_malloc_dispatch->free(header);
  } else {
    debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
              header->guard, header->entry);
  }
}

extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->calloc(n_elements, elem_size);
    }

    // Fail on overflow - just to be safe even though this code runs only
    // within the debugging C library, not the production one.
    if (n_elements && SIZE_MAX / n_elements < elem_size) {
        errno = ENOMEM;
        return NULL;
    }
    size_t size = n_elements * elem_size;
    void* ptr  = leak_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

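// The usable size of a tracked block is whatever the underlying allocator
// reports for the whole chunk, minus the bytes that sit between the real
// base ("header") and the pointer the caller holds ("mem"). For plain
// allocations that gap is just sizeof(AllocationEntry); for memalign'd ones
// it also includes the alignment padding.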
extern "C" size_t leak_malloc_usable_size(const void* mem) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->malloc_usable_size(mem);
    }

    if (mem == NULL) {
        return 0;
    }

    // Check the guard to make sure it is valid.
    const AllocationEntry* header = const_to_header(mem);

    if (header->guard == MEMALIGN_GUARD) {
        // If this is a memalign'd pointer, then grab the header from
        // entry.
        header = const_to_header(header->entry);
    } else if (header->guard != GUARD) {
        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                  header->guard, header->entry);
        return 0;
    }

    size_t ret = g_malloc_dispatch->malloc_usable_size(header);
    if (ret != 0) {
        // The usable area starts at 'mem' and stops at 'header+ret'.
        return reinterpret_cast<uintptr_t>(header) + ret - reinterpret_cast<uintptr_t>(mem);
    }
    return 0;
}

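// leak_realloc takes the simple route: it always allocates a fresh tracked
// block (which also records a fresh backtrace), copies min(old usable size,
// new size) bytes, and releases the old block through leak_free.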
extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->realloc(oldMem, bytes);
    }

    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }

    void* newMem = NULL;
    AllocationEntry* header = to_header(oldMem);
    if (header->guard == MEMALIGN_GUARD) {
        // Get the real header.
        header = to_header(header->entry);
    } else if (header->guard != GUARD) {
        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                   header->guard, header->entry);
        errno = ENOMEM;
        return NULL;
    }

    newMem = leak_malloc(bytes);
    if (newMem != NULL) {
        size_t oldSize = leak_malloc_usable_size(oldMem);
        size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
        memcpy(newMem, oldMem, copySize);
        leak_free(oldMem);
    }

    return newMem;
}

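// leak_memalign satisfies alignments above MALLOC_ALIGNMENT by over-allocating
// through leak_malloc, rounding the returned pointer up to the requested
// alignment, and writing a second header (MEMALIGN_GUARD plus a pointer back
// to the real base) just before the aligned address so leak_free can undo
// the offset.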
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->memalign(alignment, bytes);
    }

    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT) {
        return leak_malloc(bytes);
    }

    // need to make sure it's a power of two
    if (!powerof2(alignment)) {
        alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = reinterpret_cast<uintptr_t>(base);
        if ((ptr % alignment) == 0) {
            return base;
        }

        // align the pointer
        ptr += ((-ptr) % alignment);

        // Already allocated enough space for the header. This assumes
        // that the malloc alignment is at least 8, otherwise, this is
        // not guaranteed to have the space for the header.
        AllocationEntry* header = to_header(reinterpret_cast<void*>(ptr));
        header->guard = MEMALIGN_GUARD;
        header->entry = reinterpret_cast<HashEntry*>(base);

        return reinterpret_cast<void*>(ptr);
    }
    return base;
}

extern "C" struct mallinfo leak_mallinfo() {
  return g_malloc_dispatch->mallinfo();
}

extern "C" int leak_posix_memalign(void** memptr, size_t alignment, size_t size) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->posix_memalign(memptr, alignment, size);
  }

  if (!powerof2(alignment)) {
    return EINVAL;
  }
  int saved_errno = errno;
  *memptr = leak_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != NULL) ? 0 : ENOMEM;
}

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
extern "C" void* leak_pvalloc(size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->pvalloc(bytes);
  }

  size_t pagesize = getpagesize();
  size_t size = BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) { // Overflow
    return NULL;
  }
  return leak_memalign(pagesize, size);
}

extern "C" void* leak_valloc(size_t size) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->valloc(size);
  }

  return leak_memalign(getpagesize(), size);
}
#endif