1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <inttypes.h>
30 #include <pthread.h>
31 #include <stdatomic.h>
32 #include <stdint.h>
33 #include <stdio.h>
34 
35 #include <private/bionic_malloc_dispatch.h>
36 
37 #if __has_feature(hwaddress_sanitizer)
38 #include <sanitizer/allocator_interface.h>
39 #endif
40 
41 #include "malloc_common.h"
42 #include "malloc_common_dynamic.h"
43 #include "malloc_heapprofd.h"
44 #include "malloc_limit.h"
45 
__BEGIN_DECLS
// Allocation entry points: these enforce the allocation limit before
// delegating to the wrapped allocator.
static void* LimitCalloc(size_t n_elements, size_t elem_size);
static void LimitFree(void* mem);
static void* LimitMalloc(size_t bytes);
static void* LimitMemalign(size_t alignment, size_t bytes);
static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size);
static void* LimitRealloc(void* old_mem, size_t bytes);
static void* LimitAlignedAlloc(size_t alignment, size_t size);
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
static void* LimitPvalloc(size_t bytes);
static void* LimitValloc(size_t bytes);
#endif

// Pass through functions: no limit accounting, forwarded unchanged to the
// wrapped allocator.
static size_t LimitUsableSize(const void* mem);
static struct mallinfo LimitMallinfo();
static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg);
static void LimitMallocDisable();
static void LimitMallocEnable();
static int LimitMallocInfo(int options, FILE* fp);
static int LimitMallopt(int param, int value);
__END_DECLS
68 
// Dispatch table installed when the allocation limit is enabled.
// NOTE: entry order must match the field order of MallocDispatch
// (declared in private/bionic_malloc_dispatch.h) — do not reorder.
static constexpr MallocDispatch __limit_dispatch
  __attribute__((unused)) = {
    LimitCalloc,
    LimitFree,
    LimitMallinfo,
    LimitMalloc,
    LimitUsableSize,
    LimitMemalign,
    LimitPosixMemalign,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitPvalloc,
#endif
    LimitRealloc,
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
    LimitValloc,
#endif
    LimitIterate,
    LimitMallocDisable,
    LimitMallocEnable,
    LimitMallopt,
    LimitAlignedAlloc,
    LimitMallocInfo,
  };
92 
// Running total of currently allocated bytes, tracked via usable size.
static _Atomic uint64_t gAllocated;
// Maximum allowed total; written once in LimitEnable before the limit
// dispatch table is installed, read-only afterwards.
static uint64_t gAllocLimit;
95 
CheckLimit(size_t bytes)96 static inline bool CheckLimit(size_t bytes) {
97   uint64_t total;
98   if (__predict_false(__builtin_add_overflow(
99                           atomic_load_explicit(&gAllocated, memory_order_relaxed), bytes, &total) ||
100                       total > gAllocLimit)) {
101     return false;
102   }
103   return true;
104 }
105 
IncrementLimit(void * mem)106 static inline void* IncrementLimit(void* mem) {
107   if (__predict_false(mem == nullptr)) {
108     return nullptr;
109   }
110   atomic_fetch_add(&gAllocated, LimitUsableSize(mem));
111   return mem;
112 }
113 
LimitCalloc(size_t n_elements,size_t elem_size)114 void* LimitCalloc(size_t n_elements, size_t elem_size) {
115   size_t total;
116   if (__builtin_mul_overflow(n_elements, elem_size, &total) || !CheckLimit(total)) {
117     warning_log("malloc_limit: calloc(%zu, %zu) exceeds limit %" PRId64, n_elements, elem_size,
118                 gAllocLimit);
119     return nullptr;
120   }
121   auto dispatch_table = GetDefaultDispatchTable();
122   if (__predict_false(dispatch_table != nullptr)) {
123     return IncrementLimit(dispatch_table->calloc(n_elements, elem_size));
124   }
125   return IncrementLimit(Malloc(calloc)(n_elements, elem_size));
126 }
127 
LimitFree(void * mem)128 void LimitFree(void* mem) {
129   atomic_fetch_sub(&gAllocated, LimitUsableSize(mem));
130   auto dispatch_table = GetDefaultDispatchTable();
131   if (__predict_false(dispatch_table != nullptr)) {
132     return dispatch_table->free(mem);
133   }
134   return Malloc(free)(mem);
135 }
136 
LimitMalloc(size_t bytes)137 void* LimitMalloc(size_t bytes) {
138   if (!CheckLimit(bytes)) {
139     warning_log("malloc_limit: malloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
140     return nullptr;
141   }
142   auto dispatch_table = GetDefaultDispatchTable();
143   if (__predict_false(dispatch_table != nullptr)) {
144     return IncrementLimit(dispatch_table->malloc(bytes));
145   }
146   return IncrementLimit(Malloc(malloc)(bytes));
147 }
148 
LimitMemalign(size_t alignment,size_t bytes)149 static void* LimitMemalign(size_t alignment, size_t bytes) {
150   if (!CheckLimit(bytes)) {
151     warning_log("malloc_limit: memalign(%zu, %zu) exceeds limit %" PRId64, alignment, bytes,
152                 gAllocLimit);
153     return nullptr;
154   }
155   auto dispatch_table = GetDefaultDispatchTable();
156   if (__predict_false(dispatch_table != nullptr)) {
157     return IncrementLimit(dispatch_table->memalign(alignment, bytes));
158   }
159   return IncrementLimit(Malloc(memalign)(alignment, bytes));
160 }
161 
LimitPosixMemalign(void ** memptr,size_t alignment,size_t size)162 static int LimitPosixMemalign(void** memptr, size_t alignment, size_t size) {
163   if (!CheckLimit(size)) {
164     warning_log("malloc_limit: posix_memalign(%zu, %zu) exceeds limit %" PRId64, alignment, size,
165                 gAllocLimit);
166     return ENOMEM;
167   }
168   int retval;
169   auto dispatch_table = GetDefaultDispatchTable();
170   if (__predict_false(dispatch_table != nullptr)) {
171     retval = dispatch_table->posix_memalign(memptr, alignment, size);
172   } else {
173     retval = Malloc(posix_memalign)(memptr, alignment, size);
174   }
175   if (__predict_false(retval != 0)) {
176     return retval;
177   }
178   IncrementLimit(*memptr);
179   return 0;
180 }
181 
LimitAlignedAlloc(size_t alignment,size_t size)182 static void* LimitAlignedAlloc(size_t alignment, size_t size) {
183   if (!CheckLimit(size)) {
184     warning_log("malloc_limit: aligned_alloc(%zu, %zu) exceeds limit %" PRId64, alignment, size,
185                 gAllocLimit);
186     return nullptr;
187   }
188   auto dispatch_table = GetDefaultDispatchTable();
189   if (__predict_false(dispatch_table != nullptr)) {
190     return IncrementLimit(dispatch_table->aligned_alloc(alignment, size));
191   }
192   return IncrementLimit(Malloc(aligned_alloc)(alignment, size));
193 }
194 
LimitRealloc(void * old_mem,size_t bytes)195 static void* LimitRealloc(void* old_mem, size_t bytes) {
196   size_t old_usable_size = LimitUsableSize(old_mem);
197   void* new_ptr;
198   // Need to check the size only if the allocation will increase in size.
199   if (bytes > old_usable_size && !CheckLimit(bytes - old_usable_size)) {
200     warning_log("malloc_limit: realloc(%p, %zu) exceeds limit %" PRId64, old_mem, bytes,
201                 gAllocLimit);
202     // Free the old pointer.
203     LimitFree(old_mem);
204     return nullptr;
205   }
206 
207   auto dispatch_table = GetDefaultDispatchTable();
208   if (__predict_false(dispatch_table != nullptr)) {
209     new_ptr = dispatch_table->realloc(old_mem, bytes);
210   } else {
211     new_ptr = Malloc(realloc)(old_mem, bytes);
212   }
213 
214   if (__predict_false(new_ptr == nullptr)) {
215     // This acts as if the pointer was freed.
216     atomic_fetch_sub(&gAllocated, old_usable_size);
217     return nullptr;
218   }
219 
220   size_t new_usable_size = LimitUsableSize(new_ptr);
221   // Assumes that most allocations increase in size, rather than shrink.
222   if (__predict_false(old_usable_size > new_usable_size)) {
223     atomic_fetch_sub(&gAllocated, old_usable_size - new_usable_size);
224   } else {
225     atomic_fetch_add(&gAllocated, new_usable_size - old_usable_size);
226   }
227   return new_ptr;
228 }
229 
230 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
LimitPvalloc(size_t bytes)231 static void* LimitPvalloc(size_t bytes) {
232   if (!CheckLimit(bytes)) {
233     warning_log("malloc_limit: pvalloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
234     return nullptr;
235   }
236   auto dispatch_table = GetDefaultDispatchTable();
237   if (__predict_false(dispatch_table != nullptr)) {
238     return IncrementLimit(dispatch_table->pvalloc(bytes));
239   }
240   return IncrementLimit(Malloc(pvalloc)(bytes));
241 }
242 
LimitValloc(size_t bytes)243 static void* LimitValloc(size_t bytes) {
244   if (!CheckLimit(bytes)) {
245     warning_log("malloc_limit: valloc(%zu) exceeds limit %" PRId64, bytes, gAllocLimit);
246     return nullptr;
247   }
248   auto dispatch_table = GetDefaultDispatchTable();
249   if (__predict_false(dispatch_table != nullptr)) {
250     return IncrementLimit(dispatch_table->valloc(bytes));
251   }
252   return IncrementLimit(Malloc(valloc)(bytes));
253 }
254 #endif
255 
MallocLimitInstalled()256 bool MallocLimitInstalled() {
257   return GetDispatchTable() == &__limit_dispatch;
258 }
259 
260 #if defined(LIBC_STATIC)
EnableLimitDispatchTable()261 static bool EnableLimitDispatchTable() {
262   // This is the only valid way to modify the dispatch tables for a
263   // static executable so no locks are necessary.
264   __libc_globals.mutate([](libc_globals* globals) {
265     atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
266   });
267   return true;
268 }
269 #else
EnableLimitDispatchTable()270 static bool EnableLimitDispatchTable() {
271   pthread_mutex_lock(&gGlobalsMutateLock);
272   // All other code that calls mutate will grab the gGlobalsMutateLock.
273   // However, there is one case where the lock cannot be acquired, in the
274   // signal handler that enables heapprofd. In order to avoid having two
275   // threads calling mutate at the same time, use an atomic variable to
276   // verify that only this function or the signal handler are calling mutate.
277   // If this function is called at the same time as the signal handler is
278   // being called, allow a short period for the signal handler to complete
279   // before failing.
280   bool enabled = false;
281   size_t num_tries = 20;
282   while (true) {
283     if (!atomic_exchange(&gGlobalsMutating, true)) {
284       __libc_globals.mutate([](libc_globals* globals) {
285         atomic_store(&globals->current_dispatch_table, &__limit_dispatch);
286       });
287       atomic_store(&gGlobalsMutating, false);
288       enabled = true;
289       break;
290     }
291     if (--num_tries == 0) {
292       break;
293     }
294     usleep(1000);
295   }
296   pthread_mutex_unlock(&gGlobalsMutateLock);
297   if (enabled) {
298     info_log("malloc_limit: Allocation limit enabled, max size %" PRId64 " bytes\n", gAllocLimit);
299   } else {
300     error_log("malloc_limit: Failed to enable allocation limit.");
301   }
302   return enabled;
303 }
304 #endif
305 
LimitEnable(void * arg,size_t arg_size)306 bool LimitEnable(void* arg, size_t arg_size) {
307   if (arg == nullptr || arg_size != sizeof(size_t)) {
308     errno = EINVAL;
309     return false;
310   }
311 
312   static _Atomic bool limit_enabled;
313   if (atomic_exchange(&limit_enabled, true)) {
314     // The limit can only be enabled once.
315     error_log("malloc_limit: The allocation limit has already been set, it can only be set once.");
316     return false;
317   }
318 
319   gAllocLimit = *reinterpret_cast<size_t*>(arg);
320 #if __has_feature(hwaddress_sanitizer)
321   size_t current_allocated = __sanitizer_get_current_allocated_bytes();
322 #else
323   size_t current_allocated;
324   auto dispatch_table = GetDefaultDispatchTable();
325   if (__predict_false(dispatch_table != nullptr)) {
326     current_allocated = dispatch_table->mallinfo().uordblks;
327   } else {
328     current_allocated = Malloc(mallinfo)().uordblks;
329   }
330 #endif
331   atomic_store(&gAllocated, current_allocated);
332 
333   return EnableLimitDispatchTable();
334 }
335 
LimitUsableSize(const void * mem)336 static size_t LimitUsableSize(const void* mem) {
337   auto dispatch_table = GetDefaultDispatchTable();
338   if (__predict_false(dispatch_table != nullptr)) {
339     return dispatch_table->malloc_usable_size(mem);
340   }
341   return Malloc(malloc_usable_size)(mem);
342 }
343 
LimitMallinfo()344 static struct mallinfo LimitMallinfo() {
345   auto dispatch_table = GetDefaultDispatchTable();
346   if (__predict_false(dispatch_table != nullptr)) {
347     return dispatch_table->mallinfo();
348   }
349   return Malloc(mallinfo)();
350 }
351 
LimitIterate(uintptr_t base,size_t size,void (* callback)(uintptr_t,size_t,void *),void * arg)352 static int LimitIterate(uintptr_t base, size_t size, void (*callback)(uintptr_t, size_t, void*), void* arg) {
353   auto dispatch_table = GetDefaultDispatchTable();
354   if (__predict_false(dispatch_table != nullptr)) {
355     return dispatch_table->malloc_iterate(base, size, callback, arg);
356   }
357   return Malloc(malloc_iterate)(base, size, callback, arg);
358 }
359 
LimitMallocDisable()360 static void LimitMallocDisable() {
361   auto dispatch_table = GetDefaultDispatchTable();
362   if (__predict_false(dispatch_table != nullptr)) {
363     dispatch_table->malloc_disable();
364   } else {
365     Malloc(malloc_disable)();
366   }
367 }
368 
LimitMallocEnable()369 static void LimitMallocEnable() {
370   auto dispatch_table = GetDefaultDispatchTable();
371   if (__predict_false(dispatch_table != nullptr)) {
372     dispatch_table->malloc_enable();
373   } else {
374     Malloc(malloc_enable)();
375   }
376 }
377 
LimitMallocInfo(int options,FILE * fp)378 static int LimitMallocInfo(int options, FILE* fp) {
379   auto dispatch_table = GetDefaultDispatchTable();
380   if (__predict_false(dispatch_table != nullptr)) {
381     return dispatch_table->malloc_info(options, fp);
382   }
383   return Malloc(malloc_info)(options, fp);
384 }
385 
LimitMallopt(int param,int value)386 static int LimitMallopt(int param, int value) {
387   auto dispatch_table = GetDefaultDispatchTable();
388   if (__predict_false(dispatch_table != nullptr)) {
389     return dispatch_table->mallopt(param, value);
390   }
391   return Malloc(mallopt)(param, value);
392 }
393