/*
 * Copyright (C) 2015 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "private/bionic_allocator.h"

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <unistd.h>

#include <new>

#include <async_safe/log.h>
#include <async_safe/CHECK.h>

#include "platform/bionic/page.h"
#include "platform/bionic/macros.h"
//
// BionicAllocator is a general purpose allocator designed to provide the same
// functionality as the malloc/free/realloc/memalign libc functions.
//
// On alloc:
// If size is > 1k, the allocator proxies the call directly to mmap.
// If size is <= 1k, the allocator uses BionicSmallObjectAllocator for the
// size rounded up to the nearest power of two.
//
// On free:
//
// For a pointer allocated using the proxy-to-mmap path, the allocator unmaps
// the memory.
//
// For a pointer allocated using BionicSmallObjectAllocator, it adds the block
// to the free_block_list of the corresponding page. If the number of free
// pages reaches 2, BionicSmallObjectAllocator munmaps one of the pages,
// keeping the other one in reserve.

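// A minimal usage sketch (illustrative only, not part of this file's logic;
// BionicAllocator backs the dynamic linker rather than a public API):
//
//   BionicAllocator allocator;
//   void* small = allocator.alloc(100);     // served from the 128-byte bucket
//   void* large = allocator.alloc(4096);    // > 1k, proxied straight to mmap
//   small = allocator.realloc(small, 200);  // moves to the 256-byte bucket
//   allocator.free(small);
//   allocator.free(large);
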
// Memory management for large objects is fairly straightforward, but for small
// objects it is more complicated.  If you are changing this code, one simple
// way to evaluate the memory usage change is by running 'dd' and examining the
// memory usage with 'showmap $(pidof dd)'.  'dd' is nice in that:
//   1. It links in quite a few libraries, so you get some linker memory use.
//   2. When run with no arguments, it sits waiting for input, so it is easy to
//      examine its memory usage with showmap.
//   3. Since it does nothing while waiting for input, the memory usage is
//      deterministic.

static const char kSignature[4] = {'L', 'M', 'A', 1};

static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size > 1k).
static const uint32_t kLargeObject = 111;

// Allocated pointers must be at least 16-byte aligned.  Round up the size of
// page_info to a multiple of 16.
static constexpr size_t kPageInfoSize = __BIONIC_ALIGN(sizeof(page_info), 16);

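// Returns ceil(log2(number)) for number >= 1: the smallest n with
// (1 << n) >= number.  For example, log2(100) == 7, so a 100-byte request
// maps to the 128-byte small-object allocator.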
static inline uint16_t log2(size_t number) {
  uint16_t result = 0;
  number--;

  while (number != 0) {
    result++;
    number >>= 1;
  }

  return result;
}

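// blocks_per_page_ is whatever fits after the page header: with a
// hypothetical 4096-byte page and 1024-byte blocks, that is
// (4096 - sizeof(small_object_page_info)) / 1024 == 3 blocks.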
BionicSmallObjectAllocator::BionicSmallObjectAllocator(uint32_t type, size_t block_size)
    : type_(type),
      block_size_(block_size),
      blocks_per_page_((page_size() - sizeof(small_object_page_info)) / block_size),
      free_pages_cnt_(0),
      page_list_(nullptr) {}

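// The free list is a list of runs: each small_object_block_record marks the
// first of free_blocks_cnt contiguous free blocks.  A freshly mapped page
// contributes a single run spanning the whole page; alloc() carves one block
// off the front of the first run, and free() prepends a run of length 1.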
void* BionicSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  if (page_list_ == nullptr) {
    alloc_page();
  }

  // Fully allocated pages are de-managed and removed from the page list, so
  // every page from the page list must be usable.  Let's just take the first
  // one.
  small_object_page_info* page = page_list_;
  CHECK(page->free_block_list != nullptr);

  small_object_block_record* const block_record = page->free_block_list;
  if (block_record->free_blocks_cnt > 1) {
    small_object_block_record* next_free =
        reinterpret_cast<small_object_block_record*>(
            reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    page->free_block_list = next_free;
  } else {
    page->free_block_list = block_record->next;
  }

  if (page->free_blocks_cnt == blocks_per_page_) {
    free_pages_cnt_--;
  }

  page->free_blocks_cnt--;

  memset(block_record, 0, block_size_);

  if (page->free_blocks_cnt == 0) {
    // De-manage fully allocated pages.  These pages will be managed again if
    // a block is freed.
    remove_from_page_list(page);
  }

  return block_record;
}

void BionicSmallObjectAllocator::free_page(small_object_page_info* page) {
  CHECK(page->free_blocks_cnt == blocks_per_page_);
  if (page->prev_page) {
    page->prev_page->next_page = page->next_page;
  }
  if (page->next_page) {
    page->next_page->prev_page = page->prev_page;
  }
  if (page_list_ == page) {
    page_list_ = page->next_page;
  }
  munmap(page, page_size());
  free_pages_cnt_--;
}

void BionicSmallObjectAllocator::free(void* ptr) {
  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(page_start(reinterpret_cast<uintptr_t>(ptr)));

  if (reinterpret_cast<uintptr_t>(ptr) % block_size_ != 0) {
    async_safe_fatal("invalid pointer: %p (block_size=%zu)", ptr, block_size_);
  }

  memset(ptr, 0, block_size_);
  small_object_block_record* const block_record =
      reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = page->free_block_list;
  block_record->free_blocks_cnt = 1;

  page->free_block_list = block_record;
  page->free_blocks_cnt++;

  if (page->free_blocks_cnt == blocks_per_page_) {
    if (++free_pages_cnt_ > 1) {
      // If we already have a free page, unmap this one.
      free_page(page);
    }
  } else if (page->free_blocks_cnt == 1) {
    // We just freed from a full page.  Add this page back to the list.
    add_to_page_list(page);
  }
}

void BionicSmallObjectAllocator::alloc_page() {
  void* const map_ptr =
      mmap(nullptr, page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %m");
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, page_size(), "bionic_alloc_small_objects");

  small_object_page_info* const page =
      reinterpret_cast<small_object_page_info*>(map_ptr);
  memcpy(page->info.signature, kSignature, sizeof(kSignature));
  page->info.type = type_;
  page->info.allocator_addr = this;

  page->free_blocks_cnt = blocks_per_page_;

  // Align the first block to block_size_.
  const uintptr_t first_block_addr =
      __BIONIC_ALIGN(reinterpret_cast<uintptr_t>(page + 1), block_size_);
  small_object_block_record* const first_block =
      reinterpret_cast<small_object_block_record*>(first_block_addr);

  first_block->next = nullptr;
  first_block->free_blocks_cnt = blocks_per_page_;

  page->free_block_list = first_block;

  add_to_page_list(page);

  free_pages_cnt_++;
}

void BionicSmallObjectAllocator::add_to_page_list(small_object_page_info* page) {
  page->next_page = page_list_;
  page->prev_page = nullptr;
  if (page_list_) {
    page_list_->prev_page = page;
  }
  page_list_ = page;
}

void BionicSmallObjectAllocator::remove_from_page_list(
    small_object_page_info* page) {
  if (page->prev_page) {
    page->prev_page->next_page = page->next_page;
  }
  if (page->next_page) {
    page->next_page->prev_page = page->prev_page;
  }
  if (page_list_ == page) {
    page_list_ = page->next_page;
  }
  page->prev_page = nullptr;
  page->next_page = nullptr;
}

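// Assuming bionic's kSmallObjectMinSizeLog2 == 4 and
// kSmallObjectMaxSizeLog2 == 10, this lazily constructs seven allocators
// with block sizes 16, 32, 64, 128, 256, 512, and 1024 bytes.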
void BionicAllocator::initialize_allocators() {
  if (allocators_ != nullptr) {
    return;
  }

  BionicSmallObjectAllocator* allocators =
      reinterpret_cast<BionicSmallObjectAllocator*>(allocators_buf_);

  for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
    uint32_t type = i + kSmallObjectMinSizeLog2;
    new (allocators + i) BionicSmallObjectAllocator(type, 1 << type);
  }

  allocators_ = allocators;
}

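// Large allocations get a dedicated anonymous mapping: a page_info header,
// padded out to the requested alignment, followed by the user data, with the
// total rounded up to a whole number of pages.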
void* BionicAllocator::alloc_mmap(size_t align, size_t size) {
  size_t header_size = __BIONIC_ALIGN(kPageInfoSize, align);
  size_t allocated_size;
  if (__builtin_add_overflow(header_size, size, &allocated_size) ||
      page_end(allocated_size) < allocated_size) {
    async_safe_fatal("overflow trying to alloc %zu bytes", size);
  }
  allocated_size = page_end(allocated_size);
  void* map_ptr = mmap(nullptr, allocated_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS,
                       -1, 0);

  if (map_ptr == MAP_FAILED) {
    async_safe_fatal("mmap failed: %m");
  }

  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "bionic_alloc_lob");

  void* result = static_cast<char*>(map_ptr) + header_size;
  page_info* info = get_page_info_unchecked(result);
  memcpy(info->signature, kSignature, sizeof(kSignature));
  info->type = kLargeObject;
  info->allocated_size = allocated_size;

  return result;
}

inline void* BionicAllocator::alloc_impl(size_t align, size_t size) {
  if (size > kSmallObjectMaxSize) {
    return alloc_mmap(align, size);
  }

  uint16_t log2_size = log2(size);

  if (log2_size < kSmallObjectMinSizeLog2) {
    log2_size = kSmallObjectMinSizeLog2;
  }

  return get_small_object_allocator_unchecked(log2_size)->alloc();
}

void* BionicAllocator::alloc(size_t size) {
  // Treat alloc(0) as alloc(1).
  if (size == 0) {
    size = 1;
  }
  return alloc_impl(16, size);
}

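// For example, memalign(24, 40) rounds the alignment up to the power of two
// 32 and keeps size at max(40, 32) == 40; log2(40) == 6, so the request is
// served from the 64-byte bucket, whose blocks are 64-byte aligned.  Raising
// size to at least align guarantees the chosen power-of-two bucket is large
// enough, and therefore aligned enough, for the request.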
void* BionicAllocator::memalign(size_t align, size_t size) {
  // The Bionic allocator only supports alignment up to one page, which is good
  // enough for ELF TLS.
  align = MIN(align, page_size());
  align = MAX(align, 16);
  if (!powerof2(align)) {
    align = BIONIC_ROUND_UP_POWER_OF_2(align);
  }
  size = MAX(size, align);
  return alloc_impl(align, size);
}

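// The page_info header lives at the start of the page containing the
// kPageInfoSize bytes just below ptr: the start of the small-object page for
// small allocations, or the start of the mapping for large ones.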
inline page_info* BionicAllocator::get_page_info_unchecked(void* ptr) {
  uintptr_t header_page = page_start(reinterpret_cast<uintptr_t>(ptr) - kPageInfoSize);
  return reinterpret_cast<page_info*>(header_page);
}

inline page_info* BionicAllocator::get_page_info(void* ptr) {
  page_info* info = get_page_info_unchecked(ptr);
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    async_safe_fatal("invalid pointer %p (page signature %04x instead of %04x)", ptr,
                     *reinterpret_cast<const unsigned*>(info->signature),
                     *reinterpret_cast<const unsigned*>(kSignature));
  }
  return info;
}

void* BionicAllocator::realloc(void* ptr, size_t size) {
  if (ptr == nullptr) {
    return alloc(size);
  }

  if (size == 0) {
    free(ptr);
    return nullptr;
  }

  page_info* info = get_page_info(ptr);

  size_t old_size = 0;

  if (info->type == kLargeObject) {
    old_size = info->allocated_size - (static_cast<char*>(ptr) - reinterpret_cast<char*>(info));
  } else {
    old_size = get_small_object_allocator(info, ptr)->get_block_size();
  }

  if (old_size < size) {
    void* result = alloc(size);
    memcpy(result, ptr, old_size);
    free(ptr);
    return result;
  }

  return ptr;
}

void BionicAllocator::free(void* ptr) {
  if (ptr == nullptr) {
    return;
  }

  page_info* info = get_page_info(ptr);
  if (info->type == kLargeObject) {
    munmap(info, info->allocated_size);
  } else {
    get_small_object_allocator(info, ptr)->free(ptr);
  }
}

size_t BionicAllocator::get_chunk_size(void* ptr) {
  if (ptr == nullptr) return 0;

  page_info* info = get_page_info_unchecked(ptr);
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    // Invalid pointer (mismatched signature).
    return 0;
  }
  if (info->type == kLargeObject) {
    return info->allocated_size - (static_cast<char*>(ptr) - reinterpret_cast<char*>(info));
  }

  BionicSmallObjectAllocator* allocator = get_small_object_allocator_unchecked(info->type);
  if (allocator != info->allocator_addr) {
    // Invalid pointer.
    return 0;
  }
  return allocator->get_block_size();
}

BionicSmallObjectAllocator* BionicAllocator::get_small_object_allocator_unchecked(uint32_t type) {
  if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
    async_safe_fatal("invalid type: %u", type);
  }

  initialize_allocators();
  return &allocators_[type - kSmallObjectMinSizeLog2];
}

BionicSmallObjectAllocator* BionicAllocator::get_small_object_allocator(page_info* pi, void* ptr) {
  BionicSmallObjectAllocator* result = get_small_object_allocator_unchecked(pi->type);
  if (result != pi->allocator_addr) {
    async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
  }
  return result;
}