1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "linker_allocator.h"
30 #include "linker_debug.h"
31 #include "linker.h"
32
33 #include <algorithm>
34 #include <vector>
35
36 #include <stdlib.h>
37 #include <sys/mman.h>
38 #include <unistd.h>
39
40 #include "private/bionic_prctl.h"
41
42 //
// LinkerMemoryAllocator is a general purpose allocator
44 // designed to provide the same functionality as the malloc/free/realloc
45 // libc functions.
46 //
47 // On alloc:
// If size is greater than 1k the allocator proxies the call directly to mmap.
// If size is 1k or less the allocator uses a SmallObjectAllocator for the size
// rounded up to the nearest power of two.
51 //
52 // On free:
53 //
54 // For a pointer allocated using proxy-to-mmap allocator unmaps
55 // the memory.
56 //
57 // For a pointer allocated using SmallObjectAllocator it adds
58 // the block to free_blocks_list_. If the number of free pages reaches 2,
59 // SmallObjectAllocator munmaps one of the pages keeping the other one
60 // in reserve.
61
// Every page managed by the allocator starts with this signature so that
// free()/realloc() can validate incoming pointers; the trailing 1 acts as a
// version byte.
static const char kSignature[4] = {'L', 'M', 'A', 1};

// Largest request served by the small object allocators; anything bigger is
// proxied directly to mmap.
static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// page_info::type tag used for large allocations (with size >1k); small
// object pages store the log2 of their block size in this field instead.
static const uint32_t kLargeObject = 111;
68
operator <(const small_object_page_record & one,const small_object_page_record & two)69 bool operator<(const small_object_page_record& one, const small_object_page_record& two) {
70 return one.page_addr < two.page_addr;
71 }
72
// Returns ceil(log2(number)): the log2 of |number| rounded up to the nearest
// power of two (0 for number <= 1). Used to pick the smallest size class that
// fits a request.
static inline uint16_t log2(size_t number) {
  uint16_t bits = 0;
  for (size_t remaining = number - 1; remaining != 0; remaining >>= 1) {
    ++bits;
  }
  return bits;
}
84
// |type| is the page_info type tag stamped on every page of this allocator
// (the log2 of the block size), |block_size| is the fixed size of every block
// it hands out. Pages are mapped lazily on first alloc().
LinkerSmallObjectAllocator::LinkerSmallObjectAllocator(uint32_t type, size_t block_size)
    : type_(type), block_size_(block_size), free_pages_cnt_(0), free_blocks_list_(nullptr) {}
87
// Returns a zero-filled block of block_size_ bytes, mapping a new page when
// the free list is empty.
void* LinkerSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  if (free_blocks_list_ == nullptr) {
    alloc_page();  // guarantees free_blocks_list_ != nullptr below
  }

  // Each record on the free list describes a contiguous run of free blocks
  // within one page; the record itself lives in the run's first block.
  small_object_block_record* block_record = free_blocks_list_;
  if (block_record->free_blocks_cnt > 1) {
    // Carve the first block off the run: the record describing the shortened
    // run moves forward by one block.
    small_object_block_record* next_free = reinterpret_cast<small_object_block_record*>(
        reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    free_blocks_list_ = next_free;
  } else {
    // The run held a single block; drop the whole record from the list.
    free_blocks_list_ = block_record->next;
  }

  // bookkeeping...
  auto page_record = find_page_record(block_record);

  // The page is leaving the fully-free state, so it stops counting towards
  // the reserve of free pages.
  if (page_record->allocated_blocks_cnt == 0) {
    free_pages_cnt_--;
  }

  page_record->free_blocks_cnt--;
  page_record->allocated_blocks_cnt++;

  // Wipe the free-list metadata that lived inside the block before handing
  // it out.
  memset(block_record, 0, block_size_);

  return block_record;
}
120
// Unmaps a fully-free page: first unlinks every free-list record that lives
// inside the page, then munmaps it and erases its bookkeeping record.
void LinkerSmallObjectAllocator::free_page(linker_vector_t::iterator page_record) {
  void* page_start = reinterpret_cast<void*>(page_record->page_addr);
  void* page_end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(page_start) + PAGE_SIZE);

  // Advance the list head past any records located in this page.
  while (free_blocks_list_ != nullptr &&
      free_blocks_list_ > page_start &&
      free_blocks_list_ < page_end) {
    free_blocks_list_ = free_blocks_list_->next;
  }

  small_object_block_record* current = free_blocks_list_;

  // Splice out any interior records that point into this page. The inner
  // loop handles several such records in a row.
  while (current != nullptr) {
    while (current->next > page_start && current->next < page_end) {
      current->next = current->next->next;
    }

    current = current->next;
  }

  munmap(page_start, PAGE_SIZE);
  page_records_.erase(page_record);
  free_pages_cnt_--;
}
145
// Returns a block to this allocator's free list and updates the per-page
// counters; may unmap the page once enough pages are already fully free.
void LinkerSmallObjectAllocator::free(void* ptr) {
  auto page_record = find_page_record(ptr);  // aborts on foreign pointers

  // Blocks start at page_start + sizeof(page_info) and are spaced
  // block_size_ apart; because pages are aligned and block_size_ is a power
  // of two, a valid pointer minus the header is block-aligned.
  ssize_t offset = reinterpret_cast<uintptr_t>(ptr) - sizeof(page_info);

  if (offset % block_size_ != 0) {
    __libc_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);
  }

  // Poison the block, then reuse its storage as a single-block free-list
  // record pushed onto the head of the list.
  memset(ptr, 0, block_size_);
  small_object_block_record* block_record = reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = free_blocks_list_;
  block_record->free_blocks_cnt = 1;

  free_blocks_list_ = block_record;

  page_record->free_blocks_cnt++;
  page_record->allocated_blocks_cnt--;

  // Page became fully free. Note the post-increment: the test uses the count
  // of free pages before this one is added, so up to two free pages are kept
  // in reserve before unmapping kicks in.
  if (page_record->allocated_blocks_cnt == 0) {
    if (free_pages_cnt_++ > 1) {
      // if we already have a free page - unmap this one.
      free_page(page_record);
    }
  }
}
173
find_page_record(void * ptr)174 linker_vector_t::iterator LinkerSmallObjectAllocator::find_page_record(void* ptr) {
175 void* addr = reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
176 small_object_page_record boundary;
177 boundary.page_addr = addr;
178 linker_vector_t::iterator it = std::lower_bound(
179 page_records_.begin(), page_records_.end(), boundary);
180
181 if (it == page_records_.end() || it->page_addr != addr) {
182 // not found...
183 __libc_fatal("page record for %p was not found (block_size=%zd)", ptr, block_size_);
184 }
185
186 return it;
187 }
188
create_page_record(void * page_addr,size_t free_blocks_cnt)189 void LinkerSmallObjectAllocator::create_page_record(void* page_addr, size_t free_blocks_cnt) {
190 small_object_page_record record;
191 record.page_addr = page_addr;
192 record.free_blocks_cnt = free_blocks_cnt;
193 record.allocated_blocks_cnt = 0;
194
195 linker_vector_t::iterator it = std::lower_bound(
196 page_records_.begin(), page_records_.end(), record);
197 page_records_.insert(it, record);
198 }
199
alloc_page()200 void LinkerSmallObjectAllocator::alloc_page() {
201 static_assert(sizeof(page_info) % 16 == 0,
202 "sizeof(page_info) is not multiple of 16");
203 void* map_ptr = mmap(nullptr, PAGE_SIZE,
204 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
205 if (map_ptr == MAP_FAILED) {
206 __libc_fatal("mmap failed");
207 }
208
209 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE, "linker_alloc_small_objects");
210
211 page_info* info = reinterpret_cast<page_info*>(map_ptr);
212 memcpy(info->signature, kSignature, sizeof(kSignature));
213 info->type = type_;
214 info->allocator_addr = this;
215
216 size_t free_blocks_cnt = (PAGE_SIZE - sizeof(page_info))/block_size_;
217
218 create_page_record(map_ptr, free_blocks_cnt);
219
220 small_object_block_record* first_block = reinterpret_cast<small_object_block_record*>(info + 1);
221
222 first_block->next = free_blocks_list_;
223 first_block->free_blocks_cnt = free_blocks_cnt;
224
225 free_blocks_list_ = first_block;
226 }
227
228
initialize_allocators()229 void LinkerMemoryAllocator::initialize_allocators() {
230 if (allocators_ != nullptr) {
231 return;
232 }
233
234 LinkerSmallObjectAllocator* allocators =
235 reinterpret_cast<LinkerSmallObjectAllocator*>(allocators_buf_);
236
237 for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
238 uint32_t type = i + kSmallObjectMinSizeLog2;
239 new (allocators + i) LinkerSmallObjectAllocator(type, 1 << type);
240 }
241
242 allocators_ = allocators;
243 }
244
alloc_mmap(size_t size)245 void* LinkerMemoryAllocator::alloc_mmap(size_t size) {
246 size_t allocated_size = PAGE_END(size + sizeof(page_info));
247 void* map_ptr = mmap(nullptr, allocated_size,
248 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
249
250 if (map_ptr == MAP_FAILED) {
251 __libc_fatal("mmap failed");
252 }
253
254 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob");
255
256 page_info* info = reinterpret_cast<page_info*>(map_ptr);
257 memcpy(info->signature, kSignature, sizeof(kSignature));
258 info->type = kLargeObject;
259 info->allocated_size = allocated_size;
260
261 return info + 1;
262 }
263
alloc(size_t size)264 void* LinkerMemoryAllocator::alloc(size_t size) {
265 // treat alloc(0) as alloc(1)
266 if (size == 0) {
267 size = 1;
268 }
269
270 if (size > kSmallObjectMaxSize) {
271 return alloc_mmap(size);
272 }
273
274 uint16_t log2_size = log2(size);
275
276 if (log2_size < kSmallObjectMinSizeLog2) {
277 log2_size = kSmallObjectMinSizeLog2;
278 }
279
280 return get_small_object_allocator(log2_size)->alloc();
281 }
282
get_page_info(void * ptr)283 page_info* LinkerMemoryAllocator::get_page_info(void* ptr) {
284 page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr)));
285 if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
286 __libc_fatal("invalid pointer %p (page signature mismatch)", ptr);
287 }
288
289 return info;
290 }
291
// Resizes a previously returned allocation with libc realloc semantics:
// realloc(nullptr, size) behaves like alloc(size), and realloc(ptr, 0) frees
// the pointer and returns nullptr.
void* LinkerMemoryAllocator::realloc(void* ptr, size_t size) {
  if (ptr == nullptr) {
    return alloc(size);
  }

  if (size == 0) {
    free(ptr);
    return nullptr;
  }

  page_info* info = get_page_info(ptr);

  // Usable size of the existing allocation.
  size_t old_size = 0;

  if (info->type == kLargeObject) {
    old_size = info->allocated_size - sizeof(page_info);
  } else {
    // Small object: info->type is the log2 of the block size; cross-check
    // that the page really belongs to that size class's allocator.
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      __libc_fatal("invalid pointer %p (page signature mismatch)", ptr);
    }

    old_size = allocator->get_block_size();
  }

  // Grow by alloc+copy+free; a shrink (or a request still fitting the
  // current block) reuses the existing allocation as-is.
  if (old_size < size) {
    void *result = alloc(size);
    memcpy(result, ptr, old_size);
    free(ptr);
    return result;
  }

  return ptr;
}
326
// Releases a pointer obtained from alloc()/realloc(). free(nullptr) is a
// no-op, matching libc free semantics.
void LinkerMemoryAllocator::free(void* ptr) {
  if (ptr == nullptr) {
    return;
  }

  page_info* info = get_page_info(ptr);

  if (info->type == kLargeObject) {
    // Large objects own their entire mapping; just unmap it.
    munmap(info, info->allocated_size);
  } else {
    // Route small objects back to the allocator for their size class,
    // verifying the header points at that same allocator instance.
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    if (allocator != info->allocator_addr) {
      __libc_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);
    }

    allocator->free(ptr);
  }
}
345
get_small_object_allocator(uint32_t type)346 LinkerSmallObjectAllocator* LinkerMemoryAllocator::get_small_object_allocator(uint32_t type) {
347 if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
348 __libc_fatal("invalid type: %u", type);
349 }
350
351 initialize_allocators();
352 return &allocators_[type - kSmallObjectMinSizeLog2];
353 }
354