//===------------------------ fallback_malloc.cpp -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // the debug/instrumentation code below prints via std::cout
#endif

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool; supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four-byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two-byte offsets within the heap, rather than (4- or 8-byte) pointers.
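//
// A worked illustration (assuming the usual 2-byte unsigned short, so
// sizeof(heap_node) == 4): each chunk carries 4 bytes of overhead, and a
// 2-byte offset measured in heap_node units can address 2^16 * 4 = 256 KiB
// of pool, far more than the 512-byte pool actually used here.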

namespace {

// When POSIX threads are not available, make the mutex operations no-ops
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

// RAII helper that holds heap_mutex (when threads are available) for the
// duration of a scope.
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}
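
// For reference: node_from_offset and offset_from_node are inverses over the
// pool, and init_heap() leaves a single free chunk at offset 0 spanning all
// HEAP_SIZE / sizeof(heap_node) units (128 units under the assumptions above).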

// Return the size of a chunk in units of sizeof(heap_node): enough units to
// hold len bytes, rounded up, plus one extra unit for the chunk header.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
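
// For example (again assuming sizeof(heap_node) == 4): alloc_size(1) == 2
// units (one of header plus one of payload), and alloc_size(10) == 4 units,
// so a 10-byte request consumes 16 bytes of the pool.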

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}
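
// Illustrative first-fit walk (hypothetical state, sizeof(heap_node) == 4):
// with one 128-unit free chunk at offset 0, fallback_malloc(10) needs 4
// units, so the chunk shrinks to 124 units and its tail is handed out: the
// header lands at unit 124 and the caller gets a pointer to unit 125.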

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "Freeing item at " << offset_from_node(cp) << " of size "
            << cp->len << std::endl;
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << " p, cp, after(p), after(cp) " << offset_from_node(p) << ' '
              << offset_from_node(cp) << ' ' << offset_from_node(after(p))
              << ' ' << offset_from_node(after(cp)) << std::endl;
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << " Appending onto chunk at " << offset_from_node(p)
                << std::endl;
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << " Appending free chunk at " << offset_from_node(p)
                << std::endl;
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      if (prev == 0) {
        freelist = cp;
        cp->next_node = p->next_node;
      } else {
        prev->next_node = offset_from_node(cp);
        cp->next_node = p->next_node; // take over p's place in the list;
                                      // cp's next_node is stale (0) here,
                                      // so leaving it would corrupt the list
      }
      return;
    }
  }
  // Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << " Making new free list entry " << offset_from_node(cp)
            << std::endl;
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}
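
// Coalescing walk-through (hypothetical): if the free list holds one chunk A
// at units 0..3 and the chunk at units 4..7 is freed, after(A) == cp, so A
// simply grows to len 8; freeing units 8..11 next extends A again, gradually
// restoring a single contiguous free block.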

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::cout << (prev == 0 ? "" : " ") << "Offset: " << offset_from_node(p)
              << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
    total_free += p->len;
  }
  std::cout << "Total Free space: " << total_free << std::endl;
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = _aligned_malloc(size, alignof(__aligned_type)))
    return dest;
#elif defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION)
  if (void* dest = std::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  void* dest;
  if (::posix_memalign(&dest, alignof(__aligned_type), size) == 0)
    return dest;
#endif
  return fallback_malloc(size);
}
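
// In other words: the system allocator is tried first (aligned to
// alignof(__aligned_type), i.e. the platform's default alignment); only if it
// fails, e.g. with ENOMEM, is the request retried against the emergency pool.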

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = std::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    std::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_WIN32)
    ::_aligned_free(ptr);
#else
    std::free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    std::free(ptr);
}

} // namespace __cxxabiv1