/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* ------------------------------------------------------------------------ */
/*  Locks */
/* ------------------------------------------------------------------------ */

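/* Lock utilities. allocator_grab_lock()/allocator_release_lock() serialize
   access to the executable memory allocator's shared state, and
   sljit_grab_lock()/sljit_release_lock() are the public global lock API.
   Three implementations follow: a no-op variant for single-threaded builds,
   a Win32 mutex variant, and a pthread mutex variant. */
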
#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* Always successful. */
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* Always successful. */
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */

#include "windows.h"

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static HANDLE allocator_mutex = 0;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* No idea what to do if an error occurs. Static mutexes should never fail... */
	/* CreateMutex with bInitialOwner == TRUE makes the creating thread the owner,
	   so the first call both creates and acquires the mutex. */
	if (!allocator_mutex)
		allocator_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(allocator_mutex, INFINITE);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	ReleaseMutex(allocator_mutex);
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

static HANDLE global_mutex = 0;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* No idea what to do if an error occurs. Static mutexes should never fail... */
	if (!global_mutex)
		global_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(global_mutex, INFINITE);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	ReleaseMutex(global_mutex);
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#else /* _WIN32 */

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#include <pthread.h>

static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	pthread_mutex_lock(&allocator_mutex);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	pthread_mutex_unlock(&allocator_mutex);
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#include <pthread.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	pthread_mutex_lock(&global_mutex);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	pthread_mutex_unlock(&global_mutex);
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#endif /* _WIN32 */
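
/* Illustrative only (not compiled): a minimal sketch of how the global lock is
   meant to be used, guarding a hypothetical shared resource.

	static int shared_counter;

	static void increment_shared_counter(void)
	{
		sljit_grab_lock();
		shared_counter++;
		sljit_release_lock();
	}
*/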

/* ------------------------------------------------------------------------ */
/*  Stack */
/* ------------------------------------------------------------------------ */

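/* The helpers below manage a growable stack area. In struct sljit_stack, base
   is the start of the mapping, limit is the end of the currently usable area,
   max_limit is the end of the reserved area, and top is left for the caller to
   maintain. On Windows the area is reserved up front and committed page by
   page; elsewhere it is mmap'ed read/write in one step. */
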
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _WIN32
#include "windows.h"
#else
/* Provides mmap function. */
#include <sys/mman.h>
/* For detecting the page size. */
#include <unistd.h>

#ifndef MAP_ANON

#include <fcntl.h>

/* Some old systems do not have MAP_ANON. Mapping /dev/zero with MAP_PRIVATE is
   the classic substitute for anonymous memory on such systems. */
static sljit_si dev_zero = -1;

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

static SLJIT_INLINE sljit_si open_dev_zero(void)
{
	dev_zero = open("/dev/zero", O_RDWR);
	return dev_zero < 0;
}

#else /* SLJIT_SINGLE_THREADED */

#include <pthread.h>

static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE sljit_si open_dev_zero(void)
{
	pthread_mutex_lock(&dev_zero_mutex);
	dev_zero = open("/dev/zero", O_RDWR);
	pthread_mutex_unlock(&dev_zero_mutex);
	return dev_zero < 0;
}

#endif /* SLJIT_SINGLE_THREADED */

#endif /* MAP_ANON */

#endif /* _WIN32 */

#endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)

/* Planning to make it even more clever in the future. */
static sljit_sw sljit_page_align = 0;

SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit)
{
	struct sljit_stack *stack;
	union {
		void *ptr;
		sljit_uw uw;
	} base;
#ifdef _WIN32
	SYSTEM_INFO si;
#endif

	if (limit > max_limit || limit < 1)
		return NULL;

#ifdef _WIN32
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = si.dwPageSize - 1;
	}
#else
	if (!sljit_page_align) {
		sljit_page_align = sysconf(_SC_PAGESIZE);
		/* Should never happen. */
		if (sljit_page_align < 0)
			sljit_page_align = 4096;
		sljit_page_align--;
	}
#endif
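
	/* Note: sljit_page_align holds the page size minus one, so it acts as a
	   mask; (x + sljit_page_align) & ~sljit_page_align rounds x up to the next
	   page boundary. With 4096-byte pages, for example, 5000 becomes 8192 and
	   4096 stays 4096. */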

	/* Align max_limit. */
	max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack));
	if (!stack)
		return NULL;

#ifdef _WIN32
	base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
	if (!base.ptr) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base;
	stack->max_limit = stack->base + max_limit;
	if (sljit_stack_resize(stack, stack->base + limit)) {
		sljit_free_stack(stack);
		return NULL;
	}
#else
#ifdef MAP_ANON
	base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
	if (dev_zero < 0) {
		if (open_dev_zero()) {
			SLJIT_FREE(stack);
			return NULL;
		}
	}
	base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
#endif
	if (base.ptr == MAP_FAILED) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#endif
	stack->top = stack->base;
	return stack;
}

#undef PAGE_ALIGN

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack)
{
#ifdef _WIN32
	VirtualFree((void*)stack->base, 0, MEM_RELEASE);
#else
	munmap((void*)stack->base, stack->max_limit - stack->base);
#endif
	SLJIT_FREE(stack);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit)
{
	sljit_uw aligned_old_limit;
	sljit_uw aligned_new_limit;

	if ((new_limit > stack->max_limit) || (new_limit < stack->base))
		return -1;
#ifdef _WIN32
	aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align;
	aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align;
	if (aligned_new_limit != aligned_old_limit) {
		if (aligned_new_limit > aligned_old_limit) {
			if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE))
				return -1;
		}
		else {
			if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT))
				return -1;
		}
	}
	stack->limit = new_limit;
	return 0;
#else
	if (new_limit >= stack->limit) {
		stack->limit = new_limit;
		return 0;
	}
	aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align;
	aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align;
	/* If madvise is available, we release the unnecessary space. */
#if defined(MADV_DONTNEED)
	if (aligned_new_limit < aligned_old_limit)
		madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MADV_DONTNEED);
#elif defined(POSIX_MADV_DONTNEED)
	if (aligned_new_limit < aligned_old_limit)
		posix_madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, POSIX_MADV_DONTNEED);
#endif
	stack->limit = new_limit;
	return 0;
#endif
}
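
/* Illustrative only (not compiled): a minimal sketch of the stack API with
   arbitrary example sizes. sljit_stack_resize() takes an absolute address and
   returns 0 on success, -1 on failure.

	struct sljit_stack *stack = sljit_allocate_stack(8192, 65536);
	if (stack) {
		sljit_stack_resize(stack, stack->base + 16384);
		sljit_free_stack(stack);
	}
*/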

#endif /* SLJIT_UTIL_STACK */

#endif /* SLJIT_EXECUTABLE_ALLOCATOR || SLJIT_UTIL_GLOBAL_LOCK */