#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

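/* Look up the extent node that tracks the huge allocation at ptr. */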
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

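/*
 * Register the extent node that tracks the huge allocation at ptr so that it
 * can later be found by huge_node_get().  Returns true on error.
 */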
static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(tsdn, ptr, node));
}

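/*
 * Re-register a node that was temporarily deregistered (e.g. while its size
 * field was being updated); registration is expected to succeed.
 */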
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
	bool err;

	err = huge_node_set(tsdn, ptr, node);
	assert(!err);
}

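/* Deregister the extent node that tracks the huge allocation at ptr. */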
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

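/*
 * Allocate a huge (chunk-aligned) region of usize bytes; usize must already be
 * a usable size (s2u(usize) == usize).
 */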
void *
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}

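/*
 * Allocate a huge region of usize bytes with the given alignment.  The backing
 * chunk(s) are tracked by a separately allocated extent node, which is also
 * linked into the owning arena's huge list.
 */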
void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t ausize;
	arena_t *iarena;
	extent_node_t *node;
	size_t sn;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
	    a0get();
	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, NULL, true, iarena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	/* ANDROID change */
	if (likely(!tsdn_null(tsdn))) {
#if !defined(__LP64__)
		/*
		 * On 32 bit systems, using a per arena cache can exhaust
		 * virtual address space.  Force all huge allocations to
		 * always take place in the first arena.
		 */
		extern arena_t *a0get(void);
		arena = a0get();
#else
		arena = arena_choose(tsdn_tsd(tsdn), arena);
#endif
	}
	/* End ANDROID change */
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
	    arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);

	if (huge_node_set(tsdn, ret, node)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, JEMALLOC_ALLOC_JUNK, usize);

	arena_decay_tick(tsdn, arena);
	return (ret);
}

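/*
 * Junk fill a huge region that is being deallocated, but only when the memory
 * is not about to be unmapped anyway.
 */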
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, JEMALLOC_FREE_JUNK, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

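/*
 * Resize in place when the old and new sizes map to the same number of chunks,
 * adjusting junk/zero fill and the recorded extent size as needed.
 */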
static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize_min, size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize),
			    JEMALLOC_FREE_JUNK, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
			    sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize),
			    JEMALLOC_ALLOC_JUNK, usize - oldsize);
		}
	}
}

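/*
 * Shrink a huge allocation in place by splitting off and discarding the excess
 * chunks.  Returns true on failure.
 */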
static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(tsdn, arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
			    usize), CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
	    extent_node_sn_get(node));

	return (false);
}

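/*
 * Grow a huge allocation in place by mapping additional chunk space after it,
 * then zero or junk fill the new trailing region as configured.  Returns true
 * on failure.
 */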
static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/*
	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
	 * update extent's zeroed field, and zero as necessary.
	 */
	is_zeroed_chunk = false;
	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
	    is_zeroed_chunk);
	huge_node_reset(tsdn, ptr, node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
		    usize - oldsize);
	}

	return (false);
}

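/*
 * Try to satisfy a reallocation request without moving the allocation, by
 * expanding, reusing, or shrinking the existing chunks.  Returns true if the
 * allocation must be moved instead.
 */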
bool
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
		    ptr, oldsize, usize_min, zero)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero);
		arena_decay_tick(tsdn, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
		    usize_max)) {
			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

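/* Allocate new space for a reallocation that could not be done in place. */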
static void *
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsdn, arena, usize, zero));
	return (huge_palloc(tsdn, arena, usize, alignment, zero));
}

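/*
 * Reallocate a huge allocation, first attempting to resize in place and
 * otherwise allocating new space, copying, and freeing the old region.
 */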
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
	    zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
	    zero);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache, true);
	return (ret);
}

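/*
 * Deallocate a huge allocation: unlink its extent node, junk fill if
 * configured, release the backing chunks, and free the node itself.
 */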
void
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node),
	    extent_node_sn_get(node));
	idalloctm(tsdn, node, NULL, true, true);

	arena_decay_tick(tsdn, arena);
}

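/* Return the arena that owns the huge allocation at ptr. */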
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

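/* Return the current usable size of the huge allocation at ptr. */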
size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (size);
}

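/*
 * Profiling context accessors for huge allocations; the context is stored in
 * the extent node and protected by the owning arena's huge_mtx.
 */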
prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

void
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{

	huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}