1 /******************************************************************************/
2 #ifdef JEMALLOC_H_TYPES
3
4 #define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
5
6 /* Maximum number of regions in one run. */
7 #define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
8 #define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
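/*
 * Worked example (illustrative only; the actual values depend on the build's
 * LG_PAGE and LG_TINY_MIN): with 4 KiB pages (LG_PAGE == 12) and 8-byte
 * minimum regions (LG_TINY_MIN == 3), LG_RUN_MAXREGS is 9, so no run ever
 * holds more than 512 regions.
 */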
9
10 /*
11 * Minimum redzone size. Redzones may be larger than this if necessary to
12 * preserve region alignment.
13 */
14 #define REDZONE_MINSIZE 16
15
16 /*
17 * The minimum ratio of active:dirty pages per arena is computed as:
18 *
19 * (nactive >> lg_dirty_mult) >= ndirty
20 *
21 * So, supposing that lg_dirty_mult is 3, dirty pages are capped at one
22 * eighth of the active page count.
23 */
24 #define LG_DIRTY_MULT_DEFAULT 3
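/*
 * Illustrative sketch, not part of this header: one way the predicate above
 * could be evaluated.  The helper name and parameters are hypothetical; the
 * real decision is made by arena_maybe_purge() in arena.c.
 */
#if 0
static bool
example_should_purge(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{

	/* A negative lg_dirty_mult disables ratio-based purging. */
	if (lg_dirty_mult < 0)
		return (false);
	/*
	 * With lg_dirty_mult == 3, purging is warranted once ndirty exceeds
	 * nactive/8, i.e. once (nactive >> lg_dirty_mult) >= ndirty no
	 * longer holds.
	 */
	return (ndirty > (nactive >> lg_dirty_mult));
}
#endif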
25
26 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
27 typedef struct arena_run_s arena_run_t;
28 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
29 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
30 typedef struct arena_chunk_s arena_chunk_t;
31 typedef struct arena_bin_info_s arena_bin_info_t;
32 typedef struct arena_bin_s arena_bin_t;
33 typedef struct arena_s arena_t;
34
35 #endif /* JEMALLOC_H_TYPES */
36 /******************************************************************************/
37 #ifdef JEMALLOC_H_STRUCTS
38
39 #ifdef JEMALLOC_ARENA_STRUCTS_A
40 struct arena_run_s {
41 /* Index of bin this run is associated with. */
42 index_t binind;
43
44 /* Number of free regions in run. */
45 unsigned nfree;
46
47 /* Per region allocated/deallocated bitmap. */
48 bitmap_t bitmap[BITMAP_GROUPS_MAX];
49 };
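/*
 * Illustrative sketch, not part of this header: how a region is typically
 * carved out of a run using the fields above.  The helper name is
 * hypothetical and the body is a simplification of arena_run_reg_alloc() in
 * arena.c; bitmap_sfu() returns the index of the first unset bit and sets
 * it.
 */
#if 0
static unsigned
example_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	unsigned regind;

	assert(run->nfree > 0);
	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	run->nfree--;
	return (regind);
}
#endif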
50
51 /* Each element of the chunk map corresponds to one page within the chunk. */
52 struct arena_chunk_map_bits_s {
53 /*
54 * Run address (or size) and various flags are stored together. The bit
55 * layout looks like (assuming 32-bit system):
56 *
57 * ???????? ???????? ????nnnn nnnndula
58 *
59 * ? : Unallocated: Run address for first/last pages, unset for internal
60 * pages.
61 * Small: Run page offset.
62 * Large: Run size for first page, unset for trailing pages.
63 * n : binind for small size class, BININD_INVALID for large size class.
64 * d : dirty?
65 * u : unzeroed?
66 * l : large?
67 * a : allocated?
68 *
69 * Following are example bit patterns for the three types of runs.
70 *
71 * p : run page offset
72 * s : run size
73 * n : binind for size class; large objects set these to BININD_INVALID
74 * x : don't care
75 * - : 0
76 * + : 1
77 * [DULA] : bit set
78 * [dula] : bit unset
79 *
80 * Unallocated (clean):
81 * ssssssss ssssssss ssss++++ ++++du-a
82 * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
83 * ssssssss ssssssss ssss++++ ++++dU-a
84 *
85 * Unallocated (dirty):
86 * ssssssss ssssssss ssss++++ ++++D--a
87 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
88 * ssssssss ssssssss ssss++++ ++++D--a
89 *
90 * Small:
91 * pppppppp pppppppp ppppnnnn nnnnd--A
92 * pppppppp pppppppp ppppnnnn nnnn---A
93 * pppppppp pppppppp ppppnnnn nnnnd--A
94 *
95 * Large:
96 * ssssssss ssssssss ssss++++ ++++D-LA
97 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
98 * -------- -------- ----++++ ++++D-LA
99 *
100 * Large (sampled, size <= LARGE_MINCLASS):
101 * ssssssss ssssssss ssssnnnn nnnnD-LA
102 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
103 * -------- -------- ----++++ ++++D-LA
104 *
105 * Large (not sampled, size == LARGE_MINCLASS):
106 * ssssssss ssssssss ssss++++ ++++D-LA
107 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
108 * -------- -------- ----++++ ++++D-LA
109 */
110 size_t bits;
111 #define CHUNK_MAP_BININD_SHIFT 4
112 #define BININD_INVALID ((size_t)0xffU)
113 /* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
114 #define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
115 #define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
116 #define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
117 #define CHUNK_MAP_DIRTY ((size_t)0x8U)
118 #define CHUNK_MAP_UNZEROED ((size_t)0x4U)
119 #define CHUNK_MAP_LARGE ((size_t)0x2U)
120 #define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
121 #define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
122 };
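/*
 * Illustrative sketch, not part of this header: decoding a mapbits word
 * with the masks defined above.  The function name is hypothetical; the
 * supported accessors are the arena_mapbits_*_get() inline functions later
 * in this file.
 */
#if 0
static void
example_decode_mapbits(size_t mapbits)
{
	bool allocated = (mapbits & CHUNK_MAP_ALLOCATED) != 0;
	bool large = (mapbits & CHUNK_MAP_LARGE) != 0;
	bool dirty = (mapbits & CHUNK_MAP_DIRTY) != 0;
	index_t binind = (index_t)((mapbits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT);

	if (!allocated || large) {
		/* Unallocated/large: the high bits hold the run size. */
		size_t runsize = mapbits & ~PAGE_MASK;
		(void)runsize;
	} else {
		/* Small: the high bits hold the run page offset. */
		size_t runind = mapbits >> LG_PAGE;
		(void)runind;
	}
	(void)dirty; (void)binind;
}
#endif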
123
124 struct arena_runs_dirty_link_s {
125 qr(arena_runs_dirty_link_t) rd_link;
126 };
127
128 /*
129 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
130 * like arena_chunk_map_bits_t. Two separate arrays are stored within each
131 * chunk header in order to improve cache locality.
132 */
133 struct arena_chunk_map_misc_s {
134 /*
135 * Linkage for run trees. There are two disjoint uses:
136 *
137 * 1) arena_t's runs_avail tree.
138 * 2) arena_run_t conceptually uses this linkage for in-use non-full
139 * runs, rather than directly embedding linkage.
140 */
141 rb_node(arena_chunk_map_misc_t) rb_link;
142
143 union {
144 /* Linkage for list of dirty runs. */
145 arena_runs_dirty_link_t rd;
146
147 /* Profile counters, used for large object runs. */
148 union {
149 void *prof_tctx_pun;
150 prof_tctx_t *prof_tctx;
151 };
152
153 /* Small region run metadata. */
154 arena_run_t run;
155 };
156 };
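/*
 * Note: because rd and run are embedded in this struct, the enclosing
 * arena_chunk_map_misc_t can be recovered from either pointer with
 * offsetof() arithmetic; see arena_rd_to_miscelm() and
 * arena_run_to_miscelm() below.
 */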
157 typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
158 typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
159 #endif /* JEMALLOC_ARENA_STRUCTS_A */
160
161 #ifdef JEMALLOC_ARENA_STRUCTS_B
162 /* Arena chunk header. */
163 struct arena_chunk_s {
164 /*
165 * A pointer to the arena that owns the chunk is stored within the node.
166 * This field as a whole is used by chunks_rtree to support both
167 * ivsalloc() and core-based debugging.
168 */
169 extent_node_t node;
170
171 /*
172 * Map of pages within chunk that keeps track of free/large/small. The
173 * first map_bias entries are omitted, since the chunk header does not
174 * need to be tracked in the map. This omission saves a header page
175 * for common chunk sizes (e.g. 4 MiB).
176 */
177 arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
178 };
179
180 /*
181 * Read-only information associated with each element of arena_t's bins array
182 * is stored separately, partly to reduce memory usage (only one copy, rather
183 * than one per arena), but mainly to avoid false cacheline sharing.
184 *
185 * Each run has the following layout:
186 *
187 * /--------------------\
188 * | pad? |
189 * |--------------------|
190 * | redzone |
191 * reg0_offset | region 0 |
192 * | redzone |
193 * |--------------------| \
194 * | redzone | |
195 * | region 1 | > reg_interval
196 * | redzone | /
197 * |--------------------|
198 * | ... |
199 * | ... |
200 * | ... |
201 * |--------------------|
202 * | redzone |
203 * | region nregs-1 |
204 * | redzone |
205 * |--------------------|
206 * | alignment pad? |
207 * \--------------------/
208 *
209 * reg_interval has at least the same minimum alignment as reg_size; this
210 * preserves the alignment constraint that sa2u() depends on. Alignment pad is
211 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
212 */
213 struct arena_bin_info_s {
214 /* Size of regions in a run for this bin's size class. */
215 size_t reg_size;
216
217 /* Redzone size. */
218 size_t redzone_size;
219
220 /* Interval between regions (reg_size + (redzone_size << 1)). */
221 size_t reg_interval;
222
223 /* Total size of a run for this bin's size class. */
224 size_t run_size;
225
226 /* Total number of regions in a run for this bin's size class. */
227 uint32_t nregs;
228
229 /*
230 * Metadata used to manipulate bitmaps for runs associated with this
231 * bin.
232 */
233 bitmap_info_t bitmap_info;
234
235 /* Offset of first region in a run for this bin's size class. */
236 uint32_t reg0_offset;
237 };
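/*
 * Illustrative sketch, not part of this header: the address arithmetic
 * implied by the run layout diagram above.  The helper name and the rpages
 * parameter are hypothetical; the inverse mapping (pointer to region index)
 * is arena_run_regind() below.
 */
#if 0
static void *
example_region_address(void *rpages, arena_bin_info_t *bin_info,
    unsigned regind)
{

	assert(regind < bin_info->nregs);
	/*
	 * Region 0 starts reg0_offset bytes into the run's pages;
	 * consecutive regions are reg_interval bytes apart.
	 */
	return ((void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)regind * (uintptr_t)bin_info->reg_interval));
}
#endif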
238
239 struct arena_bin_s {
240 /*
241 * All operations on runcur, runs, and stats require that lock be
242 * locked. Run allocation/deallocation are protected by the arena lock,
243 * which may be acquired while holding one or more bin locks, but not
244 * vice versa.
245 */
246 malloc_mutex_t lock;
247
248 /*
249 * Current run being used to service allocations of this bin's size
250 * class.
251 */
252 arena_run_t *runcur;
253
254 /*
255 * Tree of non-full runs. This tree is used when looking for an
256 * existing run when runcur is no longer usable. We choose the
257 * non-full run that is lowest in memory; this policy tends to keep
258 * objects packed well, and it can also help reduce the number of
259 * almost-empty chunks.
260 */
261 arena_run_tree_t runs;
262
263 /* Bin statistics. */
264 malloc_bin_stats_t stats;
265 };
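/*
 * Illustrative sketch, not part of this header: the lock acquisition order
 * permitted by the comment above (bin lock first, arena lock second, never
 * the reverse).  The function is hypothetical; real call sites are in
 * arena.c.
 */
#if 0
static void
example_lock_order(arena_t *arena, arena_bin_t *bin)
{

	malloc_mutex_lock(&bin->lock);
	malloc_mutex_lock(&arena->lock);
	/* ... allocate or deallocate a run on behalf of this bin ... */
	malloc_mutex_unlock(&arena->lock);
	malloc_mutex_unlock(&bin->lock);
}
#endif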
266
267 struct arena_s {
268 /* This arena's index within the arenas array. */
269 unsigned ind;
270
271 /*
272 * Number of threads currently assigned to this arena. This field is
273 * protected by arenas_lock.
274 */
275 unsigned nthreads;
276
277 /*
278 * There are three classes of arena operations from a locking
279 * perspective:
280 * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
281 * 2) Bin-related operations are protected by bin locks.
282 * 3) Chunk- and run-related operations are protected by this mutex.
283 */
284 malloc_mutex_t lock;
285
286 arena_stats_t stats;
287 /*
288 * List of tcaches for extant threads associated with this arena.
289 * Stats from these are merged incrementally, and at exit if
290 * opt_stats_print is enabled.
291 */
292 ql_head(tcache_t) tcache_ql;
293
294 uint64_t prof_accumbytes;
295
296 /*
297 * PRNG state for cache index randomization of large allocation base
298 * pointers.
299 */
300 uint64_t offset_state;
301
302 dss_prec_t dss_prec;
303
304 /*
305 * In order to avoid rapid chunk allocation/deallocation when an arena
306 * oscillates right on the cusp of needing a new chunk, cache the most
307 * recently freed chunk. The spare is left in the arena's chunk trees
308 * until it is deleted.
309 *
310 * There is one spare chunk per arena, rather than one spare total, in
311 * order to avoid interactions between multiple threads that could make
312 * a single spare inadequate.
313 */
314 arena_chunk_t *spare;
315
316 /* Minimum ratio (log base 2) of nactive:ndirty. */
317 ssize_t lg_dirty_mult;
318
319 /* Number of pages in active runs and huge regions. */
320 size_t nactive;
321
322 /*
323 * Current count of pages within unused runs that are potentially
324 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
325 * By tracking this, we can institute a limit on how much dirty unused
326 * memory is mapped for each arena.
327 */
328 size_t ndirty;
329
330 /*
331 * Size/address-ordered tree of this arena's available runs. The tree
332 * is used for first-best-fit run allocation.
333 */
334 arena_avail_tree_t runs_avail;
335
336 /*
337 * Unused dirty memory this arena manages. Dirty memory is conceptually
338 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
339 * chunks, but the list linkage is actually semi-duplicated in order to
340 * avoid extra arena_chunk_map_misc_t space overhead.
341 *
342 * LRU-----------------------------------------------------------MRU
343 *
344 * /-- arena ---\
345 * | |
346 * | |
347 * |------------| /- chunk -\
348 * ...->|chunks_cache|<--------------------------->| /----\ |<--...
349 * |------------| | |node| |
350 * | | | | | |
351 * | | /- run -\ /- run -\ | | | |
352 * | | | | | | | | | |
353 * | | | | | | | | | |
354 * |------------| |-------| |-------| | |----| |
355 * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
356 * |------------| |-------| |-------| | |----| |
357 * | | | | | | | | | |
358 * | | | | | | | \----/ |
359 * | | \-------/ \-------/ | |
360 * | | | |
361 * | | | |
362 * \------------/ \---------/
363 */
364 arena_runs_dirty_link_t runs_dirty;
365 extent_node_t chunks_cache;
366
367 /* Extant huge allocations. */
368 ql_head(extent_node_t) huge;
369 /* Synchronizes all huge allocation/update/deallocation. */
370 malloc_mutex_t huge_mtx;
371
372 /*
373 * Trees of chunks that were previously allocated (trees differ only in
374 * node ordering). These are used when allocating chunks, in an attempt
375 * to re-use address space. Depending on function, different tree
376 * orderings are needed, which is why there are two trees with the same
377 * contents.
378 */
379 extent_tree_t chunks_szad_cache;
380 extent_tree_t chunks_ad_cache;
381 extent_tree_t chunks_szad_mmap;
382 extent_tree_t chunks_ad_mmap;
383 extent_tree_t chunks_szad_dss;
384 extent_tree_t chunks_ad_dss;
385 malloc_mutex_t chunks_mtx;
386 /* Cache of nodes that were allocated via base_alloc(). */
387 ql_head(extent_node_t) node_cache;
388 malloc_mutex_t node_cache_mtx;
389
390 /*
391 * User-configurable chunk allocation/deallocation/purge functions.
392 */
393 chunk_alloc_t *chunk_alloc;
394 chunk_dalloc_t *chunk_dalloc;
395 chunk_purge_t *chunk_purge;
396
397 /* bins is used to store trees of free regions. */
398 arena_bin_t bins[NBINS];
399 };
400 #endif /* JEMALLOC_ARENA_STRUCTS_B */
401
402 #endif /* JEMALLOC_H_STRUCTS */
403 /******************************************************************************/
404 #ifdef JEMALLOC_H_EXTERNS
405
406 static const size_t large_pad =
407 #ifdef JEMALLOC_CACHE_OBLIVIOUS
408 PAGE
409 #else
410 0
411 #endif
412 ;
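/*
 * Note: when JEMALLOC_CACHE_OBLIVIOUS is defined, each large run carries one
 * extra page of padding so that the base pointer can be randomized; the
 * usable size of a large allocation is therefore the mapped run size minus
 * large_pad (see arena_salloc() and arena_dalloc() below).
 */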
413
414 extern ssize_t opt_lg_dirty_mult;
415
416 extern arena_bin_info_t arena_bin_info[NBINS];
417
418 extern size_t map_bias; /* Number of arena chunk header pages. */
419 extern size_t map_misc_offset;
420 extern size_t arena_maxrun; /* Max run size for arenas. */
421 extern size_t arena_maxclass; /* Max size class for arenas. */
422 extern unsigned nlclasses; /* Number of large size classes. */
423 extern unsigned nhclasses; /* Number of huge size classes. */
424
425 void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
426 bool cache);
427 void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
428 bool cache);
429 extent_node_t *arena_node_alloc(arena_t *arena);
430 void arena_node_dalloc(arena_t *arena, extent_node_t *node);
431 void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
432 bool *zero);
433 void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
434 void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
435 size_t oldsize, size_t usize);
436 void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
437 size_t oldsize, size_t usize);
438 bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
439 size_t oldsize, size_t usize, bool *zero);
440 ssize_t arena_lg_dirty_mult_get(arena_t *arena);
441 bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
442 void arena_maybe_purge(arena_t *arena);
443 void arena_purge_all(arena_t *arena);
444 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
445 index_t binind, uint64_t prof_accumbytes);
446 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
447 bool zero);
448 #ifdef JEMALLOC_JET
449 typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
450 uint8_t);
451 extern arena_redzone_corruption_t *arena_redzone_corruption;
452 typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
453 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
454 #else
455 void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
456 #endif
457 void arena_quarantine_junk_small(void *ptr, size_t usize);
458 void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
459 void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
460 void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
461 size_t alignment, bool zero, tcache_t *tcache);
462 void arena_prof_promoted(const void *ptr, size_t size);
463 void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
464 void *ptr, arena_chunk_map_bits_t *bitselm);
465 void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
466 size_t pageind, arena_chunk_map_bits_t *bitselm);
467 void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
468 size_t pageind);
469 #ifdef JEMALLOC_JET
470 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
471 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
472 #else
473 void arena_dalloc_junk_large(void *ptr, size_t usize);
474 #endif
475 void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
476 void *ptr);
477 void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
478 #ifdef JEMALLOC_JET
479 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
480 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
481 #endif
482 bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
483 size_t extra, bool zero);
484 void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
485 size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache);
486 dss_prec_t arena_dss_prec_get(arena_t *arena);
487 bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
488 ssize_t arena_lg_dirty_mult_default_get(void);
489 bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
490 void arena_stats_merge(arena_t *arena, const char **dss,
491 ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
492 arena_stats_t *astats, malloc_bin_stats_t *bstats,
493 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
494 arena_t *arena_new(unsigned ind);
495 bool arena_boot(void);
496 void arena_prefork(arena_t *arena);
497 void arena_postfork_parent(arena_t *arena);
498 void arena_postfork_child(arena_t *arena);
499
500 #endif /* JEMALLOC_H_EXTERNS */
501 /******************************************************************************/
502 #ifdef JEMALLOC_H_INLINES
503
504 #ifndef JEMALLOC_ENABLE_INLINE
505 arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
506 size_t pageind);
507 arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
508 size_t pageind);
509 size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
510 void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
511 arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
512 arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
513 size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
514 size_t arena_mapbitsp_read(size_t *mapbitsp);
515 size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
516 size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
517 size_t pageind);
518 size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
519 size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
520 index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
521 size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
522 size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
523 size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
524 size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
525 void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
526 void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
527 size_t size, size_t flags);
528 void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
529 size_t size);
530 void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
531 size_t size, size_t flags);
532 void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
533 index_t binind);
534 void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
535 size_t runind, index_t binind, size_t flags);
536 void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
537 size_t unzeroed);
538 void arena_metadata_allocated_add(arena_t *arena, size_t size);
539 void arena_metadata_allocated_sub(arena_t *arena, size_t size);
540 size_t arena_metadata_allocated_get(arena_t *arena);
541 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
542 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
543 bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
544 index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
545 index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
546 unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
547 const void *ptr);
548 prof_tctx_t *arena_prof_tctx_get(const void *ptr);
549 void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
550 void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
551 tcache_t *tcache);
552 arena_t *arena_aalloc(const void *ptr);
553 size_t arena_salloc(const void *ptr, bool demote);
554 void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
555 void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
556 #endif
557
558 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
559 # ifdef JEMALLOC_ARENA_INLINE_A
560 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
561 arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
562 {
563
564 assert(pageind >= map_bias);
565 assert(pageind < chunk_npages);
566
567 return (&chunk->map_bits[pageind-map_bias]);
568 }
569
570 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
571 arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
572 {
573
574 assert(pageind >= map_bias);
575 assert(pageind < chunk_npages);
576
577 return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
578 (uintptr_t)map_misc_offset) + pageind-map_bias);
579 }
580
581 JEMALLOC_ALWAYS_INLINE size_t
582 arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
583 {
584 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
585 size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
586 map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
587
588 assert(pageind >= map_bias);
589 assert(pageind < chunk_npages);
590
591 return (pageind);
592 }
593
594 JEMALLOC_ALWAYS_INLINE void *
595 arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
596 {
597 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
598 size_t pageind = arena_miscelm_to_pageind(miscelm);
599
600 return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
601 }
602
603 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
604 arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
605 {
606 arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
607 *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
608
609 assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
610 assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
611
612 return (miscelm);
613 }
614
615 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
616 arena_run_to_miscelm(arena_run_t *run)
617 {
618 arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
619 *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
620
621 assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
622 assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
623
624 return (miscelm);
625 }
626
627 JEMALLOC_ALWAYS_INLINE size_t *
628 arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
629 {
630
631 return (&arena_bitselm_get(chunk, pageind)->bits);
632 }
633
634 JEMALLOC_ALWAYS_INLINE size_t
635 arena_mapbitsp_read(size_t *mapbitsp)
636 {
637
638 return (*mapbitsp);
639 }
640
641 JEMALLOC_ALWAYS_INLINE size_t
642 arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
643 {
644
645 return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
646 }
647
648 JEMALLOC_ALWAYS_INLINE size_t
649 arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
650 {
651 size_t mapbits;
652
653 mapbits = arena_mapbits_get(chunk, pageind);
654 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
655 return (mapbits & ~PAGE_MASK);
656 }
657
658 JEMALLOC_ALWAYS_INLINE size_t
659 arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
660 {
661 size_t mapbits;
662
663 mapbits = arena_mapbits_get(chunk, pageind);
664 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
665 (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
666 return (mapbits & ~PAGE_MASK);
667 }
668
669 JEMALLOC_ALWAYS_INLINE size_t
670 arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
671 {
672 size_t mapbits;
673
674 mapbits = arena_mapbits_get(chunk, pageind);
675 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
676 CHUNK_MAP_ALLOCATED);
677 return (mapbits >> LG_PAGE);
678 }
679
680 JEMALLOC_ALWAYS_INLINE index_t
681 arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
682 {
683 size_t mapbits;
684 index_t binind;
685
686 mapbits = arena_mapbits_get(chunk, pageind);
687 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
688 assert(binind < NBINS || binind == BININD_INVALID);
689 return (binind);
690 }
691
692 JEMALLOC_ALWAYS_INLINE size_t
693 arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
694 {
695 size_t mapbits;
696
697 mapbits = arena_mapbits_get(chunk, pageind);
698 return (mapbits & CHUNK_MAP_DIRTY);
699 }
700
701 JEMALLOC_ALWAYS_INLINE size_t
702 arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
703 {
704 size_t mapbits;
705
706 mapbits = arena_mapbits_get(chunk, pageind);
707 return (mapbits & CHUNK_MAP_UNZEROED);
708 }
709
710 JEMALLOC_ALWAYS_INLINE size_t
711 arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
712 {
713 size_t mapbits;
714
715 mapbits = arena_mapbits_get(chunk, pageind);
716 return (mapbits & CHUNK_MAP_LARGE);
717 }
718
719 JEMALLOC_ALWAYS_INLINE size_t
720 arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
721 {
722 size_t mapbits;
723
724 mapbits = arena_mapbits_get(chunk, pageind);
725 return (mapbits & CHUNK_MAP_ALLOCATED);
726 }
727
728 JEMALLOC_ALWAYS_INLINE void
729 arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
730 {
731
732 *mapbitsp = mapbits;
733 }
734
735 JEMALLOC_ALWAYS_INLINE void
736 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
737 size_t flags)
738 {
739 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
740
741 assert(size == PAGE_CEILING(size));
742 assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
743 assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
744 arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
745 }
746
747 JEMALLOC_ALWAYS_INLINE void
748 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
749 size_t size)
750 {
751 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
752 size_t mapbits = arena_mapbitsp_read(mapbitsp);
753
754 assert(size == PAGE_CEILING(size));
755 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
756 arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
757 }
758
759 JEMALLOC_ALWAYS_INLINE void
760 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
761 size_t flags)
762 {
763 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
764 size_t mapbits = arena_mapbitsp_read(mapbitsp);
765 size_t unzeroed;
766
767 assert(size == PAGE_CEILING(size));
768 assert((flags & CHUNK_MAP_DIRTY) == flags);
769 unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
770 arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
771 | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
772 }
773
774 JEMALLOC_ALWAYS_INLINE void
775 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
776 index_t binind)
777 {
778 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
779 size_t mapbits = arena_mapbitsp_read(mapbitsp);
780
781 assert(binind <= BININD_INVALID);
782 assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
783 large_pad);
784 arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
785 (binind << CHUNK_MAP_BININD_SHIFT));
786 }
787
788 JEMALLOC_ALWAYS_INLINE void
789 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
790 index_t binind, size_t flags)
791 {
792 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
793 size_t mapbits = arena_mapbitsp_read(mapbitsp);
794 size_t unzeroed;
795
796 assert(binind < BININD_INVALID);
797 assert(pageind - runind >= map_bias);
798 assert((flags & CHUNK_MAP_DIRTY) == flags);
799 unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
800 arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
801 CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
802 }
803
804 JEMALLOC_ALWAYS_INLINE void
805 arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
806 size_t unzeroed)
807 {
808 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
809 size_t mapbits = arena_mapbitsp_read(mapbitsp);
810
811 arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
812 unzeroed);
813 }
814
815 JEMALLOC_INLINE void
816 arena_metadata_allocated_add(arena_t *arena, size_t size)
817 {
818
819 atomic_add_z(&arena->stats.metadata_allocated, size);
820 }
821
822 JEMALLOC_INLINE void
823 arena_metadata_allocated_sub(arena_t *arena, size_t size)
824 {
825
826 atomic_sub_z(&arena->stats.metadata_allocated, size);
827 }
828
829 JEMALLOC_INLINE size_t
830 arena_metadata_allocated_get(arena_t *arena)
831 {
832
833 return (atomic_read_z(&arena->stats.metadata_allocated));
834 }
835
836 JEMALLOC_INLINE bool
837 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
838 {
839
840 cassert(config_prof);
841 assert(prof_interval != 0);
842
843 arena->prof_accumbytes += accumbytes;
844 if (arena->prof_accumbytes >= prof_interval) {
845 arena->prof_accumbytes -= prof_interval;
846 return (true);
847 }
848 return (false);
849 }
850
851 JEMALLOC_INLINE bool
852 arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
853 {
854
855 cassert(config_prof);
856
857 if (likely(prof_interval == 0))
858 return (false);
859 return (arena_prof_accum_impl(arena, accumbytes));
860 }
861
862 JEMALLOC_INLINE bool
863 arena_prof_accum(arena_t *arena, uint64_t accumbytes)
864 {
865
866 cassert(config_prof);
867
868 if (likely(prof_interval == 0))
869 return (false);
870
871 {
872 bool ret;
873
874 malloc_mutex_lock(&arena->lock);
875 ret = arena_prof_accum_impl(arena, accumbytes);
876 malloc_mutex_unlock(&arena->lock);
877 return (ret);
878 }
879 }
880
881 JEMALLOC_ALWAYS_INLINE index_t
882 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
883 {
884 index_t binind;
885
886 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
887
888 if (config_debug) {
889 arena_chunk_t *chunk;
890 arena_t *arena;
891 size_t pageind;
892 size_t actual_mapbits;
893 size_t rpages_ind;
894 arena_run_t *run;
895 arena_bin_t *bin;
896 index_t run_binind, actual_binind;
897 arena_bin_info_t *bin_info;
898 arena_chunk_map_misc_t *miscelm;
899 void *rpages;
900
901 assert(binind != BININD_INVALID);
902 assert(binind < NBINS);
903 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
904 arena = extent_node_arena_get(&chunk->node);
905 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
906 actual_mapbits = arena_mapbits_get(chunk, pageind);
907 assert(mapbits == actual_mapbits);
908 assert(arena_mapbits_large_get(chunk, pageind) == 0);
909 assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
910 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
911 pageind);
912 miscelm = arena_miscelm_get(chunk, rpages_ind);
913 run = &miscelm->run;
914 run_binind = run->binind;
915 bin = &arena->bins[run_binind];
916 actual_binind = bin - arena->bins;
917 assert(run_binind == actual_binind);
918 bin_info = &arena_bin_info[actual_binind];
919 rpages = arena_miscelm_to_rpages(miscelm);
920 assert(((uintptr_t)ptr - ((uintptr_t)rpages +
921 (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
922 == 0);
923 }
924
925 return (binind);
926 }
927 # endif /* JEMALLOC_ARENA_INLINE_A */
928
929 # ifdef JEMALLOC_ARENA_INLINE_B
930 JEMALLOC_INLINE index_t
931 arena_bin_index(arena_t *arena, arena_bin_t *bin)
932 {
933 index_t binind = bin - arena->bins;
934 assert(binind < NBINS);
935 return (binind);
936 }
937
938 JEMALLOC_INLINE unsigned
939 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
940 {
941 unsigned shift, diff, regind;
942 size_t interval;
943 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
944 void *rpages = arena_miscelm_to_rpages(miscelm);
945
946 /*
947 * Freeing a pointer lower than region zero can cause assertion
948 * failure.
949 */
950 assert((uintptr_t)ptr >= (uintptr_t)rpages +
951 (uintptr_t)bin_info->reg0_offset);
952
953 /*
954 * Avoid doing division with a variable divisor if possible. Using
955 * actual division here can reduce allocator throughput by over 20%!
956 */
957 diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
958 bin_info->reg0_offset);
959
960 /* Rescale (factor powers of 2 out of the numerator and denominator). */
961 interval = bin_info->reg_interval;
962 shift = jemalloc_ffs(interval) - 1;
963 diff >>= shift;
964 interval >>= shift;
965
966 if (interval == 1) {
967 /* The divisor was a power of 2. */
968 regind = diff;
969 } else {
970 /*
971 * To divide by a number D that is not a power of two we multiply by
972 * (2^SIZE_INV_SHIFT / D) and then right shift by SIZE_INV_SHIFT positions.
973 *
974 * X / D
975 *
976 * becomes
977 *
978 * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
979 *
980 * We can omit the first three elements, because we never
981 * divide by 0, and 1 and 2 are both powers of two, which are
982 * handled above.
983 */
984 #define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
985 #define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
986 static const unsigned interval_invs[] = {
987 SIZE_INV(3),
988 SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
989 SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
990 SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
991 SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
992 SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
993 SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
994 SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
995 };
996
997 if (likely(interval <= ((sizeof(interval_invs) /
998 sizeof(unsigned)) + 2))) {
999 regind = (diff * interval_invs[interval - 3]) >>
1000 SIZE_INV_SHIFT;
1001 } else
1002 regind = diff / interval;
1003 #undef SIZE_INV
1004 #undef SIZE_INV_SHIFT
1005 }
1006 assert(diff == regind * interval);
1007 assert(regind < bin_info->nregs);
1008
1009 return (regind);
1010 }
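/*
 * Illustrative sketch, not part of this header: a brute-force check of the
 * reciprocal-multiplication trick used by arena_run_regind() above.  The
 * function name is hypothetical; it verifies that multiplying by the
 * precomputed inverse and shifting matches exact division for every region
 * index a run can contain.
 */
#if 0
static void
example_check_size_inv(void)
{
	unsigned shift = (sizeof(unsigned) << 3) - LG_RUN_MAXREGS;
	unsigned d, q;

	for (d = 3; d <= 31; d++) {
		unsigned inv = ((1U << shift) / d) + 1;

		for (q = 0; q < RUN_MAXREGS; q++) {
			unsigned x = q * d;

			assert(((x * inv) >> shift) == q);
		}
	}
}
#endif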
1011
1012 JEMALLOC_INLINE prof_tctx_t *
1013 arena_prof_tctx_get(const void *ptr)
1014 {
1015 prof_tctx_t *ret;
1016 arena_chunk_t *chunk;
1017
1018 cassert(config_prof);
1019 assert(ptr != NULL);
1020
1021 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1022 if (likely(chunk != ptr)) {
1023 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1024 size_t mapbits = arena_mapbits_get(chunk, pageind);
1025 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
1026 if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
1027 ret = (prof_tctx_t *)(uintptr_t)1U;
1028 else {
1029 arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
1030 pageind);
1031 ret = atomic_read_p(&elm->prof_tctx_pun);
1032 }
1033 } else
1034 ret = huge_prof_tctx_get(ptr);
1035
1036 return (ret);
1037 }
1038
1039 JEMALLOC_INLINE void
1040 arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
1041 {
1042 arena_chunk_t *chunk;
1043
1044 cassert(config_prof);
1045 assert(ptr != NULL);
1046
1047 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1048 if (likely(chunk != ptr)) {
1049 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1050 assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1051
1052 if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
1053 arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
1054 pageind);
1055 atomic_write_p(&elm->prof_tctx_pun, tctx);
1056 }
1057 } else
1058 huge_prof_tctx_set(ptr, tctx);
1059 }
1060
1061 JEMALLOC_ALWAYS_INLINE void *
1062 arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
1063 tcache_t *tcache)
1064 {
1065
1066 assert(size != 0);
1067
1068 arena = arena_choose(tsd, arena);
1069 if (unlikely(arena == NULL))
1070 return (NULL);
1071
1072 if (likely(size <= SMALL_MAXCLASS)) {
1073 if (likely(tcache != NULL)) {
1074 return (tcache_alloc_small(tsd, arena, tcache, size,
1075 zero));
1076 } else
1077 return (arena_malloc_small(arena, size, zero));
1078 } else if (likely(size <= arena_maxclass)) {
1079 /*
1080 * Initialize tcache after checking size in order to avoid
1081 * infinite recursion during tcache initialization.
1082 */
1083 if (likely(tcache != NULL) && size <= tcache_maxclass) {
1084 return (tcache_alloc_large(tsd, arena, tcache, size,
1085 zero));
1086 } else
1087 return (arena_malloc_large(arena, size, zero));
1088 } else
1089 return (huge_malloc(tsd, arena, size, zero, tcache));
1090 }
1091
1092 JEMALLOC_ALWAYS_INLINE arena_t *
1093 arena_aalloc(const void *ptr)
1094 {
1095 arena_chunk_t *chunk;
1096
1097 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1098 if (likely(chunk != ptr))
1099 return (extent_node_arena_get(&chunk->node));
1100 else
1101 return (huge_aalloc(ptr));
1102 }
1103
1104 /* Return the size of the allocation pointed to by ptr. */
1105 JEMALLOC_ALWAYS_INLINE size_t
1106 arena_salloc(const void *ptr, bool demote)
1107 {
1108 size_t ret;
1109 arena_chunk_t *chunk;
1110 size_t pageind;
1111 index_t binind;
1112
1113 assert(ptr != NULL);
1114
1115 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1116 if (likely(chunk != ptr)) {
1117 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1118 assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1119 binind = arena_mapbits_binind_get(chunk, pageind);
1120 if (unlikely(binind == BININD_INVALID || (config_prof && !demote
1121 && arena_mapbits_large_get(chunk, pageind) != 0))) {
1122 /*
1123 * Large allocation. In the common case (demote), and
1124 * as this is an inline function, most callers will only
1125 * end up looking at binind to determine that ptr is a
1126 * small allocation.
1127 */
1128 assert(config_cache_oblivious || ((uintptr_t)ptr &
1129 PAGE_MASK) == 0);
1130 ret = arena_mapbits_large_size_get(chunk, pageind) -
1131 large_pad;
1132 assert(ret != 0);
1133 assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
1134 chunk_npages);
1135 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1136 arena_mapbits_dirty_get(chunk,
1137 pageind+((ret+large_pad)>>LG_PAGE)-1));
1138 } else {
1139 /*
1140 * Small allocation (possibly promoted to a large
1141 * object).
1142 */
1143 assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
1144 arena_ptr_small_binind_get(ptr,
1145 arena_mapbits_get(chunk, pageind)) == binind);
1146 ret = index2size(binind);
1147 }
1148 } else
1149 ret = huge_salloc(ptr);
1150
1151 return (ret);
1152 }
1153
1154 JEMALLOC_ALWAYS_INLINE void
1155 arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
1156 {
1157 arena_chunk_t *chunk;
1158 size_t pageind, mapbits;
1159
1160 assert(ptr != NULL);
1161
1162 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1163 if (likely(chunk != ptr)) {
1164 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1165 #if defined(__ANDROID__)
1166 /* Verify the ptr is actually in the chunk. */
1167 if (unlikely(pageind < map_bias || pageind >= chunk_npages)) {
1168 __libc_fatal_no_abort("Invalid address %p passed to free: invalid page index", ptr);
1169 return;
1170 }
1171 #endif
1172 mapbits = arena_mapbits_get(chunk, pageind);
1173 assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1174 #if defined(__ANDROID__)
1175 /* Verify the ptr has been allocated. */
1176 if (unlikely((mapbits & CHUNK_MAP_ALLOCATED) == 0)) {
1177 __libc_fatal("Invalid address %p passed to free: value not allocated", ptr);
1178 }
1179 #endif
1180 if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
1181 /* Small allocation. */
1182 if (likely(tcache != NULL)) {
1183 index_t binind = arena_ptr_small_binind_get(ptr,
1184 mapbits);
1185 tcache_dalloc_small(tsd, tcache, ptr, binind);
1186 } else {
1187 arena_dalloc_small(extent_node_arena_get(
1188 &chunk->node), chunk, ptr, pageind);
1189 }
1190 } else {
1191 size_t size = arena_mapbits_large_size_get(chunk,
1192 pageind);
1193
1194 assert(config_cache_oblivious || ((uintptr_t)ptr &
1195 PAGE_MASK) == 0);
1196
1197 if (likely(tcache != NULL) && size - large_pad <=
1198 tcache_maxclass) {
1199 tcache_dalloc_large(tsd, tcache, ptr, size -
1200 large_pad);
1201 } else {
1202 arena_dalloc_large(extent_node_arena_get(
1203 &chunk->node), chunk, ptr);
1204 }
1205 }
1206 } else
1207 huge_dalloc(tsd, ptr, tcache);
1208 }
1209
1210 JEMALLOC_ALWAYS_INLINE void
1211 arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
1212 {
1213 arena_chunk_t *chunk;
1214
1215 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1216 if (likely(chunk != ptr)) {
1217 if (config_prof && opt_prof) {
1218 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
1219 LG_PAGE;
1220 assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
1221 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1222 /*
1223 * Make sure to use promoted size, not request
1224 * size.
1225 */
1226 assert(((uintptr_t)ptr & PAGE_MASK) == 0);
1227 size = arena_mapbits_large_size_get(chunk,
1228 pageind) - large_pad;
1229 }
1230 }
1231 assert(s2u(size) == s2u(arena_salloc(ptr, false)));
1232
1233 if (likely(size <= SMALL_MAXCLASS)) {
1234 /* Small allocation. */
1235 if (likely(tcache != NULL)) {
1236 index_t binind = size2index(size);
1237 tcache_dalloc_small(tsd, tcache, ptr, binind);
1238 } else {
1239 size_t pageind = ((uintptr_t)ptr -
1240 (uintptr_t)chunk) >> LG_PAGE;
1241 arena_dalloc_small(extent_node_arena_get(
1242 &chunk->node), chunk, ptr, pageind);
1243 }
1244 } else {
1245 assert(config_cache_oblivious || ((uintptr_t)ptr &
1246 PAGE_MASK) == 0);
1247
1248 if (likely(tcache != NULL) && size <= tcache_maxclass)
1249 tcache_dalloc_large(tsd, tcache, ptr, size);
1250 else {
1251 arena_dalloc_large(extent_node_arena_get(
1252 &chunk->node), chunk, ptr);
1253 }
1254 }
1255 } else
1256 huge_dalloc(tsd, ptr, tcache);
1257 }
1258 # endif /* JEMALLOC_ARENA_INLINE_B */
1259 #endif
1260
1261 #endif /* JEMALLOC_H_INLINES */
1262 /******************************************************************************/
1263