/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
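
/*
 * Illustrative sketch (not code from this header): because every state marker
 * is <= TCACHE_STATE_MAX, code can tell a live tcache apart from a state
 * encoding with a single comparison, e.g.:
 *
 *	tcache_t *tcache = tsd_tcache_get(tsd);
 *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
 *		... not a real tcache; handle DISABLED/REINCARNATED/PURGATORY ...
 *	}
 */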

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define	TCACHE_NSLOTS_SMALL_MIN		20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the limit already imposed
 * by the size class itself: twice the number of regions per run.
 *
 * This constant must be an even number.
 */
#if defined(ANDROID_TCACHE_NSLOTS_SMALL_MAX)
#define	TCACHE_NSLOTS_SMALL_MAX		ANDROID_TCACHE_NSLOTS_SMALL_MAX
#else
#define	TCACHE_NSLOTS_SMALL_MAX		200
#endif

/* Number of cache slots for large size classes. */
#if defined(ANDROID_TCACHE_NSLOTS_LARGE)
#define	TCACHE_NSLOTS_LARGE		ANDROID_TCACHE_NSLOTS_LARGE
#else
#define	TCACHE_NSLOTS_LARGE		20
#endif

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#if defined(ANDROID_LG_TCACHE_MAXCLASS_DEFAULT)
#define	LG_TCACHE_MAXCLASS_DEFAULT	ANDROID_LG_TCACHE_MAXCLASS_DEFAULT
#else
#define	LG_TCACHE_MAXCLASS_DEFAULT	15
#endif
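
/*
 * With the default of 15, the largest cached size class works out to
 * 1U << 15 = 32768 bytes, i.e. size classes up to 32 KiB are eligible for
 * thread caching by default.
 */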

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
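
/*
 * Worked example (assuming a hypothetical configuration with NBINS == 39):
 * TCACHE_GC_INCR == 8192/39 + 1 == 211, so one bin is GCed every 211 events
 * and a full sweep over all 39 bins takes 39 * 211 == 8229 events, slightly
 * more than TCACHE_GC_SWEEP, as the rounding caveat above notes.
 */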

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
	tcache_enabled_false = 0, /* Enable cast to/from bool. */
	tcache_enabled_true = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	index_t		next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
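
/*
 * A minimal sketch of the size computation for this dynamically sized
 * structure (the real logic lives in tcache_create() and additionally aligns
 * the pointer stacks; names here are illustrative):
 *
 *	size = offsetof(tcache_t, tbins)
 *	    + nhbins * sizeof(tcache_bin_t)	(bin array)
 *	    + stack_nelms * sizeof(void *);	(contiguous avail stacks)
 *
 * where stack_nelms would be the sum of ncached_max over all nhbins bins.
 */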

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
	union {
		tcache_t	*tcache;
		tcaches_t	*next;
	};
};
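
/*
 * Sketch of the intended use (simplified from tcaches_create() and
 * tcaches_destroy(); locking and array growth omitted).  A free element's
 * next field threads it onto a free list, while a live element's tcache
 * field points at the cache, so the two union members are never needed at
 * the same time:
 *
 *	elm = tcaches_avail;			(allocate an ID)
 *	tcaches_avail = elm->next;
 *	elm->tcache = tcache_create(tsd, arena);
 *
 *	elm->next = tcaches_avail;		(release an ID)
 *	tcaches_avail = elm;
 */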

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t	nhbins;

/* Maximum cached size class. */
extern size_t	tcache_maxclass;
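
/*
 * A rough sketch of how tcache_boot() might derive these two values from
 * opt_lg_tcache_max (clamping against SMALL_MAXCLASS elided; treat this as an
 * approximation, not the exact boot logic):
 *
 *	tcache_maxclass = (1U << opt_lg_tcache_max);
 *	nhbins = size2index(tcache_maxclass) + 1;
 */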

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t	*tcaches;
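
/*
 * Example of the public-facing flow for explicit tcaches (application code;
 * error handling omitted):
 *
 *	unsigned tci;
 *	size_t sz = sizeof(tci);
 *	mallctl("tcache.create", &tci, &sz, NULL, 0);
 *	void *p = mallocx(4096, MALLOCX_TCACHE(tci));
 *	dallocx(p, MALLOCX_TCACHE(tci));
 */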

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, index_t binind);
void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    index_t binind, unsigned rem);
void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
    unsigned rem, tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
    arena_t *newarena);
void	tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void	tcache_cleanup(tsd_t *tsd);
void	tcache_enabled_cleanup(tsd_t *tsd);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcaches_create(tsd_t *tsd, unsigned *r_ind);
void	tcaches_flush(tsd_t *tsd, unsigned ind);
void	tcaches_destroy(tsd_t *tsd, unsigned ind);
bool	tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	tcache_event(tsd_t *tsd, tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, bool zero);
void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, bool zero);
void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    index_t binind);
void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
tcache_flush(void)
{
	tsd_t *tsd;

	cassert(config_tcache);

	tsd = tsd_fetch();
	tcache_cleanup(tsd);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tsd = tsd_fetch();
	tcache_enabled = tsd_tcache_enabled_get(tsd);
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tsd_tcache_enabled_set(tsd, tcache_enabled);
	}

	return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tsd = tsd_fetch();

	tcache_enabled = (tcache_enabled_t)enabled;
	tsd_tcache_enabled_set(tsd, tcache_enabled);

	if (!enabled)
		tcache_cleanup(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
	tcache_t *tcache;

	if (!config_tcache)
		return (NULL);

	tcache = tsd_tcache_get(tsd);
	if (!create)
		return (tcache);
	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
		tcache = tcache_get_hard(tsd);
		tsd_tcache_set(tsd, tcache);
	}

	return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
		tcache_event_hard(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if (unlikely((int)tbin->ncached < tbin->low_water))
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    bool zero)
{
	void *ret;
	index_t binind;
	size_t usize;
	tcache_bin_t *tbin;

	binind = size2index(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	usize = index2size(binind);
	ret = tcache_alloc_easy(tbin);
	if (unlikely(ret == NULL)) {
		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ret) == usize);

	if (likely(!zero)) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += usize;
	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    bool zero)
{
	void *ret;
	index_t binind;
	size_t usize;
	tcache_bin_t *tbin;

	binind = size2index(size);
	usize = index2size(binind);
	assert(usize <= tcache_maxclass);
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (unlikely(ret == NULL)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(arena, usize, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && usize == LARGE_MINCLASS) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (likely(!zero)) {
			if (config_fill) {
				if (unlikely(opt_junk_alloc))
					memset(ret, 0xa5, usize);
				else if (unlikely(opt_zero))
					memset(ret, 0, usize);
			}
		} else
			memset(ret, 0, usize);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += usize;
	}

	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
	index_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = size2index(size);

	if (config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_large(ptr, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL))
		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
	return (elm->tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/