Searched refs:arenas (Results 1 – 14 of 14) sorted by relevance
/external/jemalloc/src/ |
D | android_je_mallinfo.c |
      26  if (arenas[i] != NULL) {  in je_mallinfo()
      27  malloc_mutex_lock(&arenas[i]->lock);  in je_mallinfo()
      28  mi.hblkhd += arenas[i]->stats.mapped;  in je_mallinfo()
      29  mi.uordblks += arenas[i]->stats.allocated_large;  in je_mallinfo()
      30  mi.uordblks += arenas[i]->stats.allocated_huge;  in je_mallinfo()
      31  malloc_mutex_unlock(&arenas[i]->lock);  in je_mallinfo()
      34  arena_bin_t* bin = &arenas[i]->bins[j];  in je_mallinfo()
      62  if (arenas[aidx] != NULL) {  in __mallinfo_arena_info()
      63  malloc_mutex_lock(&arenas[aidx]->lock);  in __mallinfo_arena_info()
      64  mi.hblkhd = arenas[aidx]->stats.mapped;  in __mallinfo_arena_info()
      [all …]
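The android_je_mallinfo.c hits show the pattern Android's mallinfo glue uses: walk every slot of the global arenas array, skip uninitialized slots, take the per-arena lock, and accumulate that arena's counters into the mallinfo fields (hblkhd for mapped bytes, uordblks for allocated bytes). A minimal self-contained sketch of that pattern follows; the types and helper name are simplified stand-ins mirroring the snippet, not jemalloc's real definitions.

    #include <pthread.h>
    #include <stddef.h>

    /* Simplified stand-ins for jemalloc's internal types. */
    typedef pthread_mutex_t malloc_mutex_t;

    typedef struct {
        size_t mapped;
        size_t allocated_large;
        size_t allocated_huge;
    } arena_stats_t;

    typedef struct {
        malloc_mutex_t lock;
        arena_stats_t stats;
    } arena_t;

    struct mallinfo_totals {
        size_t hblkhd;   /* total mapped bytes */
        size_t uordblks; /* total allocated bytes */
    };

    /* Accumulate per-arena stats under each arena's lock, skipping slots
     * that were never initialized (arenas are created lazily). */
    static struct mallinfo_totals
    sum_arena_stats(arena_t **arenas, unsigned narenas)
    {
        struct mallinfo_totals mi = {0, 0};
        for (unsigned i = 0; i < narenas; i++) {
            if (arenas[i] == NULL)
                continue;
            pthread_mutex_lock(&arenas[i]->lock);
            mi.hblkhd += arenas[i]->stats.mapped;
            mi.uordblks += arenas[i]->stats.allocated_large;
            mi.uordblks += arenas[i]->stats.allocated_huge;
            pthread_mutex_unlock(&arenas[i]->lock);
        }
        return mi;
    }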
|
D | ctl.c |
      496  {NAME("arenas"), CHILD(named, arenas)},
      654  ctl_arena_stats_t *astats = &ctl_stats.arenas[i];  in ctl_arena_refresh()
      655  ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];  in ctl_arena_refresh()
      679  memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *  in ctl_grow()
      696  a0dalloc(ctl_stats.arenas);  in ctl_grow()
      697  ctl_stats.arenas = astats;  in ctl_grow()
      713  ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);  in ctl_refresh()
      721  ctl_stats.arenas[i].initialized = initialized;  in ctl_refresh()
      730  ctl_stats.arenas[ctl_stats.narenas].allocated_small +  in ctl_refresh()
      731  ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +  in ctl_refresh()
      [all …]
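The ctl.c hits are from the mallctl machinery: ctl_stats.arenas holds one ctl_arena_stats_t per arena plus one extra slot at index narenas that serves as a merged summary, and ctl_grow() reallocates the whole array when an arena is added. A rough sketch of that grow-by-one pattern, assuming simplified stand-in types and plain calloc/free in place of a0malloc/a0dalloc (the real ctl_grow() also creates and initializes the new arena, which is omitted here):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for ctl_arena_stats_t. */
    typedef struct {
        bool   initialized;
        size_t allocated_small;
        size_t allocated_large;
        size_t allocated_huge;
    } ctl_arena_stats_t;

    typedef struct {
        unsigned narenas;
        ctl_arena_stats_t *arenas; /* (narenas + 1) elements; last one is the merged total */
    } ctl_stats_t;

    /* Grow the stats array by one arena while keeping the summary slot last. */
    static bool
    ctl_stats_grow(ctl_stats_t *stats)
    {
        unsigned old = stats->narenas;

        /* Room for the old per-arena slots, the new arena, and the summary. */
        ctl_arena_stats_t *astats = calloc(old + 2, sizeof(*astats));
        if (astats == NULL)
            return true; /* jemalloc convention: true means failure */

        /* Copy the old per-arena slots plus the old summary slot. */
        memcpy(astats, stats->arenas, (old + 1) * sizeof(*astats));

        /* Keep the merged summary in the last slot by swapping it with the
         * freshly zeroed slot that will hold the new arena's stats. */
        ctl_arena_stats_t tmp = astats[old];
        astats[old] = astats[old + 1];
        astats[old + 1] = tmp;

        free(stats->arenas);
        stats->arenas = astats;
        stats->narenas = old + 1;
        return false;
    }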
|
D | jemalloc.c |
      60  arena_t **arenas;  variable
      390  atomic_write_p((void **)&arenas[ind], arena);  in arena_set()
      1259  arenas = &a0;
      1260  memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
      1358  arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
      1360  if (arenas == NULL)
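jemalloc.c owns the global arena_t **arenas table: during early init it temporarily points at a single static slot, the full table is later carved out with base_alloc(), and arena_set() publishes new entries with an atomic pointer store so concurrent readers never see a half-initialized arena. A minimal sketch of the publication side, using C11 atomics as a stand-in for jemalloc's atomic_write_p() wrapper and a hypothetical NARENAS_MAX bound:

    #include <stdatomic.h>

    typedef struct arena_s arena_t; /* opaque here; jemalloc defines the real type */

    #define NARENAS_MAX 256 /* hypothetical bound; jemalloc sizes the table at init */

    /* Simplified model of the global arena pointer table; slots start out NULL. */
    static _Atomic(arena_t *) arenas[NARENAS_MAX];

    /* Publish a newly created arena.  The release store pairs with the
     * reader's acquire load (see the arena_get() sketch further down). */
    static void
    arena_set(unsigned ind, arena_t *arena)
    {
        atomic_store_explicit(&arenas[ind], arena, memory_order_release);
    }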
|
D | stats.c |
      507  arenas.lg_dirty_mult)  in stats_print()
      510  OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time)  in stats_print()
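stats.c only prints these values; applications can query the same mallctl names directly. A hedged example of reading the arena decay time, assuming a jemalloc build recent enough to have decay-based purging and one that exposes the standard mallctl() entry point (prefixed builds such as Android's export it as je_mallctl):

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int main(void)
    {
        ssize_t decay_time;
        size_t sz = sizeof(decay_time);

        /* "arenas.decay_time" is the default decay time applied to new arenas. */
        if (mallctl("arenas.decay_time", &decay_time, &sz, NULL, 0) == 0)
            printf("arenas.decay_time: %zd\n", decay_time);
        return 0;
    }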
|
/external/jemalloc/ |
D | ChangeLog |
      26  + arenas.decay_time
      27  + stats.arenas.<i>.decay_time
      63  - Refactor arenas array.  In addition to fixing a fork-related deadlock, this
      77  - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
      209  - Refactor huge allocation to be managed by arenas, so that arenas now
      213  + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
      214  "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
      216  + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
      217  "stats.arenas.<i>.hchunks.<j>.nmalloc",
      218  "stats.arenas.<i>.hchunks.<j>.ndalloc",
      [all …]
|
D | Android.bp |
      29  // The total number of arenas will be less than or equal to this number.
      30  // The number of arenas will be calculated as 2 * the number of cpus
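The Android.bp comment describes how the arena count is chosen: twice the number of CPUs, clamped to a build-time maximum. A small sketch of that calculation, assuming a hypothetical ARENAS_CAP in place of the actual value configured in Android.bp:

    #include <unistd.h>

    #define ARENAS_CAP 8 /* hypothetical; the real cap comes from the build flags */

    /* 2 * online CPUs, but never more than the configured cap. */
    static unsigned
    compute_narenas(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (ncpus < 1)
            ncpus = 1;
        unsigned n = (unsigned)(2 * ncpus);
        return n > ARENAS_CAP ? ARENAS_CAP : n;
    }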
|
/external/jemalloc/include/jemalloc/internal/ |
D | ctl.h | 64 ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ member
|
D | jemalloc_internal.h |
      447  extern arena_t **arenas;
      828  ret = arenas[ind];  in arena_get()
      830  ret = atomic_read_p((void *)&arenas[ind]);  in arena_get()
|
D | jemalloc_internal.h.in |
      445  * arenas array are necessarily used; arenas are created lazily as needed.
      447  extern arena_t **arenas;
      828  ret = arenas[ind];
      830  ret = atomic_read_p((void *)&arenas[ind]);
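These header hits show the reader side: arena_get() first does a cheap plain read of arenas[ind] and only falls back to atomic_read_p() when the slot looks empty, because arenas are created lazily and may be published concurrently. A minimal sketch of that read path, again modeling jemalloc's atomic wrappers with C11 atomics and using a hypothetical arena_lookup() name (the real arena_get() also takes a tsdn and can create the missing arena on demand):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct arena_s arena_t; /* opaque stand-in */

    #define NARENAS_MAX 256 /* hypothetical bound */

    static _Atomic(arena_t *) arenas[NARENAS_MAX]; /* slots filled in lazily */

    /* Try a cheap relaxed read first; if the slot still looks empty, retry
     * with an acquire load in case another thread just published the arena. */
    static arena_t *
    arena_lookup(unsigned ind)
    {
        arena_t *ret = atomic_load_explicit(&arenas[ind], memory_order_relaxed);
        if (ret == NULL)
            ret = atomic_load_explicit(&arenas[ind], memory_order_acquire);
        return ret;
    }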
|
D | private_symbols.txt | 108 arenas
|
D | private_namespace.h | 108 #define arenas JEMALLOC_N(arenas) macro
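private_namespace.h remaps every symbol listed in private_symbols.txt onto a build-time prefix, so statically linking jemalloc does not leak internal names such as arenas; private_unnamespace.h reverses the mapping. A minimal sketch of the idiom, assuming the default je_ prefix (the actual prefix is substituted into JEMALLOC_N() at configure time):

    /* Sketch only: "je_" is assumed; the configured prefix is used in practice. */
    #define JEMALLOC_N(n) je_##n

    #define arenas JEMALLOC_N(arenas) /* every use of arenas becomes je_arenas */

    /* ... internal sources are compiled with the mapping in effect ... */

    #undef arenas /* private_unnamespace.h undoes the mapping afterwards */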
|
D | jemalloc_internal_defs.h.in | 188 /* TLS is used to map arenas and magazine caches to threads. */
|
D | private_unnamespace.h | 108 #undef arenas
|
/external/valgrind/ |
D | NEWS | 1101 308711 give more info about aspacemgr and arenas in out_of_memory
|