#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

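/*
 * Return the size of the run backing the given chunk map misc element, as
 * encoded in its map bits.
 */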
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

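/*
 * Compare two chunk map misc elements by address, for use in address-ordered
 * run trees.
 */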
JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_addr_comp)

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
#endif

static arena_run_tree_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

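/*
 * Insert the unallocated run at pageind into the runs_avail tree that
 * corresponds to its quantized size.
 */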
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_tree_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_tree_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

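/* Allocate the lowest free region in run, as tracked by the run's bitmap. */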
JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

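/* Return the region at ptr to its small run, marking it free in the bitmap. */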
JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

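/*
 * Increase the arena's active page count, updating the chunk-granular cactive
 * statistic when stats are enabled.
 */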
static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

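/*
 * Remove the run from runs_avail (and runs_dirty if dirty) before splitting
 * off need_pages pages; any trailing remainder is re-inserted as a smaller
 * unallocated run.
 */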
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ?  CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

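/*
 * Convert the first size bytes of run into a large run, committing and/or
 * zeroing pages as dictated by the zero flag and the run's current
 * dirty/decommitted state.
 */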
static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(&arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
	    chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
			    chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(&arena->lock);
	return (chunk);
}

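/*
 * Allocate an arena chunk, preferring the chunk cache before falling back to
 * the chunk hooks, then register it and update mapped statistics.
 */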
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(arena, chunk, *zero)) {
			chunk_dalloc_cache(arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
		    zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
	 * chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get(chunk, map_bias+1),
		    (size_t)((uintptr_t) arena_bitselm_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
		    map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
		    arena_bitselm_get(chunk, chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

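/*
 * Return a chunk whose maximal run is available for use, reusing the spare
 * chunk if one exists.
 */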
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		bool committed;

		arena->spare = chunk;
		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
			arena_run_dirty_remove(arena, spare, map_bias,
			    chunk_npages-map_bias);
		}

		chunk_deregister(spare, &spare->node);

		committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
		    0);
		if (!committed) {
			/*
			 * Decommit the header.  Mark the chunk as decommitted
			 * even if header decommit fails, since treating a
			 * partially committed chunk as committed has a high
			 * potential for causing later access of decommitted
			 * memory.
			 */
			chunk_hooks = chunk_hooks_get(arena);
			chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}

		chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
		    chunksize, committed);

		if (config_stats) {
			arena->stats.mapped -= chunksize;
			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
		}
	} else
		arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(&arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(&arena->node_cache_mtx);
		return (base_alloc(sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(&arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
	    zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
	    zero, true);
	malloc_mutex_unlock(&arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
		    alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
    size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
	    zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize,
	    zero, true) == NULL);
	malloc_mutex_unlock(&arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
		    chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(&arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(&arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(&arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}

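/*
 * Advance the decay epoch to contain time, shift the dirty page backlog
 * accordingly, and recompute the backlog-based page limit.
 */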
static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance > 0);

	/* Add nadvance decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance], 0, (nadvance-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	return (decay_time >= -1 && decay_time <= NSTIME_SEC_MAX);
}

ssize_t
arena_decay_time_get(arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(&arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(&arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(&arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(arena, threshold);
	}
}

static void
arena_maybe_purge_decay(arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(arena, ndirty_limit);
}

void
arena_maybe_purge(arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(arena);
	else
		arena_maybe_purge_decay(arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

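/*
 * Stash dirty runs and cached chunks onto the purge sentinels, stopping once
 * purging the stash would reduce ndirty to ndirty_limit; returns the number
 * of pages stashed.
 */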
static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

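/*
 * Purge or decommit the stashed runs (cached chunks are deallocated later in
 * arena_unstash_purged()), dropping the arena lock around the purge calls;
 * returns the number of stashed pages processed.
 */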
1594 static size_t
arena_purge_stashed(arena_t * arena,chunk_hooks_t * chunk_hooks,arena_runs_dirty_link_t * purge_runs_sentinel,extent_node_t * purge_chunks_sentinel)1595 arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
1596     arena_runs_dirty_link_t *purge_runs_sentinel,
1597     extent_node_t *purge_chunks_sentinel)
1598 {
1599 	size_t npurged, nmadvise;
1600 	arena_runs_dirty_link_t *rdelm;
1601 	extent_node_t *chunkselm;
1602 
1603 	if (config_stats)
1604 		nmadvise = 0;
1605 	npurged = 0;
1606 
1607 	malloc_mutex_unlock(&arena->lock);
1608 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1609 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1610 	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1611 		size_t npages;
1612 
1613 		if (rdelm == &chunkselm->rd) {
1614 			/*
1615 			 * Don't actually purge the chunk here because 1)
1616 			 * chunkselm is embedded in the chunk and must remain
1617 			 * valid, and 2) we deallocate the chunk in
1618 			 * arena_unstash_purged(), where it is destroyed,
1619 			 * decommitted, or purged, depending on chunk
1620 			 * deallocation policy.
1621 			 */
1622 			size_t size = extent_node_size_get(chunkselm);
1623 			npages = size >> LG_PAGE;
1624 			chunkselm = qr_next(chunkselm, cc_link);
1625 		} else {
1626 			size_t pageind, run_size, flag_unzeroed, flags, i;
1627 			bool decommitted;
1628 			arena_chunk_t *chunk =
1629 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1630 			arena_chunk_map_misc_t *miscelm =
1631 			    arena_rd_to_miscelm(rdelm);
1632 			pageind = arena_miscelm_to_pageind(miscelm);
1633 			run_size = arena_mapbits_large_size_get(chunk, pageind);
1634 			npages = run_size >> LG_PAGE;
1635 
1636 			assert(pageind + npages <= chunk_npages);
1637 			assert(!arena_mapbits_decommitted_get(chunk, pageind));
1638 			assert(!arena_mapbits_decommitted_get(chunk,
1639 			    pageind+npages-1));
1640 			decommitted = !chunk_hooks->decommit(chunk, chunksize,
1641 			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1642 			if (decommitted) {
1643 				flag_unzeroed = 0;
1644 				flags = CHUNK_MAP_DECOMMITTED;
1645 			} else {
1646 				flag_unzeroed = chunk_purge_wrapper(arena,
1647 				    chunk_hooks, chunk, chunksize, pageind <<
1648 				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1649 				flags = flag_unzeroed;
1650 			}
1651 			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1652 			    flags);
1653 			arena_mapbits_large_set(chunk, pageind, run_size,
1654 			    flags);
1655 
1656 			/*
1657 			 * Set the unzeroed flag for internal pages, now that
1658 			 * chunk_purge_wrapper() has returned whether the pages
1659 			 * were zeroed as a side effect of purging.  This chunk
1660 			 * map modification is safe even though the arena mutex
1661 			 * isn't currently owned by this thread, because the run
1662 			 * is marked as allocated, thus protecting it from being
1663 			 * modified by any other thread.  As long as these
1664 			 * writes don't perturb the first and last elements'
1665 			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1666 			 */
1667 			for (i = 1; i < npages-1; i++) {
1668 				arena_mapbits_internal_set(chunk, pageind+i,
1669 				    flag_unzeroed);
1670 			}
1671 		}
1672 
1673 		npurged += npages;
1674 		if (config_stats)
1675 			nmadvise++;
1676 	}
1677 	malloc_mutex_lock(&arena->lock);
1678 
1679 	if (config_stats) {
1680 		arena->stats.nmadvise += nmadvise;
1681 		arena->stats.purged += npurged;
1682 	}
1683 
1684 	return (npurged);
1685 }
1686 
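/*
 * Deallocate the stashed runs/chunks after purging: cached chunks are removed
 * from the dirty list and released via chunk_dalloc_wrapper(), while runs are
 * returned to the arena via arena_run_dalloc() with cleaned=true.
 */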
1687 static void
1688 arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
1689     arena_runs_dirty_link_t *purge_runs_sentinel,
1690     extent_node_t *purge_chunks_sentinel)
1691 {
1692 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1693 	extent_node_t *chunkselm;
1694 
1695 	/* Deallocate chunks/runs. */
1696 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1697 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1698 	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1699 		rdelm_next = qr_next(rdelm, rd_link);
1700 		if (rdelm == &chunkselm->rd) {
1701 			extent_node_t *chunkselm_next = qr_next(chunkselm,
1702 			    cc_link);
1703 			void *addr = extent_node_addr_get(chunkselm);
1704 			size_t size = extent_node_size_get(chunkselm);
1705 			bool zeroed = extent_node_zeroed_get(chunkselm);
1706 			bool committed = extent_node_committed_get(chunkselm);
1707 			extent_node_dirty_remove(chunkselm);
1708 			arena_node_dalloc(arena, chunkselm);
1709 			chunkselm = chunkselm_next;
1710 			chunk_dalloc_wrapper(arena, chunk_hooks, addr, size,
1711 			    zeroed, committed);
1712 		} else {
1713 			arena_chunk_t *chunk =
1714 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1715 			arena_chunk_map_misc_t *miscelm =
1716 			    arena_rd_to_miscelm(rdelm);
1717 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1718 			bool decommitted = (arena_mapbits_decommitted_get(chunk,
1719 			    pageind) != 0);
1720 			arena_run_t *run = &miscelm->run;
1721 			qr_remove(rdelm, rd_link);
1722 			arena_run_dalloc(arena, run, false, true, decommitted);
1723 		}
1724 	}
1725 }
1726 
1727 /*
1728  * NB: ndirty_limit is interpreted differently depending on opt_purge:
1729  *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
1730  *                       desired state:
1731  *                       (arena->ndirty <= ndirty_limit)
1732  *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1733  *                       violating the invariant:
1734  *                       (arena->ndirty >= ndirty_limit)
1735  */
1736 static void
1737 arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
1738 {
1739 	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
1740 	size_t npurge, npurged;
1741 	arena_runs_dirty_link_t purge_runs_sentinel;
1742 	extent_node_t purge_chunks_sentinel;
1743 
1744 	arena->purging = true;
1745 
1746 	/*
1747 	 * Calls to arena_dirty_count() are disabled even for debug builds
1748 	 * because overhead grows nonlinearly as memory usage increases.
1749 	 */
1750 	if (false && config_debug) {
1751 		size_t ndirty = arena_dirty_count(arena);
1752 		assert(ndirty == arena->ndirty);
1753 	}
1754 	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1755 	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1756 
1757 	qr_new(&purge_runs_sentinel, rd_link);
1758 	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1759 
1760 	npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
1761 	    &purge_runs_sentinel, &purge_chunks_sentinel);
1762 	if (npurge == 0)
1763 		goto label_return;
1764 	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
1765 	    &purge_chunks_sentinel);
1766 	assert(npurged == npurge);
1767 	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
1768 	    &purge_chunks_sentinel);
1769 
1770 	if (config_stats)
1771 		arena->stats.npurge++;
1772 
1773 label_return:
1774 	arena->purging = false;
1775 }
1776 
1777 void
1778 arena_purge(arena_t *arena, bool all)
1779 {
1780 
1781 	malloc_mutex_lock(&arena->lock);
1782 	if (all)
1783 		arena_purge_to_limit(arena, 0);
1784 	else
1785 		arena_maybe_purge(arena);
1786 	malloc_mutex_unlock(&arena->lock);
1787 }
1788 
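/*
 * Coalesce the unallocated run described by {*p_size, *p_run_ind,
 * *p_run_pages} with its forward and backward neighbors, provided they are
 * unallocated and share the same dirty/decommitted flags.  The in/out
 * parameters are updated to describe the coalesced run.
 */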
1789 static void
1790 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
1791     size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1792     size_t flag_decommitted)
1793 {
1794 	size_t size = *p_size;
1795 	size_t run_ind = *p_run_ind;
1796 	size_t run_pages = *p_run_pages;
1797 
1798 	/* Try to coalesce forward. */
1799 	if (run_ind + run_pages < chunk_npages &&
1800 	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1801 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1802 	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1803 	    flag_decommitted) {
1804 		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1805 		    run_ind+run_pages);
1806 		size_t nrun_pages = nrun_size >> LG_PAGE;
1807 
1808 		/*
1809 		 * Remove successor from runs_avail; the coalesced run is
1810 		 * inserted later.
1811 		 */
1812 		assert(arena_mapbits_unallocated_size_get(chunk,
1813 		    run_ind+run_pages+nrun_pages-1) == nrun_size);
1814 		assert(arena_mapbits_dirty_get(chunk,
1815 		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
1816 		assert(arena_mapbits_decommitted_get(chunk,
1817 		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
1818 		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
1819 
1820 		/*
1821 		 * If the successor is dirty, remove it from the set of dirty
1822 		 * pages.
1823 		 */
1824 		if (flag_dirty != 0) {
1825 			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
1826 			    nrun_pages);
1827 		}
1828 
1829 		size += nrun_size;
1830 		run_pages += nrun_pages;
1831 
1832 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1833 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1834 		    size);
1835 	}
1836 
1837 	/* Try to coalesce backward. */
1838 	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1839 	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
1840 	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1841 	    flag_decommitted) {
1842 		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1843 		    run_ind-1);
1844 		size_t prun_pages = prun_size >> LG_PAGE;
1845 
1846 		run_ind -= prun_pages;
1847 
1848 		/*
1849 		 * Remove predecessor from runs_avail; the coalesced run is
1850 		 * inserted later.
1851 		 */
1852 		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1853 		    prun_size);
1854 		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
1855 		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1856 		    flag_decommitted);
1857 		arena_avail_remove(arena, chunk, run_ind, prun_pages);
1858 
1859 		/*
1860 		 * If the predecessor is dirty, remove it from the set of dirty
1861 		 * pages.
1862 		 */
1863 		if (flag_dirty != 0) {
1864 			arena_run_dirty_remove(arena, chunk, run_ind,
1865 			    prun_pages);
1866 		}
1867 
1868 		size += prun_size;
1869 		run_pages += prun_pages;
1870 
1871 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1872 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1873 		    size);
1874 	}
1875 
1876 	*p_size = size;
1877 	*p_run_ind = run_ind;
1878 	*p_run_pages = run_pages;
1879 }
1880 
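/*
 * Compute the size of the run starting at run_ind: large runs encode their
 * size in the chunk map, whereas small runs use the run size of the owning
 * bin.
 */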
1881 static size_t
1882 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1883     size_t run_ind)
1884 {
1885 	size_t size;
1886 
1887 	assert(run_ind >= map_bias);
1888 	assert(run_ind < chunk_npages);
1889 
1890 	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
1891 		size = arena_mapbits_large_size_get(chunk, run_ind);
1892 		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
1893 		    run_ind+(size>>LG_PAGE)-1) == 0);
1894 	} else {
1895 		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
1896 		size = bin_info->run_size;
1897 	}
1898 
1899 	return (size);
1900 }
1901 
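/*
 * Return a run to the arena: mark its pages unallocated, coalesce with
 * neighboring unallocated runs, insert the result into runs_avail (and the
 * dirty list if appropriate), deallocate the chunk if it becomes completely
 * unused, and possibly trigger purging.
 */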
1902 static void
1903 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
1904     bool decommitted)
1905 {
1906 	arena_chunk_t *chunk;
1907 	arena_chunk_map_misc_t *miscelm;
1908 	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
1909 
1910 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1911 	miscelm = arena_run_to_miscelm(run);
1912 	run_ind = arena_miscelm_to_pageind(miscelm);
1913 	assert(run_ind >= map_bias);
1914 	assert(run_ind < chunk_npages);
1915 	size = arena_run_size_get(arena, chunk, run, run_ind);
1916 	run_pages = (size >> LG_PAGE);
1917 	arena_nactive_sub(arena, run_pages);
1918 
1919 	/*
1920 	 * The run is dirty if the caller claims to have dirtied it, as well as
1921 	 * if it was already dirty before being allocated and the caller
1922 	 * doesn't claim to have cleaned it.
1923 	 */
1924 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1925 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1926 	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
1927 	    != 0)
1928 		dirty = true;
1929 	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
1930 	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
1931 
1932 	/* Mark pages as unallocated in the chunk map. */
1933 	if (dirty || decommitted) {
1934 		size_t flags = flag_dirty | flag_decommitted;
1935 		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
1936 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1937 		    flags);
1938 	} else {
1939 		arena_mapbits_unallocated_set(chunk, run_ind, size,
1940 		    arena_mapbits_unzeroed_get(chunk, run_ind));
1941 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1942 		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1943 	}
1944 
1945 	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
1946 	    flag_dirty, flag_decommitted);
1947 
1948 	/* Insert into runs_avail, now that coalescing is complete. */
1949 	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1950 	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1951 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1952 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1953 	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1954 	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
1955 	arena_avail_insert(arena, chunk, run_ind, run_pages);
1956 
1957 	if (dirty)
1958 		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
1959 
1960 	/* Deallocate chunk if it is now completely unused. */
1961 	if (size == arena_maxrun) {
1962 		assert(run_ind == map_bias);
1963 		assert(run_pages == (arena_maxrun >> LG_PAGE));
1964 		arena_chunk_dalloc(arena, chunk);
1965 	}
1966 
1967 	/*
1968 	 * It is okay to do dirty page processing here even if the chunk was
1969 	 * deallocated above, since in that case it is the spare.  Waiting
1970 	 * until after possible chunk deallocation to do dirty processing
1971 	 * allows for an old spare to be fully deallocated, thus decreasing the
1972 	 * chances of spuriously crossing the dirty page purging threshold.
1973 	 */
1974 	if (dirty)
1975 		arena_maybe_purge(arena);
1976 }
1977 
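/*
 * Trim (oldsize - newsize) bytes from the head of a large run and deallocate
 * the trimmed leading pages, leaving the caller with the trailing newsize
 * bytes.
 */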
1978 static void
1979 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1980     size_t oldsize, size_t newsize)
1981 {
1982 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
1983 	size_t pageind = arena_miscelm_to_pageind(miscelm);
1984 	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
1985 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
1986 	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
1987 	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
1988 	    CHUNK_MAP_UNZEROED : 0;
1989 
1990 	assert(oldsize > newsize);
1991 
1992 	/*
1993 	 * Update the chunk map so that arena_run_dalloc() can treat the
1994 	 * leading run as separately allocated.  Set the last element of each
1995 	 * run first, in case of single-page runs.
1996 	 */
1997 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
1998 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
1999 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2000 	    pageind+head_npages-1)));
2001 	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2002 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2003 
2004 	if (config_debug) {
2005 		UNUSED size_t tail_npages = newsize >> LG_PAGE;
2006 		assert(arena_mapbits_large_size_get(chunk,
2007 		    pageind+head_npages+tail_npages-1) == 0);
2008 		assert(arena_mapbits_dirty_get(chunk,
2009 		    pageind+head_npages+tail_npages-1) == flag_dirty);
2010 	}
2011 	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
2012 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2013 	    pageind+head_npages)));
2014 
2015 	arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
2016 }
2017 
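/*
 * Trim (oldsize - newsize) bytes from the tail of a large run and deallocate
 * the trimmed trailing pages; dirty indicates whether the caller treats the
 * trailing pages as dirtied.
 */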
2018 static void
2019 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2020     size_t oldsize, size_t newsize, bool dirty)
2021 {
2022 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2023 	size_t pageind = arena_miscelm_to_pageind(miscelm);
2024 	size_t head_npages = newsize >> LG_PAGE;
2025 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2026 	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2027 	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2028 	    CHUNK_MAP_UNZEROED : 0;
2029 	arena_chunk_map_misc_t *tail_miscelm;
2030 	arena_run_t *tail_run;
2031 
2032 	assert(oldsize > newsize);
2033 
2034 	/*
2035 	 * Update the chunk map so that arena_run_dalloc() can treat the
2036 	 * trailing run as separately allocated.  Set the last element of each
2037 	 * run first, in case of single-page runs.
2038 	 */
2039 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2040 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2041 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2042 	    pageind+head_npages-1)));
2043 	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2044 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2045 
2046 	if (config_debug) {
2047 		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2048 		assert(arena_mapbits_large_size_get(chunk,
2049 		    pageind+head_npages+tail_npages-1) == 0);
2050 		assert(arena_mapbits_dirty_get(chunk,
2051 		    pageind+head_npages+tail_npages-1) == flag_dirty);
2052 	}
2053 	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
2054 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2055 	    pageind+head_npages)));
2056 
2057 	tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
2058 	tail_run = &tail_miscelm->run;
2059 	arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
2060 	    0));
2061 }
2062 
2063 static arena_run_t *
2064 arena_bin_runs_first(arena_bin_t *bin)
2065 {
2066 	arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
2067 	if (miscelm != NULL)
2068 		return (&miscelm->run);
2069 
2070 	return (NULL);
2071 }
2072 
2073 static void
2074 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2075 {
2076 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2077 
2078 	assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
2079 
2080 	arena_run_tree_insert(&bin->runs, miscelm);
2081 }
2082 
2083 static void
2084 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
2085 {
2086 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2087 
2088 	assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
2089 
2090 	arena_run_tree_remove(&bin->runs, miscelm);
2091 }
2092 
2093 static arena_run_t *
2094 arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2095 {
2096 	arena_run_t *run = arena_bin_runs_first(bin);
2097 	if (run != NULL) {
2098 		arena_bin_runs_remove(bin, run);
2099 		if (config_stats)
2100 			bin->stats.reruns++;
2101 	}
2102 	return (run);
2103 }
2104 
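/*
 * Obtain a run with at least one free region for bin, preferring an existing
 * non-full run and otherwise allocating a new one.  bin->lock is dropped while
 * arena->lock is held for run allocation, so callers must cope with concurrent
 * updates to bin state.
 */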
2105 static arena_run_t *
2106 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
2107 {
2108 	arena_run_t *run;
2109 	szind_t binind;
2110 	arena_bin_info_t *bin_info;
2111 
2112 	/* Look for a usable run. */
2113 	run = arena_bin_nonfull_run_tryget(bin);
2114 	if (run != NULL)
2115 		return (run);
2116 	/* No existing runs have any space available. */
2117 
2118 	binind = arena_bin_index(arena, bin);
2119 	bin_info = &arena_bin_info[binind];
2120 
2121 	/* Allocate a new run. */
2122 	malloc_mutex_unlock(&bin->lock);
2123 	/******************************/
2124 	malloc_mutex_lock(&arena->lock);
2125 	run = arena_run_alloc_small(arena, bin_info->run_size, binind);
2126 	if (run != NULL) {
2127 		/* Initialize run internals. */
2128 		run->binind = binind;
2129 		run->nfree = bin_info->nregs;
2130 		bitmap_init(run->bitmap, &bin_info->bitmap_info);
2131 	}
2132 	malloc_mutex_unlock(&arena->lock);
2133 	/********************************/
2134 	malloc_mutex_lock(&bin->lock);
2135 	if (run != NULL) {
2136 		if (config_stats) {
2137 			bin->stats.nruns++;
2138 			bin->stats.curruns++;
2139 		}
2140 		return (run);
2141 	}
2142 
2143 	/*
2144 	 * arena_run_alloc_small() failed, but another thread may have made
2145 	 * sufficient memory available while this one dropped bin->lock above,
2146 	 * so search one more time.
2147 	 */
2148 	run = arena_bin_nonfull_run_tryget(bin);
2149 	if (run != NULL)
2150 		return (run);
2151 
2152 	return (NULL);
2153 }
2154 
2155 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2156 static void *
2157 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
2158 {
2159 	szind_t binind;
2160 	arena_bin_info_t *bin_info;
2161 	arena_run_t *run;
2162 
2163 	binind = arena_bin_index(arena, bin);
2164 	bin_info = &arena_bin_info[binind];
2165 	bin->runcur = NULL;
2166 	run = arena_bin_nonfull_run_get(arena, bin);
2167 	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2168 		/*
2169 		 * Another thread updated runcur while this one ran without the
2170 		 * bin lock in arena_bin_nonfull_run_get().
2171 		 */
2172 		void *ret;
2173 		assert(bin->runcur->nfree > 0);
2174 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
2175 		if (run != NULL) {
2176 			arena_chunk_t *chunk;
2177 
2178 			/*
2179 			 * arena_run_alloc_small() may have allocated run, or
2180 			 * it may have pulled run from the bin's run tree.
2181 			 * Therefore it is unsafe to make any assumptions about
2182 			 * how run has previously been used, and
2183 			 * arena_bin_lower_run() must be called, as if a region
2184 			 * were just deallocated from the run.
2185 			 */
2186 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2187 			if (run->nfree == bin_info->nregs)
2188 				arena_dalloc_bin_run(arena, chunk, run, bin);
2189 			else
2190 				arena_bin_lower_run(arena, chunk, run, bin);
2191 		}
2192 		return (ret);
2193 	}
2194 
2195 	if (run == NULL)
2196 		return (NULL);
2197 
2198 	bin->runcur = run;
2199 
2200 	assert(bin->runcur->nfree > 0);
2201 
2202 	return (arena_run_reg_alloc(bin->runcur, bin_info));
2203 }
2204 
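/*
 * Fill tbin with up to (ncached_max >> lg_fill_div) small regions from bin,
 * inserted so that low regions get allocated first.  On OOM, the regions
 * obtained so far are kept and tbin->ncached is set accordingly.
 */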
2205 void
2206 arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
2207     szind_t binind, uint64_t prof_accumbytes)
2208 {
2209 	unsigned i, nfill;
2210 	arena_bin_t *bin;
2211 
2212 	assert(tbin->ncached == 0);
2213 
2214 	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
2215 		prof_idump();
2216 	bin = &arena->bins[binind];
2217 	malloc_mutex_lock(&bin->lock);
2218 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2219 	    tbin->lg_fill_div); i < nfill; i++) {
2220 		arena_run_t *run;
2221 		void *ptr;
2222 		if ((run = bin->runcur) != NULL && run->nfree > 0)
2223 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2224 		else
2225 			ptr = arena_bin_malloc_hard(arena, bin);
2226 		if (ptr == NULL) {
2227 			/*
2228 			 * OOM.  tbin->avail isn't yet filled down to its first
2229 			 * element, so the successful allocations (if any) must
2230 			 * be moved just before tbin->avail before bailing out.
2231 			 */
2232 			if (i > 0) {
2233 				memmove(tbin->avail - i, tbin->avail - nfill,
2234 				    i * sizeof(void *));
2235 			}
2236 			break;
2237 		}
2238 		if (config_fill && unlikely(opt_junk_alloc)) {
2239 			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2240 			    true);
2241 		}
2242 		/* Insert such that low regions get used first. */
2243 		*(tbin->avail - nfill + i) = ptr;
2244 	}
2245 	if (config_stats) {
2246 		bin->stats.nmalloc += i;
2247 		bin->stats.nrequests += tbin->tstats.nrequests;
2248 		bin->stats.curregs += i;
2249 		bin->stats.nfills++;
2250 		tbin->tstats.nrequests = 0;
2251 	}
2252 	malloc_mutex_unlock(&bin->lock);
2253 	tbin->ncached = i;
2254 	arena_decay_tick(tsd, arena);
2255 }
2256 
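/*
 * Junk-fill a newly allocated small region with 0xa5.  If zero is true, only
 * the redzones on either side of the region are filled; otherwise the entire
 * region interval, redzones included, is filled.
 */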
2257 void
2258 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2259 {
2260 
2261 	if (zero) {
2262 		size_t redzone_size = bin_info->redzone_size;
2263 		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
2264 		    redzone_size);
2265 		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
2266 		    redzone_size);
2267 	} else {
2268 		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
2269 		    bin_info->reg_interval);
2270 	}
2271 }
2272 
2273 #ifdef JEMALLOC_JET
2274 #undef arena_redzone_corruption
2275 #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
2276 #endif
2277 static void
2278 arena_redzone_corruption(void *ptr, size_t usize, bool after,
2279     size_t offset, uint8_t byte)
2280 {
2281 
2282 	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2283 	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2284 	    after ? "after" : "before", ptr, usize, byte);
2285 }
2286 #ifdef JEMALLOC_JET
2287 #undef arena_redzone_corruption
2288 #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2289 arena_redzone_corruption_t *arena_redzone_corruption =
2290     JEMALLOC_N(arena_redzone_corruption_impl);
2291 #endif
2292 
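/*
 * Check that the redzone bytes surrounding a small region still hold the junk
 * pattern (0xa5), reporting any corrupt bytes and optionally restoring the
 * pattern.  Aborts when corruption is found and opt_abort is enabled.
 */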
2293 static void
2294 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2295 {
2296 	bool error = false;
2297 
2298 	if (opt_junk_alloc) {
2299 		size_t size = bin_info->reg_size;
2300 		size_t redzone_size = bin_info->redzone_size;
2301 		size_t i;
2302 
2303 		for (i = 1; i <= redzone_size; i++) {
2304 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2305 			if (*byte != 0xa5) {
2306 				error = true;
2307 				arena_redzone_corruption(ptr, size, false, i,
2308 				    *byte);
2309 				if (reset)
2310 					*byte = 0xa5;
2311 			}
2312 		}
2313 		for (i = 0; i < redzone_size; i++) {
2314 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2315 			if (*byte != 0xa5) {
2316 				error = true;
2317 				arena_redzone_corruption(ptr, size, true, i,
2318 				    *byte);
2319 				if (reset)
2320 					*byte = 0xa5;
2321 			}
2322 		}
2323 	}
2324 
2325 	if (opt_abort && error)
2326 		abort();
2327 }
2328 
2329 #ifdef JEMALLOC_JET
2330 #undef arena_dalloc_junk_small
2331 #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
2332 #endif
2333 void
2334 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2335 {
2336 	size_t redzone_size = bin_info->redzone_size;
2337 
2338 	arena_redzones_validate(ptr, bin_info, false);
2339 	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
2340 	    bin_info->reg_interval);
2341 }
2342 #ifdef JEMALLOC_JET
2343 #undef arena_dalloc_junk_small
2344 #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2345 arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2346     JEMALLOC_N(arena_dalloc_junk_small_impl);
2347 #endif
2348 
2349 void
2350 arena_quarantine_junk_small(void *ptr, size_t usize)
2351 {
2352 	szind_t binind;
2353 	arena_bin_info_t *bin_info;
2354 	cassert(config_fill);
2355 	assert(opt_junk_free);
2356 	assert(opt_quarantine);
2357 	assert(usize <= SMALL_MAXCLASS);
2358 
2359 	binind = size2index(usize);
2360 	bin_info = &arena_bin_info[binind];
2361 	arena_redzones_validate(ptr, bin_info, true);
2362 }
2363 
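/*
 * Allocate a small region of the size class indicated by binind, using
 * bin->runcur when it has free regions and falling back to
 * arena_bin_malloc_hard() otherwise.  Junk/zero filling is applied according
 * to the fill options and the zero flag.
 */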
2364 static void *
2365 arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
2366 {
2367 	void *ret;
2368 	arena_bin_t *bin;
2369 	size_t usize;
2370 	arena_run_t *run;
2371 
2372 	assert(binind < NBINS);
2373 	bin = &arena->bins[binind];
2374 	usize = index2size(binind);
2375 
2376 	malloc_mutex_lock(&bin->lock);
2377 	if ((run = bin->runcur) != NULL && run->nfree > 0)
2378 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2379 	else
2380 		ret = arena_bin_malloc_hard(arena, bin);
2381 
2382 	if (ret == NULL) {
2383 		malloc_mutex_unlock(&bin->lock);
2384 		return (NULL);
2385 	}
2386 
2387 	if (config_stats) {
2388 		bin->stats.nmalloc++;
2389 		bin->stats.nrequests++;
2390 		bin->stats.curregs++;
2391 	}
2392 	malloc_mutex_unlock(&bin->lock);
2393 	if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
2394 		prof_idump();
2395 
2396 	if (!zero) {
2397 		if (config_fill) {
2398 			if (unlikely(opt_junk_alloc)) {
2399 				arena_alloc_junk_small(ret,
2400 				    &arena_bin_info[binind], false);
2401 			} else if (unlikely(opt_zero))
2402 				memset(ret, 0, usize);
2403 		}
2404 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2405 	} else {
2406 		if (config_fill && unlikely(opt_junk_alloc)) {
2407 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
2408 			    true);
2409 		}
2410 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2411 		memset(ret, 0, usize);
2412 	}
2413 
2414 	arena_decay_tick(tsd, arena);
2415 	return (ret);
2416 }
2417 
2418 void *
2419 arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
2420 {
2421 	void *ret;
2422 	size_t usize;
2423 	uintptr_t random_offset;
2424 	arena_run_t *run;
2425 	arena_chunk_map_misc_t *miscelm;
2426 	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
2427 
2428 	/* Large allocation. */
2429 	usize = index2size(binind);
2430 	malloc_mutex_lock(&arena->lock);
2431 	if (config_cache_oblivious) {
2432 		uint64_t r;
2433 
2434 		/*
2435 		 * Compute a uniformly distributed offset within the first page
2436 		 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2437 		 * for 4 KiB pages and 64-byte cachelines.
2438 		 */
2439 		r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
2440 		random_offset = ((uintptr_t)r) << LG_CACHELINE;
2441 	} else
2442 		random_offset = 0;
2443 	run = arena_run_alloc_large(arena, usize + large_pad, zero);
2444 	if (run == NULL) {
2445 		malloc_mutex_unlock(&arena->lock);
2446 		return (NULL);
2447 	}
2448 	miscelm = arena_run_to_miscelm(run);
2449 	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2450 	    random_offset);
2451 	if (config_stats) {
2452 		szind_t index = binind - NBINS;
2453 
2454 		arena->stats.nmalloc_large++;
2455 		arena->stats.nrequests_large++;
2456 		arena->stats.allocated_large += usize;
2457 		arena->stats.lstats[index].nmalloc++;
2458 		arena->stats.lstats[index].nrequests++;
2459 		arena->stats.lstats[index].curruns++;
2460 	}
2461 	if (config_prof)
2462 		idump = arena_prof_accum_locked(arena, usize);
2463 	malloc_mutex_unlock(&arena->lock);
2464 	if (config_prof && idump)
2465 		prof_idump();
2466 
2467 	if (!zero) {
2468 		if (config_fill) {
2469 			if (unlikely(opt_junk_alloc))
2470 				memset(ret, 0xa5, usize);
2471 			else if (unlikely(opt_zero))
2472 				memset(ret, 0, usize);
2473 		}
2474 	}
2475 
2476 	arena_decay_tick(tsd, arena);
2477 	return (ret);
2478 }
2479 
2480 void *
2481 arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
2482     bool zero, tcache_t *tcache)
2483 {
2484 
2485 	arena = arena_choose(tsd, arena);
2486 	if (unlikely(arena == NULL))
2487 		return (NULL);
2488 
2489 	if (likely(size <= SMALL_MAXCLASS))
2490 		return (arena_malloc_small(tsd, arena, ind, zero));
2491 	if (likely(size <= large_maxclass))
2492 		return (arena_malloc_large(tsd, arena, ind, zero));
2493 	return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
2494 }
2495 
2496 /* Only handles large allocations that require more than page alignment. */
2497 static void *
2498 arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2499     bool zero)
2500 {
2501 	void *ret;
2502 	size_t alloc_size, leadsize, trailsize;
2503 	arena_run_t *run;
2504 	arena_chunk_t *chunk;
2505 	arena_chunk_map_misc_t *miscelm;
2506 	void *rpages;
2507 
2508 	assert(usize == PAGE_CEILING(usize));
2509 
2510 	arena = arena_choose(tsd, arena);
2511 	if (unlikely(arena == NULL))
2512 		return (NULL);
2513 
2514 	alignment = PAGE_CEILING(alignment);
2515 	alloc_size = usize + large_pad + alignment - PAGE;
2516 
2517 	malloc_mutex_lock(&arena->lock);
2518 	run = arena_run_alloc_large(arena, alloc_size, false);
2519 	if (run == NULL) {
2520 		malloc_mutex_unlock(&arena->lock);
2521 		return (NULL);
2522 	}
2523 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2524 	miscelm = arena_run_to_miscelm(run);
2525 	rpages = arena_miscelm_to_rpages(miscelm);
2526 
2527 	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2528 	    (uintptr_t)rpages;
2529 	assert(alloc_size >= leadsize + usize);
2530 	trailsize = alloc_size - leadsize - usize - large_pad;
2531 	if (leadsize != 0) {
2532 		arena_chunk_map_misc_t *head_miscelm = miscelm;
2533 		arena_run_t *head_run = run;
2534 
2535 		miscelm = arena_miscelm_get(chunk,
2536 		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2537 		    LG_PAGE));
2538 		run = &miscelm->run;
2539 
2540 		arena_run_trim_head(arena, chunk, head_run, alloc_size,
2541 		    alloc_size - leadsize);
2542 	}
2543 	if (trailsize != 0) {
2544 		arena_run_trim_tail(arena, chunk, run, usize + large_pad +
2545 		    trailsize, usize + large_pad, false);
2546 	}
2547 	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2548 		size_t run_ind =
2549 		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
2550 		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2551 		bool decommitted = (arena_mapbits_decommitted_get(chunk,
2552 		    run_ind) != 0);
2553 
2554 		assert(decommitted); /* Cause of OOM. */
2555 		arena_run_dalloc(arena, run, dirty, false, decommitted);
2556 		malloc_mutex_unlock(&arena->lock);
2557 		return (NULL);
2558 	}
2559 	ret = arena_miscelm_to_rpages(miscelm);
2560 
2561 	if (config_stats) {
2562 		szind_t index = size2index(usize) - NBINS;
2563 
2564 		arena->stats.nmalloc_large++;
2565 		arena->stats.nrequests_large++;
2566 		arena->stats.allocated_large += usize;
2567 		arena->stats.lstats[index].nmalloc++;
2568 		arena->stats.lstats[index].nrequests++;
2569 		arena->stats.lstats[index].curruns++;
2570 	}
2571 	malloc_mutex_unlock(&arena->lock);
2572 
2573 	if (config_fill && !zero) {
2574 		if (unlikely(opt_junk_alloc))
2575 			memset(ret, 0xa5, usize);
2576 		else if (unlikely(opt_zero))
2577 			memset(ret, 0, usize);
2578 	}
2579 	arena_decay_tick(tsd, arena);
2580 	return (ret);
2581 }
2582 
2583 void *
2584 arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
2585     bool zero, tcache_t *tcache)
2586 {
2587 	void *ret;
2588 
2589 	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2590 	    && (usize & PAGE_MASK) == 0))) {
2591 		/* Small; alignment doesn't require special run placement. */
2592 		ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
2593 		    tcache, true);
2594 	} else if (usize <= large_maxclass && alignment <= PAGE) {
2595 		/*
2596 		 * Large; alignment doesn't require special run placement.
2597 		 * However, the cached pointer may be at a random offset from
2598 		 * the base of the run, so do some bit manipulation to retrieve
2599 		 * the base.
2600 		 */
2601 		ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
2602 		    tcache, true);
2603 		if (config_cache_oblivious)
2604 			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2605 	} else {
2606 		if (likely(usize <= large_maxclass)) {
2607 			ret = arena_palloc_large(tsd, arena, usize, alignment,
2608 			    zero);
2609 		} else if (likely(alignment <= chunksize))
2610 			ret = huge_malloc(tsd, arena, usize, zero, tcache);
2611 		else {
2612 			ret = huge_palloc(tsd, arena, usize, alignment, zero,
2613 			    tcache);
2614 		}
2615 	}
2616 	return (ret);
2617 }
2618 
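/*
 * Record that the LARGE_MINCLASS run backing ptr actually serves a promoted
 * small allocation of the given size, by storing the small size class index
 * in the chunk map so that isalloc(ptr, true) reports size.
 */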
2619 void
2620 arena_prof_promoted(const void *ptr, size_t size)
2621 {
2622 	arena_chunk_t *chunk;
2623 	size_t pageind;
2624 	szind_t binind;
2625 
2626 	cassert(config_prof);
2627 	assert(ptr != NULL);
2628 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
2629 	assert(isalloc(ptr, false) == LARGE_MINCLASS);
2630 	assert(isalloc(ptr, true) == LARGE_MINCLASS);
2631 	assert(size <= SMALL_MAXCLASS);
2632 
2633 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2634 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2635 	binind = size2index(size);
2636 	assert(binind < NBINS);
2637 	arena_mapbits_large_binind_set(chunk, pageind, binind);
2638 
2639 	assert(isalloc(ptr, false) == LARGE_MINCLASS);
2640 	assert(isalloc(ptr, true) == size);
2641 }
2642 
2643 static void
2644 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2645     arena_bin_t *bin)
2646 {
2647 
2648 	/* Dissociate run from bin. */
2649 	if (run == bin->runcur)
2650 		bin->runcur = NULL;
2651 	else {
2652 		szind_t binind = arena_bin_index(extent_node_arena_get(
2653 		    &chunk->node), bin);
2654 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
2655 
2656 		if (bin_info->nregs != 1) {
2657 			/*
2658 			 * This block's conditional is necessary because if the
2659 			 * run only contains one region, then it never gets
2660 			 * inserted into the non-full runs tree.
2661 			 */
2662 			arena_bin_runs_remove(bin, run);
2663 		}
2664 	}
2665 }
2666 
2667 static void
2668 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2669     arena_bin_t *bin)
2670 {
2671 
2672 	assert(run != bin->runcur);
2673 	assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
2674 	    NULL);
2675 
2676 	malloc_mutex_unlock(&bin->lock);
2677 	/******************************/
2678 	malloc_mutex_lock(&arena->lock);
2679 	arena_run_dalloc(arena, run, true, false, false);
2680 	malloc_mutex_unlock(&arena->lock);
2681 	/****************************/
2682 	malloc_mutex_lock(&bin->lock);
2683 	if (config_stats)
2684 		bin->stats.curruns--;
2685 }
2686 
2687 static void
2688 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2689     arena_bin_t *bin)
2690 {
2691 
2692 	/*
2693 	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2694 	 * non-full run.  It is okay to NULL runcur out rather than proactively
2695 	 * keeping it pointing at the lowest non-full run.
2696 	 */
2697 	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
2698 		/* Switch runcur. */
2699 		if (bin->runcur->nfree > 0)
2700 			arena_bin_runs_insert(bin, bin->runcur);
2701 		bin->runcur = run;
2702 		if (config_stats)
2703 			bin->stats.reruns++;
2704 	} else
2705 		arena_bin_runs_insert(bin, run);
2706 }
2707 
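/*
 * Deallocate a small region with bin->lock held.  If the run becomes empty it
 * is dissociated from the bin and returned to the arena; if it just became
 * non-full it is made available for reuse via arena_bin_lower_run().
 */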
2708 static void
2709 arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2710     arena_chunk_map_bits_t *bitselm, bool junked)
2711 {
2712 	size_t pageind, rpages_ind;
2713 	arena_run_t *run;
2714 	arena_bin_t *bin;
2715 	arena_bin_info_t *bin_info;
2716 	szind_t binind;
2717 
2718 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2719 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2720 	run = &arena_miscelm_get(chunk, rpages_ind)->run;
2721 	binind = run->binind;
2722 	bin = &arena->bins[binind];
2723 	bin_info = &arena_bin_info[binind];
2724 
2725 	if (!junked && config_fill && unlikely(opt_junk_free))
2726 		arena_dalloc_junk_small(ptr, bin_info);
2727 
2728 	arena_run_reg_dalloc(run, ptr);
2729 	if (run->nfree == bin_info->nregs) {
2730 		arena_dissociate_bin_run(chunk, run, bin);
2731 		arena_dalloc_bin_run(arena, chunk, run, bin);
2732 	} else if (run->nfree == 1 && run != bin->runcur)
2733 		arena_bin_lower_run(arena, chunk, run, bin);
2734 
2735 	if (config_stats) {
2736 		bin->stats.ndalloc++;
2737 		bin->stats.curregs--;
2738 	}
2739 }
2740 
2741 void
2742 arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2743     arena_chunk_map_bits_t *bitselm)
2744 {
2745 
2746 	arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
2747 }
2748 
2749 void
2750 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2751     size_t pageind, arena_chunk_map_bits_t *bitselm)
2752 {
2753 	arena_run_t *run;
2754 	arena_bin_t *bin;
2755 	size_t rpages_ind;
2756 
2757 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2758 	run = &arena_miscelm_get(chunk, rpages_ind)->run;
2759 	bin = &arena->bins[run->binind];
2760 	malloc_mutex_lock(&bin->lock);
2761 	arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
2762 	malloc_mutex_unlock(&bin->lock);
2763 }
2764 
2765 void
2766 arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
2767     size_t pageind)
2768 {
2769 	arena_chunk_map_bits_t *bitselm;
2770 
2771 	if (config_debug) {
2772 		/* arena_ptr_small_binind_get() does extra sanity checking. */
2773 		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2774 		    pageind)) != BININD_INVALID);
2775 	}
2776 	bitselm = arena_bitselm_get(chunk, pageind);
2777 	arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
2778 	arena_decay_tick(tsd, arena);
2779 }
2780 
2781 #ifdef JEMALLOC_JET
2782 #undef arena_dalloc_junk_large
2783 #define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
2784 #endif
2785 void
2786 arena_dalloc_junk_large(void *ptr, size_t usize)
2787 {
2788 
2789 	if (config_fill && unlikely(opt_junk_free))
2790 		memset(ptr, 0x5a, usize);
2791 }
2792 #ifdef JEMALLOC_JET
2793 #undef arena_dalloc_junk_large
2794 #define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2795 arena_dalloc_junk_large_t *arena_dalloc_junk_large =
2796     JEMALLOC_N(arena_dalloc_junk_large_impl);
2797 #endif
2798 
2799 static void
2800 arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
2801     void *ptr, bool junked)
2802 {
2803 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2804 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2805 	arena_run_t *run = &miscelm->run;
2806 
2807 	if (config_fill || config_stats) {
2808 		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2809 		    large_pad;
2810 
2811 		if (!junked)
2812 			arena_dalloc_junk_large(ptr, usize);
2813 		if (config_stats) {
2814 			szind_t index = size2index(usize) - NBINS;
2815 
2816 			arena->stats.ndalloc_large++;
2817 			arena->stats.allocated_large -= usize;
2818 			arena->stats.lstats[index].ndalloc++;
2819 			arena->stats.lstats[index].curruns--;
2820 		}
2821 	}
2822 
2823 	arena_run_dalloc(arena, run, true, false, false);
2824 }
2825 
2826 void
2827 arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
2828     void *ptr)
2829 {
2830 
2831 	arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
2832 }
2833 
2834 void
2835 arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
2836 {
2837 
2838 	malloc_mutex_lock(&arena->lock);
2839 	arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
2840 	malloc_mutex_unlock(&arena->lock);
2841 	arena_decay_tick(tsd, arena);
2842 }
2843 
2844 static void
2845 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2846     size_t oldsize, size_t size)
2847 {
2848 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2849 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
2850 	arena_run_t *run = &miscelm->run;
2851 
2852 	assert(size < oldsize);
2853 
2854 	/*
2855 	 * Shrink the run, and make trailing pages available for other
2856 	 * allocations.
2857 	 */
2858 	malloc_mutex_lock(&arena->lock);
2859 	arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
2860 	    large_pad, true);
2861 	if (config_stats) {
2862 		szind_t oldindex = size2index(oldsize) - NBINS;
2863 		szind_t index = size2index(size) - NBINS;
2864 
2865 		arena->stats.ndalloc_large++;
2866 		arena->stats.allocated_large -= oldsize;
2867 		arena->stats.lstats[oldindex].ndalloc++;
2868 		arena->stats.lstats[oldindex].curruns--;
2869 
2870 		arena->stats.nmalloc_large++;
2871 		arena->stats.nrequests_large++;
2872 		arena->stats.allocated_large += size;
2873 		arena->stats.lstats[index].nmalloc++;
2874 		arena->stats.lstats[index].nrequests++;
2875 		arena->stats.lstats[index].curruns++;
2876 	}
2877 	malloc_mutex_unlock(&arena->lock);
2878 }
2879 
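/*
 * Try to grow a large allocation in place by splitting off the leading portion
 * of the unallocated run that immediately follows it.  Returns false on
 * success, true if in-place growth is impossible.
 */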
2880 static bool
2881 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2882     size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
2883 {
2884 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2885 	size_t npages = (oldsize + large_pad) >> LG_PAGE;
2886 	size_t followsize;
2887 
2888 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
2889 	    large_pad);
2890 
2891 	/* Try to extend the run. */
2892 	malloc_mutex_lock(&arena->lock);
2893 	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
2894 	    pageind+npages) != 0)
2895 		goto label_fail;
2896 	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
2897 	if (oldsize + followsize >= usize_min) {
2898 		/*
2899 		 * The next run is available and sufficiently large.  Split the
2900 		 * following run, then merge the first part with the existing
2901 		 * allocation.
2902 		 */
2903 		arena_run_t *run;
2904 		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
2905 
2906 		usize = usize_max;
2907 		while (oldsize + followsize < usize)
2908 			usize = index2size(size2index(usize)-1);
2909 		assert(usize >= usize_min);
2910 		assert(usize >= oldsize);
2911 		splitsize = usize - oldsize;
2912 		if (splitsize == 0)
2913 			goto label_fail;
2914 
2915 		run = &arena_miscelm_get(chunk, pageind+npages)->run;
2916 		if (arena_run_split_large(arena, run, splitsize, zero))
2917 			goto label_fail;
2918 
2919 		if (config_cache_oblivious && zero) {
2920 			/*
2921 			 * Zero the trailing bytes of the original allocation's
2922 			 * last page, since they are in an indeterminate state.
2923 			 * There will always be trailing bytes, because ptr's
2924 			 * offset from the beginning of the run is a multiple of
2925 			 * CACHELINE in [0 .. PAGE).
2926 			 */
2927 			void *zbase = (void *)((uintptr_t)ptr + oldsize);
2928 			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
2929 			    PAGE));
2930 			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
2931 			assert(nzero > 0);
2932 			memset(zbase, 0, nzero);
2933 		}
2934 
2935 		size = oldsize + splitsize;
2936 		npages = (size + large_pad) >> LG_PAGE;
2937 
2938 		/*
2939 		 * Mark the extended run as dirty if either portion of the run
2940 		 * was dirty before allocation.  This is rather pedantic,
2941 		 * because there's not actually any sequence of events that
2942 		 * could cause the resulting run to be passed to
2943 		 * arena_run_dalloc() with the dirty argument set to false
2944 		 * (which is when dirty flag consistency would really matter).
2945 		 */
2946 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2947 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
2948 		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
2949 		arena_mapbits_large_set(chunk, pageind, size + large_pad,
2950 		    flag_dirty | (flag_unzeroed_mask &
2951 		    arena_mapbits_unzeroed_get(chunk, pageind)));
2952 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
2953 		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2954 		    pageind+npages-1)));
2955 
2956 		if (config_stats) {
2957 			szind_t oldindex = size2index(oldsize) - NBINS;
2958 			szind_t index = size2index(size) - NBINS;
2959 
2960 			arena->stats.ndalloc_large++;
2961 			arena->stats.allocated_large -= oldsize;
2962 			arena->stats.lstats[oldindex].ndalloc++;
2963 			arena->stats.lstats[oldindex].curruns--;
2964 
2965 			arena->stats.nmalloc_large++;
2966 			arena->stats.nrequests_large++;
2967 			arena->stats.allocated_large += size;
2968 			arena->stats.lstats[index].nmalloc++;
2969 			arena->stats.lstats[index].nrequests++;
2970 			arena->stats.lstats[index].curruns++;
2971 		}
2972 		malloc_mutex_unlock(&arena->lock);
2973 		return (false);
2974 	}
2975 label_fail:
2976 	malloc_mutex_unlock(&arena->lock);
2977 	return (true);
2978 }
2979 
2980 #ifdef JEMALLOC_JET
2981 #undef arena_ralloc_junk_large
2982 #define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2983 #endif
2984 static void
2985 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2986 {
2987 
2988 	if (config_fill && unlikely(opt_junk_free)) {
2989 		memset((void *)((uintptr_t)ptr + usize), 0x5a,
2990 		    old_usize - usize);
2991 	}
2992 }
2993 #ifdef JEMALLOC_JET
2994 #undef arena_ralloc_junk_large
2995 #define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2996 arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2997     JEMALLOC_N(arena_ralloc_junk_large_impl);
2998 #endif
2999 
3000 /*
3001  * Try to resize a large allocation, in order to avoid copying.  This will
3002  * always fail if growing an object and the following run is already in use.
3003  */
3004 static bool
3005 arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
3006     size_t usize_max, bool zero)
3007 {
3008 	arena_chunk_t *chunk;
3009 	arena_t *arena;
3010 
3011 	if (oldsize == usize_max) {
3012 		/* Current size class is compatible and maximal. */
3013 		return (false);
3014 	}
3015 
3016 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3017 	arena = extent_node_arena_get(&chunk->node);
3018 
3019 	if (oldsize < usize_max) {
3020 		bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
3021 		    usize_min, usize_max, zero);
3022 		if (config_fill && !ret && !zero) {
3023 			if (unlikely(opt_junk_alloc)) {
3024 				memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
3025 				    isalloc(ptr, config_prof) - oldsize);
3026 			} else if (unlikely(opt_zero)) {
3027 				memset((void *)((uintptr_t)ptr + oldsize), 0,
3028 				    isalloc(ptr, config_prof) - oldsize);
3029 			}
3030 		}
3031 		return (ret);
3032 	}
3033 
3034 	assert(oldsize > usize_max);
3035 	/* Fill before shrinking in order to avoid a race. */
3036 	arena_ralloc_junk_large(ptr, oldsize, usize_max);
3037 	arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
3038 	return (false);
3039 }
3040 
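/*
 * Try to satisfy a resize request without moving the allocation.  Returns
 * false if ptr can keep being used at the requested size, true if the caller
 * must allocate new space and copy.
 */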
3041 bool
3042 arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
3043     size_t extra, bool zero)
3044 {
3045 	size_t usize_min, usize_max;
3046 
3047 	/* Calls with non-zero extra had to clamp extra. */
3048 	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3049 
3050 	if (unlikely(size > HUGE_MAXCLASS))
3051 		return (true);
3052 
3053 	usize_min = s2u(size);
3054 	usize_max = s2u(size + extra);
3055 	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
3056 		arena_chunk_t *chunk;
3057 
3058 		/*
3059 		 * Avoid moving the allocation if the size class can be left the
3060 		 * same.
3061 		 */
3062 		if (oldsize <= SMALL_MAXCLASS) {
3063 			assert(arena_bin_info[size2index(oldsize)].reg_size ==
3064 			    oldsize);
3065 			if ((usize_max > SMALL_MAXCLASS ||
3066 			    size2index(usize_max) != size2index(oldsize)) &&
3067 			    (size > oldsize || usize_max < oldsize))
3068 				return (true);
3069 		} else {
3070 			if (usize_max <= SMALL_MAXCLASS)
3071 				return (true);
3072 			if (arena_ralloc_large(ptr, oldsize, usize_min,
3073 			    usize_max, zero))
3074 				return (true);
3075 		}
3076 
3077 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3078 		arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
3079 		return (false);
3080 	} else {
3081 		return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
3082 		    usize_max, zero));
3083 	}
3084 }
3085 
3086 static void *
3087 arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
3088     size_t alignment, bool zero, tcache_t *tcache)
3089 {
3090 
3091 	if (alignment == 0)
3092 		return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
3093 		    tcache, true));
3094 	usize = sa2u(usize, alignment);
3095 	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
3096 		return (NULL);
3097 	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
3098 }
3099 
3100 void *
3101 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
3102     size_t alignment, bool zero, tcache_t *tcache)
3103 {
3104 	void *ret;
3105 	size_t usize;
3106 
3107 	usize = s2u(size);
3108 	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
3109 		return (NULL);
3110 
3111 	if (likely(usize <= large_maxclass)) {
3112 		size_t copysize;
3113 
3114 		/* Try to avoid moving the allocation. */
3115 		if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
3116 			return (ptr);
3117 
3118 		/*
3119 		 * size and oldsize are different enough that we need to move
3120 		 * the object.  In that case, fall back to allocating new space
3121 		 * and copying.
3122 		 */
3123 		ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
3124 		    zero, tcache);
3125 		if (ret == NULL)
3126 			return (NULL);
3127 
3128 		/*
3129 		 * Junk/zero-filling was already done by
3130 		 * ipalloc()/arena_malloc().
3131 		 */
3132 
3133 		copysize = (usize < oldsize) ? usize : oldsize;
3134 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3135 		memcpy(ret, ptr, copysize);
3136 		isqalloc(tsd, ptr, oldsize, tcache);
3137 	} else {
3138 		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3139 		    zero, tcache);
3140 	}
3141 	return (ret);
3142 }
3143 
3144 dss_prec_t
3145 arena_dss_prec_get(arena_t *arena)
3146 {
3147 	dss_prec_t ret;
3148 
3149 	malloc_mutex_lock(&arena->lock);
3150 	ret = arena->dss_prec;
3151 	malloc_mutex_unlock(&arena->lock);
3152 	return (ret);
3153 }
3154 
3155 bool
3156 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
3157 {
3158 
3159 	if (!have_dss)
3160 		return (dss_prec != dss_prec_disabled);
3161 	malloc_mutex_lock(&arena->lock);
3162 	arena->dss_prec = dss_prec;
3163 	malloc_mutex_unlock(&arena->lock);
3164 	return (false);
3165 }
3166 
3167 ssize_t
arena_lg_dirty_mult_default_get(void)3168 arena_lg_dirty_mult_default_get(void)
3169 {
3170 
3171 	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3172 }
3173 
3174 bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)3175 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3176 {
3177 
3178 	if (opt_purge != purge_mode_ratio)
3179 		return (true);
3180 	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3181 		return (true);
3182 	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3183 	return (false);
3184 }
3185 
3186 ssize_t
arena_decay_time_default_get(void)3187 arena_decay_time_default_get(void)
3188 {
3189 
3190 	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3191 }
3192 
3193 bool
arena_decay_time_default_set(ssize_t decay_time)3194 arena_decay_time_default_set(ssize_t decay_time)
3195 {
3196 
3197 	if (opt_purge != purge_mode_decay)
3198 		return (true);
3199 	if (!arena_decay_time_valid(decay_time))
3200 		return (true);
3201 	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3202 	return (false);
3203 }

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay_time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
    size_t *ndirty)
{

	malloc_mutex_lock(&arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(&arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(&arena->lock);

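	/*
	 * Bin stats live under per-bin locks rather than the arena lock, so
	 * they are merged after arena->lock has been dropped.
	 */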
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}

unsigned
arena_nthreads_get(arena_t *arena)
{

	return (atomic_read_u(&arena->nthreads));
}

void
arena_nthreads_inc(arena_t *arena)
{

	atomic_add_u(&arena->nthreads, 1);
}

void
arena_nthreads_dec(arena_t *arena)
{

	atomic_sub_u(&arena->nthreads, 1);
}

arena_t *
arena_new(unsigned ind)
{
	arena_t *arena;
	size_t arena_size;
	unsigned i;
	arena_bin_t *bin;

	/* Compute arena size to incorporate sufficient runs_avail elements. */
	arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) *
	    runs_avail_nclasses);
	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
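	/*
	 * The resulting layout is:
	 *
	 *   [arena_t, padded up to a cacheline boundary]
	 *   [nlclasses * malloc_large_stats_t, padded up to a quantum boundary]
	 *   [nhclasses * malloc_huge_stats_t]
	 */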
	if (config_stats) {
		arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) +
		    nhclasses * sizeof(malloc_huge_stats_t));
	} else
		arena = (arena_t *)base_alloc(arena_size);
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads = 0;
	if (malloc_mutex_init(&arena->lock))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (uint64_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get();

	arena->spare = NULL;

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	for (i = 0; i < runs_avail_nclasses; i++)
		arena_run_tree_new(&arena->runs_avail[i]);
	qr_new(&arena->runs_dirty, rd_link);
	qr_new(&arena->chunks_cache, cc_link);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx))
		return (NULL);

	extent_tree_szad_new(&arena->chunks_szad_cached);
	extent_tree_ad_new(&arena->chunks_ad_cached);
	extent_tree_szad_new(&arena->chunks_szad_retained);
	extent_tree_ad_new(&arena->chunks_ad_retained);
	if (malloc_mutex_init(&arena->chunks_mtx))
		return (NULL);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->node_cache_mtx))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (NULL);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);
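	/*
	 * Hedged illustration (assuming REDZONE_MINSIZE is 16): for a 64-byte
	 * class, align_min is 64, so redzone_size becomes 32, pad_size becomes
	 * 32, and reg_interval becomes 64 + 2*32 = 128.
	 */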

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on run
	 * size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);
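	/*
	 * For example (assuming 4 KiB pages), a 96-byte class settles on
	 * perfect_run_size = 12288 (3 pages) with perfect_nregs = 128, the
	 * smallest page multiple that 96 divides evenly.
	 */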

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region can
	 * fit within the number of pages that would normally be dedicated to a
	 * run for this size class.  Increase the run size until at least one
	 * region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
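	/*
	 * Regions are packed toward the end of the run: any remainder from the
	 * division above is left unused at the start, the trailing pad (if
	 * any) sits at the very end, and reg0_offset points just past region
	 * 0's leading redzone.
	 */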

	if (actual_run_size > small_maxrun)
		small_maxrun = actual_run_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

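	/*
	 * SIZE_CLASSES is an X-macro list; each SC() entry whose bin argument
	 * is "yes" expands to an initializer for one small bin.  As a hedged
	 * illustration only (the real entries come from the generated size
	 * class table), a hypothetical SC(index, 6, 4, 1, yes, 4) would expand
	 * to roughly:
	 *
	 *	bin_info = &arena_bin_info[index];
	 *	bin_info->reg_size = (ZU(1) << 6) + (ZU(1) << 4);   <- 80 bytes
	 *	bin_info_run_size_calc(bin_info);
	 *	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	 */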
#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

static bool
small_run_size_init(void)
{

	assert(small_maxrun != 0);

	small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
	    LG_PAGE));
	if (small_run_tab == NULL)
		return (true);

#define	TAB_INIT_bin_yes(index, size) {					\
		arena_bin_info_t *bin_info = &arena_bin_info[index];	\
		small_run_tab[bin_info->run_size >> LG_PAGE] = true;	\
	}
#define	TAB_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC

	return (false);
}

static bool
run_quantize_init(void)
{
	unsigned i;

	run_quantize_max = chunksize + large_pad;

	run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_floor_tab == NULL)
		return (true);

	run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_ceil_tab == NULL)
		return (true);

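	/*
	 * Both memoization tables are indexed by page count minus one: entry
	 * i-1 holds the quantized size for an i-page run, as filled in below.
	 */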
	for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
		size_t run_size = i << LG_PAGE;

		run_quantize_floor_tab[i-1] =
		    run_quantize_floor_compute(run_size);
		run_quantize_ceil_tab[i-1] =
		    run_quantize_ceil_compute(run_size);
	}

	return (false);
}

bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
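	/*
	 * Hedged numeric sketch (purely illustrative sizes): with 4 KiB pages,
	 * 512 pages per chunk, a 72-byte fixed header, and 112 bytes of map
	 * entries per page, pass (1) yields a header of 72 + 112*512 = 57416
	 * bytes, i.e. map_bias = 15; pass (2) recomputes with 497 mapped pages
	 * and gets map_bias = 14; pass (3) confirms 14.
	 */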
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
	if (small_run_size_init())
		return (true);
	if (run_quantize_init())
		return (true);

	runs_avail_bias = size2index(PAGE);
	runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;

	return (false);
}

void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	malloc_mutex_prefork(&arena->huge_mtx);
	malloc_mutex_prefork(&arena->chunks_mtx);
	malloc_mutex_prefork(&arena->node_cache_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}
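
/*
 * The postfork handlers below release the mutexes in the reverse of the
 * acquisition order used in arena_prefork().
 */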

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->node_cache_mtx);
	malloc_mutex_postfork_parent(&arena->chunks_mtx);
	malloc_mutex_postfork_parent(&arena->huge_mtx);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->node_cache_mtx);
	malloc_mutex_postfork_child(&arena->chunks_mtx);
	malloc_mutex_postfork_child(&arena->huge_mtx);
	malloc_mutex_postfork_child(&arena->lock);
}